max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64) |
---|---|---|---|---|---|---|
src/command_modules/azure-cli-policyinsights/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py | diberry/azure-cli | 1 | 1535 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, record_only
@record_only()
class PolicyInsightsTests(ScenarioTest):
def test_policy_insights(self):
top_clause = '--top 2'
filter_clause = '--filter "isCompliant eq false"'
apply_clause = '--apply "groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))"'
select_clause = '--select "policyAssignmentId, resourceId, numRecords"'
order_by_clause = '--order-by "numRecords desc"'
from_clause = '--from "2018-04-04T00:00:00"'
to_clause = '--to "2018-05-22T00:00:00"'
scopes = [
'-m "azgovtest4"',
'',
'-g "defaultresourcegroup-eus"',
'--resource "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba"',
'--resource "omssecuritydevkeyvalut" --namespace "microsoft.keyvault" --resource-type "vaults" -g "omssecurityintresourcegroup"',
'--resource "default" --namespace "microsoft.network" --resource-type "subnets" --parent "virtualnetworks/mms-wcus-vnet" -g "mms-wcus"',
'-s "335cefd2-ab16-430f-b364-974a170eb1d5"',
'-d "25bf1e2a-6004-47ad-9bd1-2a40dd6de016"',
'-a "96e22f7846e94bb186ae3a01"',
'-a "bc916e4f3ab54030822a11b3" -g "tipkeyvaultresourcegroup" '
]
for scope in scopes:
events = self.cmd('az policy event list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(events) >= 0
states = self.cmd('az policy state list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(states) >= 0
summary = self.cmd('az policy state summarize {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
top_clause)).get_output_in_json()
assert summary["results"] is not None
assert len(summary["policyAssignments"]) >= 0
if len(summary["policyAssignments"]) > 0:
assert summary["policyAssignments"][0]["results"] is not None
assert len(summary["policyAssignments"][0]["policyDefinitions"]) >= 0
if len(summary["policyAssignments"][0]["policyDefinitions"]) > 0:
assert summary["policyAssignments"][0]["policyDefinitions"][0]["results"] is not None
| 1.390625 | 1 |
scripts/tator_tracker.py | openem-team/openem | 10 | 1551 | #!/usr/bin/env python3
import argparse
import openem
import os
import cv2
import numpy as np
from openem.tracking import *
import json
import sys
import datetime
import tator
from pprint import pprint
from collections import defaultdict
import yaml
import math
import subprocess
def crop_localization(frame_bgr, localization):
img_width = frame_bgr.shape[1]
img_height = frame_bgr.shape[0]
box_x = round(localization['x'] * img_width)
box_y = round(localization['y'] * img_height)
box_width = round(localization['width'] * img_width)
box_height = round(localization['height'] * img_height)
img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:]
return img_crop
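# Example: with a 1920x1080 frame and a localization of
# {'x': 0.5, 'y': 0.5, 'width': 0.1, 'height': 0.1}, this returns the
# 192x108 pixel crop whose top-left corner is at pixel (960, 540).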
def join_up_iteration(detections, track_ids):
tracklets = defaultdict(list)
num_tracklets = np.max(track_ids) + 1
assert(len(detections) == len(track_ids))
for d,tid in zip(detections, track_ids):
tracklets[tid].append(d)
return tracklets
def extend_tracklets(tracklets, length):
for track_id,track in tracklets.items():
if len(track) <= 16:
continue
ext_length = min(length,len(track))
sum_h=0.0
sum_w=0.0
track.sort(key=lambda x:x['frame'])
def restore_det(det):
det['x'] = det.get('orig_x',det['x'])
det['y'] = det.get('orig_y',det['y'])
det['width'] = det.get('orig_w',det['width'])
det['height'] = det.get('orig_h',det['height'])
det['orig_x'] = det['x']
det['orig_y'] = det['y']
det['orig_w'] = det['width']
det['orig_h'] = det['height']
restore_det(track[0])
restore_det(track[-1])
for d in track:
sum_h += d['height']
sum_w += d['width']
angle,vel,comps = track_vel(track)
vel_x = comps[0]
vel_y = comps[1]
avg_h = sum_h / len(track)
avg_w = sum_w / len(track)
new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length)))
new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length)))
old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length)))
old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length)))
min_x = min(track[-1]['x'],new_x)
min_y = min(track[-1]['y'],new_y)
if min_x > 0 and min_y > 0:
track[-1]['x'] = min_x
track[-1]['y'] = min_y
track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1)
track[-1]['height'] = min(max(0,abs(new_y-track[-1]['y'])+avg_h),1)
else:
track[-1]['width'] = 0
track[-1]['height'] = 0
min_x = min(track[0]['x'],old_x)
min_y = min(track[0]['y'],old_y)
if min_x > 0 and min_y > 0:
track[0]['x'] = min(max(0,min_x),1)
track[0]['y'] = min(max(0,min_y),1)
track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1)
track[0]['height'] = min(max(abs(old_y-track[0]['y'])+avg_h,0),1)
else:
track[0]['width'] = 0
track[0]['height'] = 0
return tracklets
def split_tracklets(tracklets):
track_ids=[]
detections=[]
for track_id,track in tracklets.items():
for d in track:
track_ids.append(track_id)
detections.append(d)
return detections,track_ids
def trim_tracklets(detections, track_ids, max_length):
tracklets = join_up_iteration(detections, track_ids)
next_track_id = 1
new_tracklets = {}
for track_id,detections in tracklets.items():
new_track_count=math.ceil(len(detections)/max_length)
for i in range(new_track_count):
start=max_length*i
end=max_length+(max_length*i)
new_tracklets[next_track_id] = detections[start:end]
next_track_id += 1
detections, track_ids = split_tracklets(new_tracklets)
track_ids = renumber_track_ids(track_ids)
return detections, track_ids
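# Example (hypothetical numbers): one track of 5 detections trimmed with
# max_length=2 is split into three tracks of sizes [2, 2, 1], and the
# resulting track ids are renumbered to stay contiguous.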
if __name__=="__main__":
parser = argparse.ArgumentParser(description=__doc__)
tator.get_parser(parser)
parser.add_argument("--detection-type-id", type=int, required=True)
parser.add_argument("--tracklet-type-id", type=int, required=True)
parser.add_argument("--version-id", type=int)
parser.add_argument("--input-version-id", type=int)
parser.add_argument("--strategy-config", type=str)
parser.add_argument("--dry-run", action='store_true')
parser.add_argument('media_files', type=str, nargs='*')
args = parser.parse_args()
# Weight methods
methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion']
# Weight methods that require the video
visual_methods = ['hybrid', 'iou-global-motion']
api = tator.get_api(args.host, args.token)
detection_type = api.get_localization_type(args.detection_type_id)
project = detection_type.project
version_id = args.version_id
default_strategy = {"method": "hybrid",
"frame-diffs": [1,2,4,8,16,32,64,128,256],
"args": {},
"extension": {'method' : None},
"max-length": {},
"min-length": 0}
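# A sketch of a strategy YAML file that would override these defaults; the
# top-level keys mirror default_strategy above, but the argument names under
# 'args' depend on the chosen weights class and are illustrative only:
#   method: iou
#   frame-diffs: [1, 2, 4]
#   args:
#     threshold: 0.5      # hypothetical IoUWeights argument
#   min-length: 3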
if args.strategy_config:
strategy = {**default_strategy}
with open(args.strategy_config, "r") as strategy_file:
strategy.update(yaml.safe_load(strategy_file))
else:
strategy = default_strategy
if strategy['method'] == 'hybrid':
model_file = strategy['args']['model_file']
batch_size = strategy['args'].get('batch_size', 4)
comparator=FeaturesComparator(model_file)
#extractor=FeaturesExtractor(args.model_file)
class_method = strategy.get('class-method',None)
classify_function = None
classify_args = {}
if class_method:
pip_package=class_method.get('pip',None)
if pip_package:
p = subprocess.run([sys.executable,
"-m",
"pip",
"install",
pip_package])
print("Finished process.", flush=True)
function_name = class_method.get('function',None)
classify_args = class_method.get('args',None)
names = function_name.split('.')
module = __import__(names[0])
for name in names[1:-1]:
module = getattr(module,name)
classify_function = getattr(module,names[-1])
print("Strategy: ", flush=True)
pprint(strategy)
print(args.media_files, flush=True)
optional_fetch_args = {}
if args.input_version_id:
optional_fetch_args['version'] = [args.input_version_id]
for media_file in args.media_files:
comps=os.path.splitext(os.path.basename(media_file))[0]
media_id=comps.split('_')[0]
media = api.get_media(media_id)
if media.attributes.get("Tracklet Generator Processed") != "No":
print(f"Skipping media ID {media.id}, name {media.name} due to "
f"'Tracklet Generator Processed' attribute being set to "
f"something other than 'No'!")
continue
media_shape = (media.height, media.width)
fps = media.fps
localizations_by_frame = {}
localizations = api.get_localization_list(project,
type=args.detection_type_id,
media_id=[media_id],
**optional_fetch_args)
localizations = [l.to_dict() for l in localizations]
if len(localizations) == 0:
print(f"No localizations present in media {media_file}", flush=True)
continue
print(f"Processing {len(localizations)} detections", flush=True)
# Group by localizations by frame
for lid, local in enumerate(localizations):
frame = local['frame']
if frame in localizations_by_frame:
localizations_by_frame[frame].append(local)
else:
localizations_by_frame[frame] = [local]
detections=[]
track_ids=[]
track_id=1
# If media does not exist, download it.
if strategy['method'] == 'iou-global-motion':
if not os.path.exists(media_file):
temp_path = f'/tmp/{os.path.basename(media_file)}'
for progress in tator.util.download_media(api, media, temp_path):
print(f"Downloading {media_file}, {progress}%...")
print("Download finished!")
# Unfrag the file
subprocess.run(["ffmpeg", '-i', temp_path, '-c:v', 'copy', media_file])
os.remove(temp_path)
if strategy['method'] == 'hybrid': # Not all visual methods need detection images
vid=cv2.VideoCapture(media_file)
ok=True
frame = 0
while ok:
ok,frame_bgr = vid.read()
if not ok: break
if frame in localizations_by_frame:
for l in localizations_by_frame[frame]:
l['bgr'] = crop_localization(frame_bgr, l)
if l['attributes']['Confidence'] < 0.50:
continue
detections.append(l)
track_ids.append(track_id)
track_id += 1
frame+=1
else:
# The method is analytical on the detections coordinates
# and does not require processing the video
for frame,frame_detections in localizations_by_frame.items():
for det in frame_detections:
detections.append(det)
track_ids.append(track_id)
track_id += 1
print("Loaded all detections", flush=True)
track_ids = renumber_track_ids(track_ids)
if strategy['method'] == 'hybrid':
weights_strategy = HybridWeights(comparator,
None,
None,
media_shape,
fps,
0.0,
batch_size)
elif strategy['method'] == 'iou':
weights_strategy = IoUWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-motion':
weights_strategy = IoUMotionWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-global-motion':
weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args'])
# Generate localization bgr based on grouped localizations
for x in strategy['frame-diffs']:
print(f"Started {x}", flush=True)
detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets(
detections,
track_ids,
x,
weights_strategy)
if x in strategy['max-length']:
trim_to = strategy['max-length'][x]
print(f"Trimming track to max length of {trim_to}")
detections, track_ids = trim_tracklets(detections, track_ids, trim_to)
_,det_counts_per_track=np.unique(track_ids,return_counts=True)
print(f"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}", flush=True)
if x > 1 and strategy['extension']['method'] == 'linear-motion':
ext_frames=x
print(f"Extending by linear motion, {ext_frames}")
tracklets = join_up_iteration(detections,track_ids)
tracklets = extend_tracklets(tracklets, ext_frames)
detections, track_ids = split_tracklets(tracklets)
# Now we make new track objects based on the result from the graph solver.
# The solver output is two parallel lists: detection records and their
# assigned track numbers, e.g. detections [133, 33, 13, 133] paired with
# track ids [0, 0, 1, 1].
# TODO: Handle is_cut?
def join_up_final(detections, track_ids):
tracklets = defaultdict(list)
num_tracklets = np.max(track_ids) + 1
assert(len(detections) == len(track_ids))
for d,tid in zip(detections, track_ids):
tracklets[tid].append(d)
return tracklets
def make_object(track):
track.sort(key=lambda x:x['frame'])
if classify_function:
valid,attrs = classify_function(media.to_dict(),
track,
**classify_args)
elif len(track) >= strategy['min-length']:
valid = True
attrs = {}
else:
valid = False
attrs = {}
if valid:
obj={"type": args.tracklet_type_id,
"media_ids": [int(media_id)],
"localization_ids": [x['id'] for x in track],
**attrs,
"version": version_id}
return obj
else:
return None
tracklets = join_up_final(detections, track_ids)
new_objs=[make_object(tracklet) for tracklet in tracklets.values()]
new_objs=[x for x in new_objs if x is not None]
print(f"New objects = {len(new_objs)}")
with open(f"/work/{media_id}.json", "w") as f:
json.dump(new_objs,f)
if not args.dry_run:
for response in tator.util.chunked_create(api.create_state_list,project,
state_spec=new_objs):
pass
try:
api.update_media(int(media_id), {"attributes":{"Tracklet Generator Processed": str(datetime.datetime.now())}})
except Exception:
print("WARNING: Unable to set 'Tracklet Generator Processed' attribute")
| 1.6875 | 2 |
test/testMatrix.py | turkeydonkey/nzmath3 | 1 | 1559 |
import unittest
from nzmath.matrix import *
import nzmath.vector as vector
import nzmath.rational as rational
import nzmath.poly.uniutil as uniutil
Ra = rational.Rational
Poly = uniutil.polynomial
Int = rational.theIntegerRing
# sub test
try:
from test.testMatrixFiniteField import *
except ImportError:
try:
from nzmath.test.testMatrixFiniteField import *
except ImportError:
from .testMatrixFiniteField import *
## for RingMatrix
a1 = createMatrix(1, 2, [3, 2])
a2 = Matrix(1, 2, [5, -6])
a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10])
a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0])
a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)])
## for RingSquareMatrix
b1 = createMatrix(2, 2, [1, 2]+[3, 4])
b2 = Matrix(2, 2, [0, -1]+[1, -2])
b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8])
b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1, 9])
b5 = createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8, 9])
b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0, 0, 0])
b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5, 6, 1])
b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2])
## for FieldMatrix
c1 = createMatrix(1, 2, [Ra(3), Ra(2)])
c2 = createMatrix(4, 5, \
[Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0, 0, 1, 2, 0])
c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7])
## for FieldSquareMatrix
d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)])
d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9])
d3 = Matrix(3, 3, \
[Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9])
d4 = createMatrix(6, 6, \
[Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90, 7, 54, 8, 4, 6]+\
[7, 5, 0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1])
d5 = createMatrix(4, 4, \
[Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2])
d6 = createMatrix(4, 4, \
[Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7])
d7 = Matrix(3, 3, \
[Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)])
## other objects
v1 = vector.Vector([1, 4])
v2 = vector.Vector([8])
v3 = vector.Vector([0, 0, 1])
class MatrixTest(unittest.TestCase):
def testInit(self):
lst_lst = Matrix(3, 2, [[21, -12], [1, -1], [0, 0]])
self.assertEqual(a4, lst_lst)
lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12, -1, 0)])
self.assertEqual(a4, lst_tuple)
lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])])
self.assertEqual(a4, lst_vect)
def testGetitem(self):
self.assertEqual(2, a1[1, 2])
self.assertEqual(-2, b2[2, 2])
self.assertRaises(IndexError, a1.__getitem__, "wrong")
self.assertEqual(vector.Vector([21, 1, 0]), a4[1])
def testEqual(self):
self.assertTrue(a1 == Matrix(1, 2, [3, 2]))
self.assertTrue(isinstance(a1 == a1, bool))
def testNonZero(self):
self.assertTrue(not zeroMatrix(2, 3))
def testContains(self):
self.assertTrue(5 in a2)
def testCall(self):
call = createMatrix(1, 2, [13, 4])
self.assertEqual(call, a5(2))
def testMap(self):
pow_two = createMatrix(1, 2, [9, 4])
self.assertEqual(pow_two, a1.map(lambda n : n ** 2))
def testReduce(self):
self.assertEqual(-2, a3.reduce(min))
def testGetRow(self):
row1 = vector.Vector([3, -2])
self.assertEqual(row1, a3.getRow(2))
row2 = vector.Vector([1, 2])
self.assertEqual(row2, b1.getRow(1))
def testGetColumn(self):
col1 = vector.Vector([-12, -1, 0])
self.assertEqual(col1, a4.getColumn(2))
col2 = vector.Vector([1, 3])
self.assertEqual(col2, b1.getColumn(1))
def testTranspose(self):
trans = createMatrix(2, 3, [7, 3, 0]+[8, -2, 10])
self.assertEqual(trans, a3.transpose())
def testGetBlock(self):
block = Matrix(2, 3, [4, 6, 5, 6, 8, 9])
self.assertEqual(block, b5.getBlock(2, 1, 2, 3))
def testSubMatrix(self):
sub1 = createMatrix(2, 1, [-12, 0])
self.assertEqual(sub1, a4.subMatrix(2, 1))
sub2 = createMatrix(2, 2, [4, 5, 6, 9])
self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3]))
class SquareMatrixTest(unittest.TestCase):
def testIsUpperTriangularMatrix(self):
UT = createMatrix(4, 4, \
[1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 0, 1])
notUT = createMatrix(4, 4, \
[1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 1, 1])
assert UT.isUpperTriangularMatrix()
assert not notUT.isUpperTriangularMatrix()
def testIsLowerTriangularMatrix(self):
LT = createMatrix(4, 4, \
[1, 0, 0, 0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
notLT = createMatrix(4, 4, \
[1, 0, 0, 0]+[2, 3, 1, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
assert LT.isLowerTriangularMatrix()
assert not notLT.isLowerTriangularMatrix()
def testIsDiagonalMatrix(self):
diag = createMatrix(2, 2, [-3, 0, 0, 5])
assert diag.isDiagonalMatrix()
def testIsScalarMatrix(self):
scaler = createMatrix(2, 2, [10, 0, 0, 10])
assert scaler.isScalarMatrix()
def testIsSymmetricMatrix(self):
symmetric = createMatrix(2, 2, [2, 3, 3, 5])
assert symmetric.isSymmetricMatrix()
class RingMatrixTest(unittest.TestCase):
def testAdd(self):
sum1 = createMatrix(1, 2, [8, -4])
self.assertEqual(sum1, a1 + a2)
sum2 = createMatrix(2, 2, [1, 1, 4, 2])
self.assertEqual(sum2, b1 + b2)
def testSub(self):
sub1 = createMatrix(1, 2, [-2, 8])
self.assertEqual(sub1, a1 - a2)
sub2 = createMatrix(2, 2, [1, 3, 2, 6])
self.assertEqual(sub2, b1 - b2)
def testMul(self):
mul1 = createMatrix(1, 2, [2, -7])
self.assertEqual(mul1, a1 * b2)
mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0, 0])
self.assertEqual(mul2, a4 * b1)
mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93])
self.assertEqual(mul3, b3 * a4)
def testScalarMul(self):
mul = createMatrix(1, 2, [15, 10])
self.assertEqual(mul, 5 * a1)
def testVectorMul(self):
mul = vector.Vector([9, 19])
self.assertEqual(mul, b1 * v1)
def testMod(self):
mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1])
self.assertEqual(mod1, a3 % 3)
def testNeg(self):
neg = createMatrix(2, 2, [0, 1, -1, 2])
self.assertEqual(neg, -b2)
def testHermiteNormalForm(self):
already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
h = already.hermiteNormalForm()
self.assertEqual(h, already)
lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
h = lessrank.hermiteNormalForm()
self.assertEqual(h.row, lessrank.row)
self.assertEqual(h.column, lessrank.column)
zerovec = vector.Vector([0, 0])
self.assertEqual(zerovec, h.getColumn(1))
square = createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, 1])
h = square.hermiteNormalForm()
self.assertEqual(h.row, square.row)
self.assertEqual(h.column, square.column)
hermite = createMatrix(3, 3, [0, 1, 0, 0, 0, 1, 0, 0, 1])
self.assertEqual(hermite, h)
def testExtHermiteNormalForm(self):
already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
U_1, h_1 = already.exthermiteNormalForm()
self.assertEqual(h_1, already)
self.assertEqual(already * U_1, h_1)
lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
U_2, h_2 = lessrank.exthermiteNormalForm()
self.assertEqual(h_2.row, lessrank.row)
self.assertEqual(h_2.column, lessrank.column)
self.assertEqual(lessrank * U_2, h_2)
def testKernelAsModule(self):
ker_1 = a1.kernelAsModule()
self.assertEqual(a1 * ker_1[1], vector.Vector([0]))
#zero test
ker_2 = b1.kernelAsModule()
self.assertEqual(ker_2, None)
class RingSquareMatrixTest(unittest.TestCase):
def testPow(self):
pow1 = createMatrix(2, 2, [7, 10, 15, 22])
self.assertEqual(pow1, b1 ** 2)
pow2 = createMatrix(2, 2, [1, 0, 0, 1])
self.assertEqual(pow2, b2 ** 0)
def testIsOrthogonalMatrix(self):
orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)])
assert orthogonal.isOrthogonalMatrix()
def testIsAlternatingMatrix(self):
alternate1 = createMatrix(2, 2, [0, 2, -2, 0])
assert alternate1.isAlternatingMatrix()
alternate2 = createMatrix(2, [1, 2, -2, 0])
assert not alternate2.isAntisymmetricMatrix()
def testIsSingular(self):
assert b6.isSingular()
def testTrace(self):
self.assertEqual(15, b4.trace())
def testDeterminant(self):
self.assertEqual(-2, b1.determinant())
#sf.bug #1914349
self.assertTrue(isinstance(b3.determinant(), int))
self.assertEqual(36, b3.determinant())
def testCofactor(self):
self.assertEqual(-6, b5.cofactor(1, 2))
def testCommutator(self):
commutator = createMatrix(2, 2, [5, -1, 9, -5])
self.assertEqual(commutator, b1.commutator(b2))
def testCharacteristicMatrix(self):
charMat = createMatrix(2, 2, \
[Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)])
self.assertEqual(charMat, b1.characteristicMatrix())
def testCharacteristicPolynomial(self):
assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant()
def testAdjugateMatrix(self):
adjugate = createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35, 13, 5])
self.assertEqual(adjugate, b4.adjugateMatrix())
assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row)
def testCofactorMatrix(self):
cofact = d5.cofactorMatrix()
self.assertEqual(d5.cofactor(2, 3), cofact[2, 3])
def testSmithNormalForm(self):
self.assertEqual([12, 1, 1], b5.smithNormalForm())
self.assertRaises(ValueError, b6.smithNormalForm)
self.assertEqual([1, 1, 1], b7.smithNormalForm())
self.assertEqual([9, 3, 1], b8.smithNormalForm())
def testExtSmithNormalForm(self):
smith1 = Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0, 0, 1])
U_1, V_1, M_1 = b5.extsmithNormalForm()
self.assertEqual(smith1, M_1)
self.assertEqual(M_1, U_1 * b5 * V_1)
smith2 = Matrix(3, 3, [9, 0, 0, 0, 3, 0, 0, 0, 1])
U_2, V_2, M_2 = b8.extsmithNormalForm()
self.assertEqual(smith2, M_2)
self.assertEqual(M_2, U_2 * b8 * V_2)
class FieldMatrixTest(unittest.TestCase):
def testDiv(self):
div = createMatrix(1, 2, [1, Ra(2, 3)])
self.assertEqual(div, c1 / 3)
def testKernel(self):
ker = c2.kernel()
self.assertTrue(not c2 * ker)
def testImage(self):
img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0])
self.assertEqual(img, c2.image())
def testRank(self):
self.assertEqual(3, c2.rank())
self.assertEqual(3, d3.rank())
def testInverseImage(self):
self.assertEqual(d6, d5 * d5.inverseImage(d6))
self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3))
def testSolve(self):
for i in range(1, d6.column+1):
self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0])
sol1 = c1.solve(v2)
for i in range(len(sol1[1])):
self.assertEqual(v2, c1 * (sol1[0]+sol1[1][i]))
self.assertRaises(NoInverseImage, c3.solve, v3)
def testColumnEchelonForm(self):
echelon = createMatrix(4, 5,\
[Ra(0), 0, 1, 0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0, 0, 0, 0, 1])
self.assertEqual(echelon, c2.columnEchelonForm())
class FieldSquareMatrixTest(unittest.TestCase):
def testPow(self):
pow3 = createMatrix(2, 2, [Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)])
self.assertEqual(pow3, d1 ** (-2))
def testTriangulate(self):
triangle = createMatrix(3, 3, \
[Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)])
self.assertEqual(triangle, d3.triangulate())
def testDeterminant(self):
self.assertEqual(Ra(-7, 15), d7.determinant())
def testInverse(self):
cinverse = createMatrix(3, 3)
cinverse.set([Ra(-47, 86), Ra(15, 86), Ra(19, 86)]+\
[Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, 86), Ra(-5, 86)])
self.assertEqual(cinverse, d3.inverse())
self.assertRaises(NoInverse, d2.inverse)
self.assertEqual(d3.inverse() * c3, d3.inverse(c3))
def testInverseNoChange(self):
# sf bug#1849220
M1 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
M1.inverse()
M2 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
self.assertEqual(M2, M1)
def testHessenbergForm(self):
pass
def testLUDecomposition(self):
L, U = d4.LUDecomposition()
assert L * U == d4
assert L.isLowerTriangularMatrix()
assert U.isUpperTriangularMatrix()
class MatrixRingTest (unittest.TestCase):
def setUp(self):
self.m2z = MatrixRing.getInstance(2, Int)
def testZero(self):
z = self.m2z.zero
self.assertEqual(0, z[1, 1])
self.assertEqual(0, z[1, 2])
self.assertEqual(0, z[2, 1])
self.assertEqual(0, z[2, 2])
def testOne(self):
o = self.m2z.one
self.assertEqual(1, o[1, 1])
self.assertEqual(0, o[1, 2])
self.assertEqual(0, o[2, 1])
self.assertEqual(1, o[2, 2])
def testUnitMatrix(self):
"""
unitMatrix() is an alias of one.
"""
self.assertEqual(self.m2z.one, self.m2z.unitMatrix())
def testRingAPI(self):
m3z = MatrixRing.getInstance(3, Int)
m2q = MatrixRing.getInstance(2, rational.theRationalField)
# issubring
self.assertFalse(self.m2z.issubring(Int))
self.assertTrue(self.m2z.issubring(self.m2z))
self.assertTrue(self.m2z.issubring(m2q))
self.assertFalse(self.m2z.issubring(m3z))
# issuperring
self.assertFalse(self.m2z.issuperring(Int))
self.assertTrue(self.m2z.issuperring(self.m2z))
self.assertFalse(self.m2z.issuperring(m2q))
self.assertFalse(self.m2z.issuperring(m3z))
# getCommonSuperring
self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int)
class SubspaceTest(unittest.TestCase):
def testSupplementBasis(self):
ba = Subspace(3, 2, [1, 2, 3, 4, 5, 7])
supbase = createMatrix(3, 3, [1, 2, 0, 3, 4, 0, 5, 7, 1])
self.assertEqual(supbase, ba.supplementBasis())
def testSumOfSubspaces(self):
unit1 = Subspace(3, 1, [1, 0, 0])
unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1])
self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2))
def testIntersectionOfSubspace(self):
unit1 = Subspace(3, 2, [1, 0]+[0, 1]+[0, 0])
unit2 = unitMatrix(3)
unit2.toSubspace()
intersect = Subspace(3, 2, [-1, 0]+[0, -1]+[0, 0])
self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2))
class FunctionTest(unittest.TestCase):
def testCreateMatrix(self):
Q = rational.theRationalField
mat1 = createMatrix(2, 3, [[2,3,4], [5,6,7]])
self.assertEqual(mat1.coeff_ring, Int)
mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q)
self.assertEqual(mat2.coeff_ring, Q)
mat3 = createMatrix(3, [(1, 2, 3), (4, 5, 6), (7, 8, 9)], Q)
self.assertTrue(mat3.row == mat3.column)
self.assertTrue(mat3.__class__, FieldSquareMatrix)
mat4 = createMatrix(2, [vector.Vector([1, 4]), vector.Vector([6, 8])])
self.assertEqual(mat4.coeff_ring, Int)
mat5 = createMatrix(5, 6, Int)
self.assertTrue(mat5 == 0)
mat6 = createMatrix(1, 4)
self.assertTrue(mat6 == 0)
mat7 = createMatrix(3, Q)
self.assertTrue(mat7.row == mat7.column)
self.assertTrue(mat7 == 0)
self.assertEqual(mat7.coeff_ring, Q)
mat8 = createMatrix(7)
self.assertTrue(mat8 == 0)
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 1.484375 | 1 |
Code/extract_method3.py | AbdullahNoori/CS-2.1-Trees-Sorting | 0 | 1567 | # Written by <NAME>
# Example for Compose Methods: Extract Method.
import math
def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84):
# Calculate the distance between the centers of the two circles
return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2)
print('distance', get_distance())
# *** somewhere else in your program ***
def get_length(xa=-50, ya=99, xb=.67, yb=.26):
# Calculate the length of vector AB, the vector between points A and B.
return math.sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb))
print('length', get_length())
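# The two functions above duplicate the same Euclidean distance formula.
# A minimal sketch (ours, not from the original file) of the Extract Method
# refactoring this example is about: pull the shared computation into one
# helper and call it from both call sites. The name `euclidean_distance`
# is our own choice.
def euclidean_distance(x1, y1, x2, y2):
    # Single shared implementation of the distance formula
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

print('distance (extracted)', euclidean_distance(5, 22, 7.25, -4.84))
print('length (extracted)', euclidean_distance(-50, 99, .67, .26))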
| 2.96875 | 3 |
stage/configuration/test_amazon_s3_origin.py | Sentienz/datacollector-tests | 0 | 1607 | import logging
import pytest
from streamsets.testframework.markers import aws, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
S3_SANDBOX_PREFIX = 'sandbox'
LOG_FIELD_MAPPING = [{'fieldPath': '/date', 'group': 1},
{'fieldPath': '/time', 'group': 2},
{'fieldPath': '/timehalf', 'group': 3},
{'fieldPath': '/info', 'group': 4},
{'fieldPath': '/file', 'group': 5},
{'fieldPath': '/message', 'group': 6}]
REGULAR_EXPRESSION = r'(\S+) (\S+) (\S+) (\S+) (\S+) (.*)'
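# Example: REGULAR_EXPRESSION splits the sample REGEX line below,
# '2019-04-30 08:23:53 AM [INFO] [streamsets.sdk.sdc_api] Pipeline Filewriterpipeline53',
# into six groups that LOG_FIELD_MAPPING assigns to /date, /time, /timehalf,
# /info, /file and /message respectively.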
# log to be written in the file on s3
data_format_content = {
'COMMON_LOG_FORMAT': '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
'"GET /apache.gif HTTP/1.0" 200 232',
'LOG4J': '200 [main] DEBUG org.StreamSets.Log4j unknown - This is sample log message',
'APACHE_ERROR_LOG_FORMAT': '[Wed Oct 11 14:32:52 2000] [error] [client 127.0.0.1] client '
'denied by server configuration:/export/home/live/ap/htdocs/test',
'COMBINED_LOG_FORMAT': '127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache.gif'
' HTTP/1.0" 200 2326 "http://www.example.com/strt.html" "Mozilla/4.08'
' [en] (Win98; I ;Nav)"',
'APACHE_CUSTOM_LOG_FORMAT': '10.185.248.71 - - [09/Jan/2015:9:12:06 +0000] "GET '
'/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300 '
'HTTP/1.1" 500 17 ',
'CEF': '10.217.31.247 CEF:0|Citrix|NetScaler|NS10.0|APPFW|APPFW_STARTURL|6|src=10.217.253.78 '
'spt=53743 method=GET request=http://vpx247.example.net/FFC/login.html msg=Disallow Illegal URL.',
'LEEF': 'LEEF: 2.0|Trend Micro|Deep Security Agent|<DSA version>|4000030|cat=Anti-Malware '
'name=HEU_AEGIS_CRYPT desc=HEU_AEGIS_CRYPT sev=6 cn1=241 msg=Realtime',
'REGEX': '2019-04-30 08:23:53 AM [INFO] [streamsets.sdk.sdc_api] Pipeline Filewriterpipeline53'}
# data to verify the output of amazon s3 origin.
get_data_to_verify_output = {
'LOG4J': {'severity': 'DEBUG', 'relativetime': '200', 'thread': 'main', 'category': 'org.StreamSets.Log4j',
'ndc': 'unknown', 'message': 'This is sample log message'},
'COMMON_LOG_FORMAT': {'request': '/apache.gif', 'auth': 'frank', 'ident': '-', 'response': '200', 'bytes':
'232', 'clientip': '127.0.0.1', 'verb': 'GET', 'httpversion': '1.0', 'rawrequest': None,
'timestamp': '10/Oct/2000:13:55:36 -0700'},
'APACHE_ERROR_LOG_FORMAT': {'message': 'client denied by server configuration:/export/home/live/ap/htdocs/'
'test', 'timestamp': 'Wed Oct 11 14:32:52 2000', 'loglevel': 'error',
'clientip': '127.0.0.1'},
'COMBINED_LOG_FORMAT': {'request': '/apache.gif', 'agent': '"Mozilla/4.08 [en] (Win98; I ;Nav)"', 'auth':
'frank', 'ident': '-', 'verb': 'GET', 'referrer': '"http://www.example.com/strt.'
'html"', 'response': '200', 'bytes': '2326', 'clientip': '127.0.0.1',
'httpversion': '1.0', 'rawrequest': None, 'timestamp': '10/Oct/2000:13:55:36 -0700'},
'APACHE_CUSTOM_LOG_FORMAT': {'remoteUser': '-', 'requestTime': '09/Jan/2015:9:12:06 +0000', 'request': 'GET '
'/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300 HTTP/1.1',
'logName': '-', 'remoteHost': '10.185.248.71', 'bytesSent': '17', 'status': '500'},
'CEF': {'severity': '6', 'product': 'NetScaler', 'extensions': {'msg': 'Disallow Illegal URL.', 'request':
'http://vpx247.example.net/FFC/login.html', 'method': 'GET', 'src': '10.217.253.78', 'spt': '53743'},
'signature': 'APPFW', 'vendor': 'Citrix', 'cefVersion': 0, 'name': 'APPFW_STARTURL',
'version': 'NS10.0'},
'GROK': {'request': '/inventoryServic/inventory/purchaseItem?userId=20253471&itemId=23434300', 'auth': '-',
'ident': '-', 'response': '500', 'bytes': '17', 'clientip': '10.185.248.71', 'verb': 'GET',
'httpversion': '1.1', 'rawrequest': None, 'timestamp': '09/Jan/2015:9:12:06 +0000'},
'LEEF': {'eventId': '4000030', 'product': 'Deep Security Agent', 'extensions': {'cat': 'Realtime'},
'leefVersion': 2.0, 'vendor': 'Trend Micro', 'version': '<DSA version>'},
'REGEX': {'/time': '08:23:53', '/date': '2019-04-30', '/timehalf': 'AM',
'/info': '[INFO]', '/message': 'Pipeline Filewriterpipeline53', '/file': '[streamsets.sdk.sdc_api]'}}
@pytest.mark.skip('Not yet implemented')
def test_configuration_access_key_id(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_bucket(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_connection_timeout(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('task', ['CREATE_NEW_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_content(sdc_builder, sdc_executor, task):
pass
@pytest.mark.parametrize('task', ['COPY_OBJECT'])
@pytest.mark.parametrize('delete_original_object', [False, True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_delete_original_object(sdc_builder, sdc_executor, task, delete_original_object):
pass
@pytest.mark.parametrize('region', ['OTHER'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_endpoint(sdc_builder, sdc_executor, region):
pass
@pytest.mark.parametrize('task', ['COPY_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_new_object_path(sdc_builder, sdc_executor, task):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_object(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('on_record_error', ['DISCARD', 'STOP_PIPELINE', 'TO_ERROR'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_on_record_error(sdc_builder, sdc_executor, on_record_error):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_preconditions(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_host(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_password(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_port(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('use_proxy', [True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_proxy_user(sdc_builder, sdc_executor, use_proxy):
pass
@pytest.mark.parametrize('region', ['AP_NORTHEAST_1', 'AP_NORTHEAST_2', 'AP_NORTHEAST_3', 'AP_SOUTHEAST_1', 'AP_SOUTHEAST_2', 'AP_SOUTH_1', 'CA_CENTRAL_1', 'CN_NORTHWEST_1', 'CN_NORTH_1', 'EU_CENTRAL_1', 'EU_WEST_1', 'EU_WEST_2', 'EU_WEST_3', 'OTHER', 'SA_EAST_1', 'US_EAST_1', 'US_EAST_2', 'US_GOV_WEST_1', 'US_WEST_1', 'US_WEST_2'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_region(sdc_builder, sdc_executor, region):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_required_fields(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_retry_count(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_secret_access_key(sdc_builder, sdc_executor):
pass
@pytest.mark.skip('Not yet implemented')
def test_configuration_socket_timeout(sdc_builder, sdc_executor):
pass
@pytest.mark.parametrize('task', ['CHANGE_EXISTING_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_tags(sdc_builder, sdc_executor, task):
pass
@pytest.mark.parametrize('task', ['CHANGE_EXISTING_OBJECT', 'COPY_OBJECT', 'CREATE_NEW_OBJECT'])
@pytest.mark.skip('Not yet implemented')
def test_configuration_task(sdc_builder, sdc_executor, task):
pass
@pytest.mark.parametrize('use_proxy', [False, True])
@pytest.mark.skip('Not yet implemented')
def test_configuration_use_proxy(sdc_builder, sdc_executor, use_proxy):
pass
@aws('s3')
@pytest.mark.parametrize('data_format', ['LOG'])
@pytest.mark.parametrize('log_format', ['COMMON_LOG_FORMAT', 'APACHE_ERROR_LOG_FORMAT', 'COMBINED_LOG_FORMAT',
'APACHE_CUSTOM_LOG_FORMAT', 'REGEX', 'GROK', 'LOG4J', 'CEF', 'LEEF'])
def test_configurations_data_format_log(sdc_executor, sdc_builder, aws, data_format, log_format):
"""Check whether S3 origin can parse different log format or not. A log file is being created in s3 bucket
mentioned below .S3 origin reads the log file and parse the same.
Pipeline for the same-
s3_origin >> trash
s3_origin >= pipeline_finisher_executor
"""
if log_format == 'GROK':
file_content = data_format_content['APACHE_CUSTOM_LOG_FORMAT']
else:
file_content = data_format_content[log_format]
client = aws.s3
s3_key = f'{S3_SANDBOX_PREFIX}/{get_random_string()}'
attributes = {'bucket': aws.s3_bucket_name,
'prefix_pattern': f'{s3_key}/*',
'number_of_threads': 1,
'read_order': 'LEXICOGRAPHICAL',
'data_format': data_format,
'log_format': log_format,
'custom_log_format': '%h %l %u [%t] "%r" %>s %b',
'regular_expression': REGULAR_EXPRESSION,
'field_path_to_regex_group_mapping': LOG_FIELD_MAPPING
}
pipeline = get_aws_origin_to_trash_pipeline(sdc_builder, attributes, aws)
s3_origin = pipeline.origin_stage
try:
client.put_object(Bucket=aws.s3_bucket_name, Key=f'{s3_key}/{get_random_string()}.log', Body=file_content)
output_records = execute_pipeline_and_get_output(sdc_executor, s3_origin, pipeline)
assert output_records[0].field == get_data_to_verify_output[log_format]
finally:
if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':
sdc_executor.stop_pipeline(pipeline)
# cleaning up s3 bucket
delete_aws_objects(client, aws, s3_key)
def get_aws_origin_to_trash_pipeline(sdc_builder, attributes, aws):
# Build pipeline.
builder = sdc_builder.get_pipeline_builder()
builder.add_error_stage('Discard')
s3_origin = builder.add_stage('Amazon S3', type='origin')
s3_origin.set_attributes(**attributes)
trash = builder.add_stage('Trash')
pipeline_finisher_executor = builder.add_stage('Pipeline Finisher Executor')
pipeline_finisher_executor.set_attributes(stage_record_preconditions=["${record:eventType() == 'no-more-data'}"])
s3_origin >> trash
s3_origin >= pipeline_finisher_executor
s3_origin_pipeline = builder.build().configure_for_environment(aws)
s3_origin_pipeline.configuration['shouldRetry'] = False
return s3_origin_pipeline
def delete_aws_objects(client, aws, s3_key):
# Clean up S3.
delete_keys = {'Objects': [{'Key': k['Key']}
for k in
client.list_objects_v2(Bucket=aws.s3_bucket_name, Prefix=s3_key)['Contents']]}
client.delete_objects(Bucket=aws.s3_bucket_name, Delete=delete_keys)
def execute_pipeline_and_get_output(sdc_executor, s3_origin, pipeline):
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
output_records = snapshot[s3_origin].output
return output_records
| 1.351563 | 1 |
stanza/models/common/dropout.py | rasimuvaikas/stanza | 3,633 | 1615 | import torch
import torch.nn as nn
class WordDropout(nn.Module):
""" A word dropout layer that's designed for embedded inputs (e.g., any inputs to an LSTM layer).
Given a batch of embedded inputs, this layer randomly sets some of them to a replacement state.
Note that this layer assumes the last dimension of the input to be the hidden dimension of a unit.
"""
def __init__(self, dropprob):
super().__init__()
self.dropprob = dropprob
def forward(self, x, replacement=None):
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
masksize[-1] = 1
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, 0)
if replacement is not None:
res = res + dropmask.float() * replacement
return res
def extra_repr(self):
return 'p={}'.format(self.dropprob)
class LockedDropout(nn.Module):
"""
A variant of dropout layer that consistently drops out the same parameters over time. Also known as the variational dropout.
This implementation was modified from the LockedDropout implementation in the flair library (https://github.com/zalandoresearch/flair).
"""
def __init__(self, dropprob, batch_first=True):
super().__init__()
self.dropprob = dropprob
self.batch_first = batch_first
def forward(self, x):
if not self.training or self.dropprob == 0:
return x
if not self.batch_first:
m = x.new_empty(1, x.size(1), x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
else:
m = x.new_empty(x.size(0), 1, x.size(2), requires_grad=False).bernoulli_(1 - self.dropprob)
mask = m.div(1 - self.dropprob).expand_as(x)
return mask * x
def extra_repr(self):
return 'p={}'.format(self.dropprob)
class SequenceUnitDropout(nn.Module):
""" A unit dropout layer that's designed for input of sequence units (e.g., word sequence, char sequence, etc.).
Given a sequence of unit indices, this layer randomly sets some of them to a replacement id (usually the id of <UNK>).
"""
def __init__(self, dropprob, replacement_id):
super().__init__()
self.dropprob = dropprob
self.replacement_id = replacement_id
def forward(self, x):
""" :param: x must be a LongTensor of unit indices. """
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, self.replacement_id)
return res
def extra_repr(self):
return 'p={}, replacement_id={}'.format(self.dropprob, self.replacement_id)
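# A minimal usage sketch (ours, not part of the original module) exercising the
# three layers on dummy tensors; shapes and probabilities are arbitrary. Note
# that nn.Module instances start in training mode, so dropout is active here.
if __name__ == '__main__':
    word_drop = WordDropout(0.3)
    x = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
    print(word_drop(x).shape)  # same shape; some hidden vectors zeroed out

    locked_drop = LockedDropout(0.3)
    print(locked_drop(x).shape)  # one mask reused across the whole sequence

    unit_drop = SequenceUnitDropout(0.3, replacement_id=0)
    ids = torch.randint(1, 100, (2, 5))
    print(unit_drop(ids))  # some unit indices replaced by the id 0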
| 2.984375 | 3 |
desktop/libs/liboozie/src/liboozie/submittion_tests.py | vinaymundada27/Hue | 1 | 1623 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.contrib.auth.models import User
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_not_equal
from hadoop import cluster, pseudo_hdfs4
from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
from liboozie.submittion import Submission
from oozie.tests import OozieMockBase
from desktop.lib.test_utils import clear_sys_caches
from desktop.lib.django_test_util import make_logged_in_client
LOG = logging.getLogger(__name__)
@attr('requires_hadoop')
def test_copy_files():
cluster = pseudo_hdfs4.shared_cluster()
try:
c = make_logged_in_client()
user = User.objects.get(username='test')
prefix = '/tmp/test_copy_files'
if cluster.fs.exists(prefix):
cluster.fs.rmtree(prefix)
# Jars in various locations
deployment_dir = '%s/workspace' % prefix
external_deployment_dir = '%s/deployment' % prefix
jar_1 = '%s/udf1.jar' % prefix
jar_2 = '%s/lib/udf2.jar' % prefix
jar_3 = '%s/udf3.jar' % deployment_dir
jar_4 = '%s/lib/udf4.jar' % deployment_dir # Never move
cluster.fs.mkdir(prefix)
cluster.fs.create(jar_1)
cluster.fs.create(jar_2)
cluster.fs.create(jar_3)
cluster.fs.create(jar_4)
class MockNode():
def __init__(self, jar_path):
self.jar_path = jar_path
class MockJob():
def __init__(self):
self.node_list = [
MockNode(jar_1),
MockNode(jar_2),
MockNode(jar_3),
MockNode(jar_4),
]
def get_application_filename(self):
return 'workflow.xml'
submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)
submission._copy_files(deployment_dir, "<xml>My XML</xml>")
submission._copy_files(external_deployment_dir, "<xml>My XML</xml>")
# All sources still there
assert_true(cluster.fs.exists(jar_1))
assert_true(cluster.fs.exists(jar_2))
assert_true(cluster.fs.exists(jar_3))
assert_true(cluster.fs.exists(jar_4))
deployment_dir = deployment_dir + '/lib'
external_deployment_dir = external_deployment_dir + '/lib'
list_dir_workspace = cluster.fs.listdir(deployment_dir)
list_dir_deployement = cluster.fs.listdir(external_deployment_dir)
# All destinations there
assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)
stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')
submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>")
assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
finally:
try:
cluster.fs.rmtree(prefix)
except Exception:
LOG.exception('failed to remove %s' % prefix)
class MockFs():
def __init__(self, logical_name=None):
self.fs_defaultfs = 'hdfs://curacao:8020'
self.logical_name = logical_name if logical_name else ''
class MockJt():
def __init__(self, logical_name=None):
self.logical_name = logical_name if logical_name else ''
class TestSubmission(OozieMockBase):
def test_get_properties(self):
submission = Submission(self.user, fs=MockFs())
assert_equal({}, submission.properties)
submission._update_properties('curacao:8032', '/deployment_dir')
assert_equal({
'jobTracker': 'curacao:8032',
'nameNode': 'hdfs://curacao:8020'
}, submission.properties)
def test_get_logical_properties(self):
submission = Submission(self.user, fs=MockFs(logical_name='fsname'), jt=MockJt(logical_name='jtname'))
assert_equal({}, submission.properties)
submission._update_properties('curacao:8032', '/deployment_dir')
assert_equal({
'jobTracker': 'jtname',
'nameNode': 'fsname'
}, submission.properties)
def test_update_properties(self):
finish = []
finish.append(MR_CLUSTERS.set_for_testing({'default': {}}))
finish.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
finish.append(YARN_CLUSTERS.set_for_testing({'default': {}}))
finish.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
try:
properties = {
'user.name': 'hue',
'test.1': 'http://localhost/test?test1=test&test2=test',
'nameNode': 'hdfs://curacao:8020',
'jobTracker': 'jtaddress'
}
final_properties = properties.copy()
submission = Submission(None, properties=properties, oozie_id='test', fs=MockFs())
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
clear_sys_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jtaddress',
'nameNode': fs.fs_defaultfs
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finish.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
finish.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
clear_sys_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jobtracker',
'nameNode': 'namenode'
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finally:
clear_sys_caches()
for reset in finish:
reset()
def test_get_external_parameters(self):
xml = """
<workflow-app name="Pig" xmlns="uri:oozie:workflow:0.4">
<start to="Pig"/>
<action name="Pig">
<pig>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${output}"/>
</prepare>
<script>aggregate.pig</script>
<argument>-param</argument>
<argument>INPUT=${input}</argument>
<argument>-param</argument>
<argument>OUTPUT=${output}</argument>
<configuration>
<property>
<name>mapred.input.format.class</name>
<value>org.apache.hadoop.examples.SleepJob$SleepInputFormat</value>
</property>
</configuration>
</pig>
<ok to="end"/>
<error to="kill"/>
</action>
<kill name="kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
"""
properties = """
#
# Licensed to the Hue
#
nameNode=hdfs://localhost:8020
jobTracker=localhost:8021
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/pig
"""
parameters = Submission(self.user)._get_external_parameters(xml, properties)
assert_equal({'oozie.use.system.libpath': 'true',
'input': '',
'jobTracker': 'localhost:8021',
'oozie.wf.application.path': '${nameNode}/user/${user.name}/${examplesRoot}/apps/pig',
'examplesRoot': 'examples',
'output': '',
'nameNode': 'hdfs://localhost:8020',
'queueName': 'default'
},
parameters)
| 1.21875 | 1 |
k2/python/host/k2host/properties.py | Jarvan-Wang/k2 | 144 | 1631 | # Copyright (c) 2020 Xiaomi Corporation (author: <NAME>)
# See ../../../LICENSE for clarification regarding multiple authors
import torch
from torch.utils.dlpack import to_dlpack
from .fsa import Fsa
from _k2host import _is_valid
from _k2host import _is_top_sorted
from _k2host import _is_arc_sorted
from _k2host import _has_self_loops
from _k2host import _is_acyclic
from _k2host import _is_deterministic
from _k2host import _is_epsilon_free
from _k2host import _is_connected
from _k2host import _is_empty
def is_valid(fsa: Fsa) -> bool:
return _is_valid(fsa.get_base())
def is_top_sorted(fsa: Fsa) -> bool:
return _is_top_sorted(fsa.get_base())
def is_arc_sorted(fsa: Fsa) -> bool:
return _is_arc_sorted(fsa.get_base())
def has_self_loops(fsa: Fsa) -> bool:
return _has_self_loops(fsa.get_base())
def is_acyclic(fsa: Fsa) -> bool:
return _is_acyclic(fsa.get_base())
def is_deterministic(fsa: Fsa) -> bool:
return _is_deterministic(fsa.get_base())
def is_epsilon_free(fsa: Fsa) -> bool:
return _is_epsilon_free(fsa.get_base())
def is_connected(fsa: Fsa) -> bool:
return _is_connected(fsa.get_base())
def is_empty(fsa: Fsa) -> bool:
return _is_empty(fsa.get_base())
| 1.585938 | 2 |
leetcode/1672 Richest Customer Wealth.py | jaredliw/python-question-bank | 1 | 1647 | class Solution(object):
def maximumWealth(self, accounts):
"""
:type accounts: List[List[int]]
:rtype: int
"""
# Runtime: 36 ms
# Memory: 13.5 MB
return max(map(sum, accounts))
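# Usage sketch (ours, not part of the submission): each inner list holds one
# customer's account balances; sum each customer, then take the max.
# Solution().maximumWealth([[1, 2, 3], [3, 2, 1]]) -> 6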
| 1.898438 | 2 |
amazon/goods_review_thread.py | JoanLee0826/amazon | 5 | 1655 | import pandas as pd
import requests
from lxml import etree
import re, time, random, datetime
from queue import Queue
import threading
class Review:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36"
}
proxies = {
"http": "http://172.16.17.32:9999",
}
def __init__(self, domain):
self.view_list = []
self.page_list = []
self.url_queue = Queue()
if domain.strip().lower() == 'jp':
self.row_url = "https://www.amazon.co.jp"
elif domain.strip().lower() == 'com':
self.row_url = "https://www.amazon.com"
self.s = requests.Session()
self.s.get(url=self.row_url, headers=self.headers, proxies=self.proxies)
def get_review(self, url):
res = self.s.get(url, headers=self.headers, proxies=self.proxies)
if res.status_code != 200:
print("请求出错,状态码为:%s" % res.status_code)
print(res.text)
return
res_html = etree.HTML(res.text)
# Name of the reviewed product
view_goods = res_html.xpath('//span[@class="a-list-item"]/a/text()')[0]
# Container elements holding the individual reviews
view_con = res_html.xpath('//div[@class="a-section review aok-relative"]')
for each_view in view_con:
# Reviewer name
view_name = each_view.xpath('.//span[@class="a-profile-name"]/text()')[0]
view_star_raw = each_view.xpath('.//div[@class="a-row"]/a[@class="a-link-normal"]/@title')[0]
# Star rating
view_star = view_star_raw.split(' ')[0]
# Review title
view_title = each_view.xpath('.//a[@data-hook="review-title"]/span/text()')[0]
# Review date
view_date = each_view.xpath('.//span[@data-hook="review-date"]/text()')[0]
view_format = each_view.xpath('.//a[@data-hook="format-strip"]/text()')
view_colour = None
view_size = None
try:
for each in view_format:
if re.search("color|colour|色", each, re.I):
view_colour = each.split(':')[1].strip()
if re.search("size|style|サイズ", each, re.I):
view_size = each.split(":")[1].strip()
except:
pass
# Review body
view_body = each_view.xpath('string(.//span[@data-hook="review-body"]/span)')
# Number of "helpful" votes
try:
view_useful_raw = each_view.xpath('.//span[@data-hook="helpful-vote-statement"]/text()')[0]
view_useful = view_useful_raw.split(' ')[0]
if view_useful == 'one':
view_useful = 1
try:
view_useful = int(view_useful)
except:
pass
except:
view_useful = 0
# Collect the review fields for this product into one row
each_view_list = [view_goods, view_name, view_star, view_title, view_date, view_colour, view_size,
view_body, view_useful]
self.view_list.append(each_view_list)
# print(self.view_list[-1])
def run(self, data):
goods_data = pd.read_excel(data, encoding='utf-8')
base_url = self.row_url + "/product-reviews/"
# goods_data.drop_duplicates(subset=['r','评价数量'],inplace=True)
for each_asin, each_count in zip(goods_data['ASIN'][5:50], goods_data['goods_review_count'][5:50]):
if each_asin and int(each_count) > 0:
if int(each_count) % 10 == 0:
end_page = int(each_count) // 10 + 1
else:
end_page = int(each_count) // 10 + 2
for page in range(1, end_page):
if page == 1:
url = base_url + each_asin
else:
url = base_url + each_asin + '?pageNumber=' + str(page)
self.url_queue.put(url)
print("review_page_%d" % page, url)
time.sleep(1.5)
while True:
try:
review_threads = [threading.Thread(target=self.get_review, args=(self.url_queue.get(),))
for m in range(30) if not self.url_queue.empty()]
for each in review_threads:
each.start()
print("队列剩余数量", self.url_queue.qsize())
for each in review_threads:
each.join()
except:
print("请求链接出错,重试中...")
pass
time.sleep(random.uniform(0.5,2.1))
if self.url_queue.empty():
break
view_goods_pd = pd.DataFrame(self.view_list,
columns=['review_goods', 'review_name', 'review_star', 'review_title',
'review_date', 'review_colour', 'review_size', 'review_body',
'review_useful'])
view_goods_pd.drop_duplicates(subset=['review_name', 'review_date','review_body'], inplace=True)
aft = datetime.datetime.now().strftime('%m%d%H%M')
file_name = r'../data/goods_review/' + "reviews_" + aft + ".xlsx"
view_goods_pd.to_excel(file_name, encoding='utf-8', engine='xlsxwriter')
print("共获取评论数量:", len(self.view_list))
if __name__ == '__main__':
data = r"../data/category/Kid's Weighted Blankets_08_28_13_22.xlsx"
review = Review(domain='com')
review.run(data=data)
| 1.59375 | 2 |
api/api/pokemon/views.py | farnswj1/PokemonAPI | 0 | 1671 | from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
UpdateAPIView,
DestroyAPIView
)
from .models import Pokemon
from .serializers import PokemonSerializer
from .filters import PokemonFilterSet
# Create your views here.
class PokemonListAPIView(ListAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
filterset_class = PokemonFilterSet
@method_decorator(cache_page(7200))
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PokemonDetailAPIView(RetrieveAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
@method_decorator(cache_page(7200))
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PokemonCreateAPIView(CreateAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class PokemonUpdateAPIView(UpdateAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class PokemonDeleteAPIView(DestroyAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
| 1.390625 | 1 |
Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py | davidbrownell/Common_EnvironmentEx | 0 | 1679 | """\
Dialog to show details of internal errors.
@copyright: 2014-2016 <NAME>
@copyright: 2017 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import bugdialog_ui
import config
import log
import logging
import sys
import wx
class BugReport(bugdialog_ui.UIBugDialog):
"Dialog to show details of internal errors"
_disabled = False # Flag to prevent dialog popups during test runs.
def __init__(self):
self._disabled = getattr(sys, '_called_from_test', False)
bugdialog_ui.UIBugDialog.__init__(self, None, -1, "")
def SetContent(self, msg, exc):
"""Prepare given exception information and show it as dialog content.
msg: Short description of the action that has raised this error
exc: Caught exception (Exception instance)
see: SetContentEI()"""
if self._disabled:
return
exc_type = exc.__class__.__name__
exc_msg = str(exc)
header = self.st_header.GetLabel() % {'action': msg}
log.exception_orig(header)
self._fill_dialog(exc_msg, exc_type, header)
def SetContentEI(self, exc_type, exc_value, exc_tb, msg=_('An internal error occurred')):
"""Format given exception and add details to dialog.
exc_type: Exception type
exc_value: Exception value
exc_tb: Exception traceback
msg: Short description of the exception
see: SetContent()"""
if self._disabled:
return
# don't use exception() because it overwrites exc_info with 1
logging.error(msg, exc_info=(exc_type, exc_value, exc_tb))
self._fill_dialog(msg, exc_type, _('An internal error occurred'))
def _fill_dialog(self, exc_msg, exc_type, header):
"""Fill the bug dialog
exc_msg: Short exception summary
exc_type: Exception type as string
header: Initial message
        see: L{SetContent()}, L{SetContentEI()}"""
details = log.getBufferAsString()
if not exc_msg:
exc_msg = _('No summary available')
summary = self.st_summary.GetLabel() % { 'exc_type':exc_type, 'exc_msg':exc_msg }
self.st_header.SetLabel(header)
self.st_summary.SetLabel(summary)
self.tc_details.SetValue(details)
howto = self.tc_howto_report.GetValue()
howto = howto % {'log_file': config.log_file}
self.tc_howto_report.SetValue(howto)
def OnCopy(self, event):
"Copy the dialog content to the clipboard"
text = self.tc_details.GetValue()
if not text:
return
data = wx.TextDataObject(text)
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(data)
wx.TheClipboard.Close()
else:
wx.MessageBox("Unable to open the clipboard", "Error")
def ShowModal(self, **kwargs):
if getattr(sys, '_called_from_test', False):
return wx.ID_OK
        return super(BugReport, self).ShowModal(**kwargs)
def Show(msg, exc):
"""Wrapper for creating a L{BugReport} dialog and show the details of the given exception instance.
msg: Short description of the action that has raised this error
exc: Caught exception
    see: L{ShowEI()}, L{BugReport.SetContent()}"""
dialog = BugReport()
dialog.SetContent(msg, exc)
dialog.ShowModal()
dialog.Destroy()
def ShowEI(exc_type, exc_value, exc_tb, msg=None):
"""Wrapper for creating a L{BugReport} dialog and show the given exception details.
exc_type: Exception type
exc_value: Exception value
exc_tb: Exception traceback
msg: Short description of the exception
    see: L{Show()}, L{BugReport.SetContent()}"""
dialog = BugReport()
dialog.SetContentEI(exc_type, exc_value, exc_tb, msg)
dialog.ShowModal()
dialog.Destroy()
def ShowEnvironmentError(msg, inst):
"""Show EnvironmentError exceptions detailed and user-friendly
msg: Error message
inst: The caught exception"""
details = {'msg':msg, 'type':inst.__class__.__name__}
if inst.filename:
details['filename'] = _('Filename: %s') % inst.filename
if inst.errno is not None and inst.strerror is not None:
details['error'] = '%s - %s' % (inst.errno, inst.strerror)
else:
details['error'] = str(inst.args)
text = _("""%(msg)s
Error type: %(type)s
Error code: %(error)s
%(filename)s""") % details
wx.MessageBox(text, _('Error'), wx.OK | wx.CENTRE | wx.ICON_ERROR)
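# Minimal usage sketch (assumes a running wx.App; the failing call is illustrative):
#     try:
#         open('/nonexistent')
#     except EnvironmentError as inst:
#         ShowEnvironmentError(_('Unable to open file'), inst)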
| 2.09375 | 2 |
language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py | Xtuden-com/language | 1,199 | 1727 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sentencize the raw wikitext103."""
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("wiki103_raw", None,
"Path to raw wikitext103 train corpus.")
flags.DEFINE_string("output_path", None,
"Path to output the processed dataset.")
FLAGS = flags.FLAGS
def main(_):
with open(FLAGS.wiki103_raw, "r") as f:
data = f.read().strip().split("\n")
data = [x.split(" . ") for x in data if x.strip() and x.strip()[0] != "="]
sentences = []
for para in data:
for sent in para:
sentences.append(sent + ".")
data = "\n".join(sentences)
data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
data = data.replace(" ;", ";")
data = "\n".join([x for x in data.split("\n") if len(x.split()) > 3])
logging.info("length = %d", len(data.split("\n")))
with open(FLAGS.output_path, "w") as f:
f.write(data)
if __name__ == "__main__":
app.run(main)
| 1.734375 | 2 |
ds.py | tobiichiorigami1/csp | 0 | 1735 | votes_t_shape = [3, 0, 1, 2]
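# Appends the remaining axis indices (4 and 5), producing the permutation
# [3, 0, 1, 2, 4, 5] for transposing a 6-D tensor; 6 is the tensor rank and
# 4 the number of axes already listed.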
for i in range(6 - 4):
votes_t_shape += [i + 4]
print(votes_t_shape)
| 1.304688 | 1 |
helios/tasks.py | mattmurch/helios-server | 0 | 1743 | """
Celery queued tasks for Helios
2010-08-01
<EMAIL>
"""
import copy
from celery import shared_task
from celery.utils.log import get_logger
import signals
from models import CastVote, Election, Voter, VoterFile
from view_utils import render_template_raw
@shared_task
def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs):
cast_vote = CastVote.objects.get(id=cast_vote_id)
result = cast_vote.verify_and_store()
voter = cast_vote.voter
election = voter.election
user = voter.get_user()
if result:
# send the signal
signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote)
if status_update_message and user.can_update_status():
user.update_status(status_update_message)
else:
logger = get_logger(cast_vote_verify_and_store.__name__)
logger.error("Failed to verify and store %d" % cast_vote_id)
@shared_task
def voters_email(election_id, subject_template, body_template, extra_vars={},
voter_constraints_include=None, voter_constraints_exclude=None):
"""
voter_constraints_include are conditions on including voters
voter_constraints_exclude are conditions on excluding voters
"""
election = Election.objects.get(id=election_id)
# select the right list of voters
voters = election.voter_set.all()
if voter_constraints_include:
voters = voters.filter(**voter_constraints_include)
if voter_constraints_exclude:
voters = voters.exclude(**voter_constraints_exclude)
for voter in voters:
single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars)
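# Usage sketch (the template paths and constraint value are illustrative):
#   voters_email.delay(election.id, 'email/subject.txt', 'email/body.txt',
#                      voter_constraints_exclude={'vote_hash': None})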
@shared_task
def voters_notify(election_id, notification_template, extra_vars={}):
election = Election.objects.get(id=election_id)
for voter in election.voter_set.all():
single_voter_notify.delay(voter.uuid, notification_template, extra_vars)
@shared_task
def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}):
voter = Voter.objects.get(uuid=voter_uuid)
the_vars = copy.copy(extra_vars)
the_vars.update({'voter': voter})
subject = render_template_raw(None, subject_template, the_vars)
body = render_template_raw(None, body_template, the_vars)
voter.send_message(subject, body)
@shared_task
def single_voter_notify(voter_uuid, notification_template, extra_vars={}):
voter = Voter.objects.get(uuid=voter_uuid)
the_vars = copy.copy(extra_vars)
the_vars.update({'voter': voter})
notification = render_template_raw(None, notification_template, the_vars)
voter.send_notification(notification)
@shared_task
def election_compute_tally(election_id):
election = Election.objects.get(id=election_id)
election.compute_tally()
election_notify_admin.delay(election_id=election_id,
subject="encrypted tally computed",
body="""
The encrypted tally for election %s has been computed.
--
Helios
""" % election.name)
if election.has_helios_trustee():
tally_helios_decrypt.delay(election_id=election.id)
@shared_task
def tally_helios_decrypt(election_id):
election = Election.objects.get(id=election_id)
election.helios_trustee_decrypt()
election_notify_admin.delay(election_id=election_id,
subject='Helios Decrypt',
body="""
Helios has decrypted its portion of the tally
for election %s.
--
Helios
""" % election.name)
@shared_task
def voter_file_process(voter_file_id):
voter_file = VoterFile.objects.get(id=voter_file_id)
voter_file.process()
election_notify_admin.delay(election_id=voter_file.election.id,
subject='voter file processed',
body="""
Your voter file upload for election %s
has been processed.
%s voters have been created.
--
Helios
""" % (voter_file.election.name, voter_file.num_voters))
@shared_task
def election_notify_admin(election_id, subject, body):
election = Election.objects.get(id=election_id)
election.admin.send_message(subject, body)
| 1.40625 | 1 |
real_plot_fft_stft_impl.py | MuAuan/Scipy-Swan | 0 | 1751 | import pyaudio
import wave
from scipy.fftpack import fft, ifft
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import signal
from swan import pycwt
CHUNK = 1024
FORMAT = pyaudio.paInt16  # 16-bit integer samples
CHANNELS = 1  # 1: mono, 2: stereo
RATE = 22100  # sample rate in Hz (22.1 kHz; 44.1 kHz is CD quality)
RECORD_SECONDS = 5  # record 5 seconds per capture
WAVE_OUTPUT_FILENAME = "output2.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
s=1
# Initialize the figure
fig = plt.figure(figsize=(12, 10))
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
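# ax1: raw waveform, ax2: STFT spectrogram (log-frequency axis),
# ax3: FFT power spectrum (log-frequency axis)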
ax2.axis([0, 5, 200,20000])
ax2.set_yscale('log')
while True:
fig.delaxes(ax1)
fig.delaxes(ax3)
ax1 = fig.add_subplot(311)
ax3 = fig.add_subplot(313)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
wavfile = WAVE_OUTPUT_FILENAME
wr = wave.open(wavfile, "rb")
ch = CHANNELS #wr.getnchannels()
width = p.get_sample_size(FORMAT) #wr.getsampwidth()
fr = RATE #wr.getframerate()
fn = wr.getnframes()
fs = fn / fr
origin = wr.readframes(wr.getnframes())
data = origin[:fn]
wr.close()
sig = np.frombuffer(data, dtype="int16") /32768.0
    t = np.linspace(0, fs, fn // 2, endpoint=False)  # linspace needs an integer sample count
ax1.axis([0, 5, -0.0075,0.0075])
ax1.plot(t, sig)
nperseg = 256
f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg)
ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv')
freq =fft(sig,int(fn/2))
Pyy = np.sqrt(freq*freq.conj())*2/fn
f = np.arange(int(fn/2))
ax3.axis([200, 20000, 0,0.000075])
ax3.set_xscale('log')
ax3.plot(f,Pyy)
plt.pause(1)
plt.savefig('figure'+str(s)+'.png')
s += 1
| 2.015625 | 2 |
games/migrations/0002_auto_20201026_1221.py | IceArrow256/game-list | 3 | 1767 | # Generated by Django 3.1.2 on 2020-10-26 12:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='game',
name='score',
field=models.FloatField(null=True, verbose_name='Score'),
),
migrations.AlterField(
model_name='game',
name='series',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='games.series'),
),
]
| 1.085938 | 1 |
pyscf/nao/m_comp_coulomb_pack.py | robert-anderson/pyscf | 2 | 1791 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from pyscf.nao.m_coulomb_am import coulomb_am
import numpy as np
try:
import numba as nb
from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril
use_numba = True
except:
use_numba = False
#
#
#
def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs):
"""
Computes the matrix elements given by funct, for instance coulomb interaction
Args:
sv : (System Variables), this must have arrays of coordinates and species, etc
ao_log : description of functions (either orbitals or product basis functions)
Returns:
matrix elements for the whole system in packed form (lower triangular part)
"""
from pyscf.nao.m_ao_matelem import ao_matelem_c
from pyscf.nao.m_pack2den import ij2pack_l
aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp)
me = ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log)
atom2s = np.zeros((sv.natm+1), dtype=np.int64)
for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]
norbs = atom2s[-1]
res = np.zeros(norbs*(norbs+1)//2, dtype=dtype)
for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
#print("atom1 = {0}, rv1 = {1}".format(atom1, rv1))
for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
if atom2>atom1: continue # skip
oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs)
if use_numba:
fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs)
else:
for i1 in range(s1,f1):
for i2 in range(s2, min(i1+1, f2)):
res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2]
#print("number call = ", count)
#print("sum kernel: {0:.6f}".format(np.sum(abs(res))))
#np.savetxt("kernel_pyscf.txt", res)
#import sys
#sys.exit()
return res, norbs
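# Usage sketch (assumes an initialized system-variables object `sv`):
#   pack, norbs = comp_coulomb_pack(sv)
# `pack` holds the lower triangle of the norbs x norbs matrix in packed form;
# see m_pack2den.ij2pack_l for the exact index convention.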
| 1.953125 | 2 |
cruiser-lib/test/positioning/test_position_hl_commander.py | cfreebuf/kubeedge-examples | 0 | 1807 | # -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2018 Bitcraze AB
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import math
import sys
import unittest
from cflib.crazyflie import Crazyflie
from cflib.crazyflie import HighLevelCommander
from cflib.crazyflie import Param
from cflib.positioning.position_hl_commander import PositionHlCommander
if sys.version_info < (3, 3):
from mock import MagicMock, patch, call
else:
from unittest.mock import MagicMock, patch, call
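# Patching time.sleep at class level keeps the tests fast; each test method
# receives the mock as its `sleep_mock` argument.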
@patch('time.sleep')
class TestPositionHlCommander(unittest.TestCase):
def setUp(self):
self.commander_mock = MagicMock(spec=HighLevelCommander)
self.param_mock = MagicMock(spec=Param)
self.cf_mock = MagicMock(spec=Crazyflie)
self.cf_mock.high_level_commander = self.commander_mock
self.cf_mock.param = self.param_mock
self.cf_mock.is_connected.return_value = True
self.sut = PositionHlCommander(self.cf_mock)
def test_that_the_estimator_is_reset_on_take_off(
self, sleep_mock):
# Fixture
sut = PositionHlCommander(self.cf_mock, 1.0, 2.0, 3.0)
# Test
sut.take_off()
# Assert
self.param_mock.set_value.assert_has_calls([
call('kalman.initialX', '{:.2f}'.format(1.0)),
call('kalman.initialY', '{:.2f}'.format(2.0)),
call('kalman.initialZ', '{:.2f}'.format(3.0)),
call('kalman.resetEstimation', '1'),
call('kalman.resetEstimation', '0')
])
def test_that_the_hi_level_commander_is_activated_on_take_off(
self, sleep_mock):
# Fixture
# Test
self.sut.take_off()
# Assert
self.param_mock.set_value.assert_has_calls([
call('commander.enHighLevel', '1')
])
def test_that_controller_is_selected_on_take_off(
self, sleep_mock):
# Fixture
self.sut.set_controller(PositionHlCommander.CONTROLLER_MELLINGER)
# Test
self.sut.take_off()
# Assert
self.param_mock.set_value.assert_has_calls([
call('stabilizer.controller', '2')
])
def test_that_take_off_raises_exception_if_not_connected(
self, sleep_mock):
# Fixture
self.cf_mock.is_connected.return_value = False
# Test
# Assert
with self.assertRaises(Exception):
self.sut.take_off()
def test_that_take_off_raises_exception_when_already_flying(
self, sleep_mock):
# Fixture
self.sut.take_off()
# Test
# Assert
with self.assertRaises(Exception):
self.sut.take_off()
def test_that_it_goes_up_on_take_off(
self, sleep_mock):
# Fixture
# Test
self.sut.take_off(height=0.4, velocity=0.6)
# Assert
duration = 0.4 / 0.6
self.commander_mock.takeoff.assert_called_with(0.4, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_up_to_default_height(
self, sleep_mock):
# Fixture
sut = PositionHlCommander(self.cf_mock, default_height=0.4)
# Test
sut.take_off(velocity=0.6)
# Assert
duration = 0.4 / 0.6
self.commander_mock.takeoff.assert_called_with(0.4, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_down_on_landing(
self, sleep_mock):
# Fixture
self.sut.take_off(height=0.4)
# Test
self.sut.land(velocity=0.6)
# Assert
duration = 0.4 / 0.6
self.commander_mock.land.assert_called_with(0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_takes_off_and_lands_as_context_manager(
self, sleep_mock):
# Fixture
# Test
with self.sut:
pass
# Assert
duration1 = 0.5 / 0.5
duration2 = 0.5 / 0.5
self.commander_mock.takeoff.assert_called_with(0.5, duration1)
self.commander_mock.land.assert_called_with(0.0, duration2)
sleep_mock.assert_called_with(duration1)
sleep_mock.assert_called_with(duration2)
def test_that_it_returns_current_position(
self, sleep_mock):
# Fixture
self.sut.take_off(height=0.4, velocity=0.6)
# Test
actual = self.sut.get_position()
# Assert
self.assertEqual(actual, (0.0, 0.0, 0.4))
def test_that_it_goes_to_position(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.go_to(1.0, 2.0, 3.0, 4.0)
# Assert
distance = self._distance(inital_pos, (1.0, 2.0, 3.0))
duration = distance / 4.0
self.commander_mock.go_to.assert_called_with(
1.0, 2.0, 3.0, 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_moves_distance(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.move_distance(1.0, 2.0, 3.0, 4.0)
# Assert
distance = self._distance((0.0, 0.0, 0.0), (1.0, 2.0, 3.0))
duration = distance / 4.0
final_pos = (
inital_pos[0] + 1.0,
inital_pos[1] + 2.0,
inital_pos[2] + 3.0)
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_forward(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.forward(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0] + 1.0,
inital_pos[1],
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_back(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.back(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0] - 1.0,
inital_pos[1],
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_left(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.left(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1] + 1.0,
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_right(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.right(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1] - 1,
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_up(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.up(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1],
inital_pos[2] + 1)
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_down(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.down(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1],
inital_pos[2] - 1)
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0, duration)
sleep_mock.assert_called_with(duration)
def test_that_default_velocity_is_used(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
self.sut.set_default_velocity(7)
# Test
self.sut.go_to(1.0, 2.0, 3.0)
# Assert
distance = self._distance(inital_pos, (1.0, 2.0, 3.0))
duration = distance / 7.0
self.commander_mock.go_to.assert_called_with(
1.0, 2.0, 3.0, 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_default_height_is_used(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
self.sut.set_default_velocity(7.0)
self.sut.set_default_height(5.0)
# Test
self.sut.go_to(1.0, 2.0)
# Assert
distance = self._distance(inital_pos, (1.0, 2.0, 5.0))
duration = distance / 7.0
self.commander_mock.go_to.assert_called_with(
1.0, 2.0, 5.0, 0.0, duration)
sleep_mock.assert_called_with(duration)
######################################################################
def _distance(self, p1, p2):
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
dz = p1[2] - p2[2]
return math.sqrt(dx * dx + dy * dy + dz * dz)
if __name__ == '__main__':
unittest.main()
| 1.609375 | 2 |
src/fiesta/urls.py | lerooze/django-fiesta | 0 | 1823 | # urls.py
from django.urls import path, register_converter
from fiesta import converters
from fiesta.views import views
from rest_framework.urlpatterns import format_suffix_patterns
# "http://django-sdmx.org/wsrest/"
# "http://django-sdmx.org/ws/"
register_converter(converters.ResourceConverter, 'res')
register_converter(converters.AgencyConverter, 'age')
register_converter(converters.ContextConverter, 'con')
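# A path converter maps a URL segment to a Python value. A minimal sketch of
# the shape such a converter takes (the real regexes live in fiesta.converters):
#   class ResourceConverter:
#       regex = '[A-Za-z]+'
#       def to_python(self, value): return value
#       def to_url(self, value): return value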
urlpatterns = [
path('wsreg/SubmitStructure/', views.SubmitStructureRequestView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>/<str:version>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/<res:resource>/', views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/'
'<str:version>/',
views.SDMXRESTfulStructureView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 1.070313 | 1 |
src/saml2/saml.py | masterapps-au/pysaml2 | 0 | 1863 | #!/usr/bin/env python
#
# Generated Mon May 2 14:23:33 2011 by parse_xsd.py version 0.4.
#
# A summary of available specifications can be found at:
# https://wiki.oasis-open.org/security/FrontPage
#
# saml core specifications to be found at:
# if any question arise please query the following pdf.
# http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
# The specification was later updated with errata, and the new version is here:
# https://www.oasis-open.org/committees/download.php/56776/sstc-saml-core-errata-2.0-wd-07.pdf
#
try:
from base64 import encodebytes as b64encode
except ImportError:
from base64 import b64encode
from saml2.validate import valid_ipv4, MustValueError
from saml2.validate import valid_ipv6
from saml2.validate import ShouldValueError
from saml2.validate import valid_domain_name
import saml2
from saml2 import SamlBase
import six
from saml2 import xmldsig as ds
from saml2 import xmlenc as xenc
# authentication information fields
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:assertion'
# xmlschema definition
XSD = "xs"
# xmlschema templates and extensions
XS_NAMESPACE = 'http://www.w3.org/2001/XMLSchema'
# xmlschema-instance, which contains several builtin attributes
XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
# xml soap namespace
NS_SOAP_ENC = "http://schemas.xmlsoap.org/soap/encoding/"
# type definitions for xmlschemas
XSI_TYPE = '{%s}type' % XSI_NAMESPACE
# nil type definition for xmlschemas
XSI_NIL = '{%s}nil' % XSI_NAMESPACE
# IdP and SP usually communicate about a subject (NameID);
# the Format attribute determines the category the subject is in.
# custom subject
NAMEID_FORMAT_UNSPECIFIED = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified")
# subject as email address
NAMEID_FORMAT_EMAILADDRESS = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress")
# subject as x509 key
NAMEID_FORMAT_X509SUBJECTNAME = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName")
# subject as windows domain name
NAMEID_FORMAT_WINDOWSDOMAINQUALIFIEDNAME = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName")
# subject from a kerberos instance
NAMEID_FORMAT_KERBEROS = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:kerberos")
# subject as name
NAMEID_FORMAT_ENTITY = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:entity")
# linked subject
NAMEID_FORMAT_PERSISTENT = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:persistent")
# anonymous subject
NAMEID_FORMAT_TRANSIENT = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:transient")
# subject available in encrypted format
NAMEID_FORMAT_ENCRYPTED = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted")
# dict of available formats
NAMEID_FORMATS_SAML2 = (
('NAMEID_FORMAT_EMAILADDRESS', NAMEID_FORMAT_EMAILADDRESS),
('NAMEID_FORMAT_ENCRYPTED', NAMEID_FORMAT_ENCRYPTED),
('NAMEID_FORMAT_ENTITY', NAMEID_FORMAT_ENTITY),
('NAMEID_FORMAT_PERSISTENT', NAMEID_FORMAT_PERSISTENT),
('NAMEID_FORMAT_TRANSIENT', NAMEID_FORMAT_TRANSIENT),
('NAMEID_FORMAT_UNSPECIFIED', NAMEID_FORMAT_UNSPECIFIED),
)
# a profile outlines a set of rules describing how to embed SAML assertions.
# https://docs.oasis-open.org/security/saml/v2.0/saml-profiles-2.0-os.pdf
# The specification was later updated with errata, and the new version is here:
# https://www.oasis-open.org/committees/download.php/56782/sstc-saml-profiles-errata-2.0-wd-07.pdf
# XML based values for SAML attributes
PROFILE_ATTRIBUTE_BASIC = (
"urn:oasis:names:tc:SAML:2.0:profiles:attribute:basic")
# an AuthnRequest is made to initiate authentication
# authenticate the request with login credentials
AUTHN_PASSWORD = "urn:oasis:names:tc:SAML:2.0:ac:classes:Password"
# authenticate the request with login credentials, over tls/https
AUTHN_PASSWORD_PROTECTED = \
"urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
# attribute statements is key:value metadata shared with your app
# custom format
NAME_FORMAT_UNSPECIFIED = (
"urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified")
# uri format
NAME_FORMAT_URI = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
# XML-based format
NAME_FORMAT_BASIC = "urn:oasis:names:tc:SAML:2.0:attrname-format:basic"
# dict of available formats
NAME_FORMATS_SAML2 = (
('NAME_FORMAT_BASIC', NAME_FORMAT_BASIC),
('NAME_FORMAT_URI', NAME_FORMAT_URI),
('NAME_FORMAT_UNSPECIFIED', NAME_FORMAT_UNSPECIFIED),
)
# the SAML authority's decision can be predetermined by arbitrary context
# the specified action is permitted
DECISION_TYPE_PERMIT = "Permit"
# the specified action is denied
DECISION_TYPE_DENY = "Deny"
# the SAML authority cannot determine if the action is permitted or denied
DECISION_TYPE_INDETERMINATE = "Indeterminate"
# consent attributes determine whether consent has been given and under
# what conditions
# no claim to consent is made
CONSENT_UNSPECIFIED = "urn:oasis:names:tc:SAML:2.0:consent:unspecified"
# consent has been obtained
CONSENT_OBTAINED = "urn:oasis:names:tc:SAML:2.0:consent:obtained"
# consent has been obtained before the message has been initiated
CONSENT_PRIOR = "urn:oasis:names:tc:SAML:2.0:consent:prior"
# consent has been obtained implicitly
CONSENT_IMPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-implicit"
# consent has been obtained explicitly
CONSENT_EXPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-explicit"
# no consent has been obtained
CONSENT_UNAVAILABLE = "urn:oasis:names:tc:SAML:2.0:consent:unavailable"
# no consent is needed.
CONSENT_INAPPLICABLE = "urn:oasis:names:tc:SAML:2.0:consent:inapplicable"
# Subject confirmation methods (SCM) can be issued by third parties,
# not only by the subject itself.
# http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.0.pdf
# the 3rd party is identified on behalf of the subject given private/public key
SCM_HOLDER_OF_KEY = "urn:oasis:names:tc:SAML:2.0:cm:holder-of-key"
# the 3rd party is identified by subject confirmation and must include a security header
# signing its content.
SCM_SENDER_VOUCHES = "urn:oasis:names:tc:SAML:2.0:cm:sender-vouches"
# a bearer token is issued instead.
SCM_BEARER = "urn:oasis:names:tc:SAML:2.0:cm:bearer"
class AttributeValueBase(SamlBase):
def __init__(self,
text=None,
extension_elements=None,
extension_attributes=None):
self._extatt = {}
SamlBase.__init__(self,
text=None,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
if self._extatt:
self.extension_attributes = self._extatt
if text:
self.set_text(text)
elif not extension_elements:
self.extension_attributes = {XSI_NIL: 'true'}
elif XSI_TYPE in self.extension_attributes:
del self.extension_attributes[XSI_TYPE]
def __setattr__(self, key, value):
if key == "text":
self.set_text(value)
else:
SamlBase.__setattr__(self, key, value)
def verify(self):
if not self.text and not self.extension_elements:
            # an empty value is only valid when explicitly marked xsi:nil="true"
            if not self.extension_attributes:
                raise Exception(
                    "Empty attribute value must carry xsi:nil='true'"
                )
            if self.extension_attributes[XSI_NIL] != "true":
                raise Exception(
                    "Empty attribute value must carry xsi:nil='true'"
                )
            return True
        else:
            return SamlBase.verify(self)
def set_type(self, typ):
try:
del self.extension_attributes[XSI_NIL]
except (AttributeError, KeyError):
pass
try:
self.extension_attributes[XSI_TYPE] = typ
except AttributeError:
self._extatt[XSI_TYPE] = typ
if typ.startswith('xs:'):
try:
self.extension_attributes['xmlns:xs'] = XS_NAMESPACE
except AttributeError:
self._extatt['xmlns:xs'] = XS_NAMESPACE
if typ.startswith('xsd:'):
try:
self.extension_attributes['xmlns:xsd'] = XS_NAMESPACE
except AttributeError:
self._extatt['xmlns:xsd'] = XS_NAMESPACE
def get_type(self):
try:
return self.extension_attributes[XSI_TYPE]
except (KeyError, AttributeError):
try:
return self._extatt[XSI_TYPE]
except KeyError:
return ""
def clear_type(self):
try:
del self.extension_attributes[XSI_TYPE]
except KeyError:
pass
try:
del self._extatt[XSI_TYPE]
except KeyError:
pass
def set_text(self, value, base64encode=False):
def _wrong_type_value(xsd, value):
msg = 'Type and value do not match: {xsd}:{type}:{value}'
msg = msg.format(xsd=xsd, type=type(value), value=value)
raise ValueError(msg)
        # normalize to text strings (unicode on Python 2, str on Python 3)
_str = unicode if six.PY2 else str
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
type_to_xsd = {
_str: 'string',
int: 'integer',
float: 'float',
bool: 'boolean',
type(None): '',
}
# entries of xsd-types each declaring:
# - a corresponding python type
# - a function to turn a string into that type
# - a function to turn that type into a text-value
xsd_types_props = {
'string': {
'type': _str,
'to_type': _str,
'to_text': _str,
},
'integer': {
'type': int,
'to_type': int,
'to_text': _str,
},
'short': {
'type': int,
'to_type': int,
'to_text': _str,
},
'int': {
'type': int,
'to_type': int,
'to_text': _str,
},
'long': {
'type': int,
'to_type': int,
'to_text': _str,
},
'float': {
'type': float,
'to_type': float,
'to_text': _str,
},
'double': {
'type': float,
'to_type': float,
'to_text': _str,
},
'boolean': {
'type': bool,
'to_type': lambda x: {
'true': True,
'false': False,
}[_str(x).lower()],
'to_text': lambda x: _str(x).lower(),
},
'base64Binary': {
'type': _str,
'to_type': _str,
'to_text': (
lambda x: b64encode(x.encode()) if base64encode else x
),
},
'anyType': {
'type': type(value),
'to_type': lambda x: x,
'to_text': lambda x: x,
},
'': {
'type': type(None),
'to_type': lambda x: None,
'to_text': lambda x: '',
},
}
xsd_string = (
'base64Binary' if base64encode
else self.get_type()
or type_to_xsd.get(type(value)))
xsd_ns, xsd_type = (
['', type(None)] if xsd_string is None
else ['', ''] if xsd_string == ''
else [
XSD if xsd_string in xsd_types_props else '',
xsd_string
] if ':' not in xsd_string
else xsd_string.split(':', 1))
xsd_type_props = xsd_types_props.get(xsd_type, {})
valid_type = xsd_type_props.get('type', type(None))
to_type = xsd_type_props.get('to_type', str)
to_text = xsd_type_props.get('to_text', str)
# cast to correct type before type-checking
if type(value) is _str and valid_type is not _str:
try:
value = to_type(value)
except (TypeError, ValueError, KeyError):
# the cast failed
_wrong_type_value(xsd=xsd_type, value=value)
if type(value) is not valid_type:
_wrong_type_value(xsd=xsd_type, value=value)
text = to_text(value)
self.set_type(
'{ns}:{type}'.format(ns=xsd_ns, type=xsd_type) if xsd_ns
else xsd_type if xsd_type
else '')
SamlBase.__setattr__(self, 'text', text)
return self
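    # Behaviour sketch: set_text(True) stores the text "true" and sets
    # xsi:type to "xs:boolean"; set_text("secret", base64encode=True) stores
    # the base64-encoded value with xsi:type "xs:base64Binary".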
def harvest_element_tree(self, tree):
# Fill in the instance members from the contents of the XML tree.
for child in tree:
self._convert_element_tree_to_member(child)
for attribute, value in iter(tree.attrib.items()):
self._convert_element_attribute_to_member(attribute, value)
# if we have added children to this node
# we consider whitespace insignificant
# and remove/trim/strip whitespace
# and expect to not have actual text content
text = (
tree.text.strip()
if tree.text and self.extension_elements
else tree.text
)
if text:
#print("set_text:", tree.text)
# clear type
#self.clear_type()
self.set_text(text)
# if we have added a text node
# or other children to this node
# remove the nil marker
if text or self.extension_elements:
if XSI_NIL in self.extension_attributes:
del self.extension_attributes[XSI_NIL]
class BaseIDAbstractType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:BaseIDAbstractType element """
c_tag = 'BaseIDAbstractType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['NameQualifier'] = ('name_qualifier', 'string', False)
c_attributes['SPNameQualifier'] = ('sp_name_qualifier', 'string', False)
def __init__(self,
name_qualifier=None,
sp_name_qualifier=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.name_qualifier = name_qualifier
self.sp_name_qualifier = sp_name_qualifier
class NameIDType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:NameIDType element """
c_tag = 'NameIDType'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['NameQualifier'] = ('name_qualifier', 'string', False)
c_attributes['SPNameQualifier'] = ('sp_name_qualifier', 'string', False)
c_attributes['Format'] = ('format', 'anyURI', False)
c_attributes['SPProvidedID'] = ('sp_provided_id', 'string', False)
def __init__(self,
name_qualifier=None,
sp_name_qualifier=None,
format=None,
sp_provided_id=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.name_qualifier = name_qualifier
self.sp_name_qualifier = sp_name_qualifier
self.format = format
self.sp_provided_id = sp_provided_id
def name_id_type__from_string(xml_string):
return saml2.create_class_from_xml_string(NameIDType_, xml_string)
class EncryptedElementType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedElementType element
"""
c_tag = 'EncryptedElementType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2001/04/xmlenc#}EncryptedData'] = (
'encrypted_data',
xenc.EncryptedData)
c_children['{http://www.w3.org/2001/04/xmlenc#}EncryptedKey'] = (
'encrypted_key',
[xenc.EncryptedKey])
c_cardinality['encrypted_key'] = {"min": 0}
c_child_order.extend(['encrypted_data', 'encrypted_key'])
def __init__(self,
encrypted_data=None,
encrypted_key=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.encrypted_data = encrypted_data
self.encrypted_key = encrypted_key or []
def encrypted_element_type__from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedElementType_, xml_string)
class EncryptedID(EncryptedElementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedID element """
c_tag = 'EncryptedID'
c_namespace = NAMESPACE
c_children = EncryptedElementType_.c_children.copy()
c_attributes = EncryptedElementType_.c_attributes.copy()
c_child_order = EncryptedElementType_.c_child_order[:]
c_cardinality = EncryptedElementType_.c_cardinality.copy()
def encrypted_id_from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedID, xml_string)
class Issuer(NameIDType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Issuer element """
c_tag = 'Issuer'
c_namespace = NAMESPACE
c_children = NameIDType_.c_children.copy()
c_attributes = NameIDType_.c_attributes.copy()
c_child_order = NameIDType_.c_child_order[:]
c_cardinality = NameIDType_.c_cardinality.copy()
def issuer_from_string(xml_string):
return saml2.create_class_from_xml_string(Issuer, xml_string)
class AssertionIDRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AssertionIDRef element """
c_tag = 'AssertionIDRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'NCName'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def assertion_id_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AssertionIDRef, xml_string)
class AssertionURIRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AssertionURIRef element """
c_tag = 'AssertionURIRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def assertion_uri_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AssertionURIRef, xml_string)
class SubjectConfirmationDataType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationDataType
element """
c_tag = 'SubjectConfirmationDataType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['NotBefore'] = ('not_before', 'dateTime', False)
c_attributes['NotOnOrAfter'] = ('not_on_or_after', 'dateTime', False)
c_attributes['Recipient'] = ('recipient', 'anyURI', False)
c_attributes['InResponseTo'] = ('in_response_to', 'NCName', False)
c_attributes['Address'] = ('address', 'string', False)
c_any = {"namespace": "##any", "processContents": "lax", "minOccurs": "0",
"maxOccurs": "unbounded"}
c_any_attribute = {"namespace": "##other", "processContents": "lax"}
def __init__(self,
not_before=None,
not_on_or_after=None,
recipient=None,
in_response_to=None,
address=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.not_before = not_before
self.not_on_or_after = not_on_or_after
self.recipient = recipient
self.in_response_to = in_response_to
self.address = address
def subject_confirmation_data_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmationDataType_,
xml_string)
class KeyInfoConfirmationDataType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:KeyInfoConfirmationDataType
element """
c_tag = 'KeyInfoConfirmationDataType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
[ds.KeyInfo])
c_cardinality['key_info'] = {"min": 1}
c_child_order.extend(['key_info'])
def __init__(self,
key_info=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.key_info = key_info or []
def key_info_confirmation_data_type__from_string(xml_string):
return saml2.create_class_from_xml_string(KeyInfoConfirmationDataType_,
xml_string)
class ConditionAbstractType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ConditionAbstractType
element """
c_tag = 'ConditionAbstractType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
class Audience(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Audience element """
c_tag = 'Audience'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def audience_from_string(xml_string):
return saml2.create_class_from_xml_string(Audience, xml_string)
class OneTimeUseType_(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:OneTimeUseType element """
c_tag = 'OneTimeUseType'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
def one_time_use_type__from_string(xml_string):
return saml2.create_class_from_xml_string(OneTimeUseType_, xml_string)
class ProxyRestrictionType_(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ProxyRestrictionType element
"""
c_tag = 'ProxyRestrictionType'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Audience'] = ('audience',
[Audience])
c_cardinality['audience'] = {"min": 0}
c_attributes['Count'] = ('count', 'nonNegativeInteger', False)
c_child_order.extend(['audience'])
def __init__(self,
audience=None,
count=None,
text=None,
extension_elements=None,
extension_attributes=None):
ConditionAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.audience = audience or []
self.count = count
def proxy_restriction_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ProxyRestrictionType_, xml_string)
class EncryptedAssertion(EncryptedElementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion element """
c_tag = 'EncryptedAssertion'
c_namespace = NAMESPACE
c_children = EncryptedElementType_.c_children.copy()
c_attributes = EncryptedElementType_.c_attributes.copy()
c_child_order = EncryptedElementType_.c_child_order[:]
c_cardinality = EncryptedElementType_.c_cardinality.copy()
def encrypted_assertion_from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedAssertion, xml_string)
class StatementAbstractType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:StatementAbstractType element
"""
c_tag = 'StatementAbstractType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
class SubjectLocalityType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectLocalityType element """
c_tag = 'SubjectLocalityType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Address'] = ('address', 'string', False)
c_attributes['DNSName'] = ('dns_name', 'string', False)
def __init__(self,
address=None,
dns_name=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.address = address
self.dns_name = dns_name
def subject_locality_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectLocalityType_, xml_string)
class AuthnContextClassRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextClassRef element
"""
c_tag = 'AuthnContextClassRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authn_context_class_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextClassRef, xml_string)
class AuthnContextDeclRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextDeclRef element """
c_tag = 'AuthnContextDeclRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authn_context_decl_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextDeclRef, xml_string)
class AuthnContextDecl(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextDecl element """
c_tag = 'AuthnContextDecl'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyType'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authn_context_decl_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextDecl, xml_string)
class AuthenticatingAuthority(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthenticatingAuthority
element """
c_tag = 'AuthenticatingAuthority'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authenticating_authority_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthenticatingAuthority,
xml_string)
class DecisionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:DecisionType element """
c_tag = 'DecisionType'
c_namespace = NAMESPACE
c_value_type = {'base': 'string', 'enumeration': ['Permit', 'Deny',
'Indeterminate']}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def decision_type__from_string(xml_string):
return saml2.create_class_from_xml_string(DecisionType_, xml_string)
class ActionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ActionType element """
c_tag = 'ActionType'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Namespace'] = ('namespace', 'anyURI', True)
def __init__(self,
namespace=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.namespace = namespace
def action_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ActionType_, xml_string)
class AttributeValue(AttributeValueBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeValue element """
c_tag = 'AttributeValue'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyType'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def attribute_value_from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeValue, xml_string)
class EncryptedAttribute(EncryptedElementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAttribute element """
c_tag = 'EncryptedAttribute'
c_namespace = NAMESPACE
c_children = EncryptedElementType_.c_children.copy()
c_attributes = EncryptedElementType_.c_attributes.copy()
c_child_order = EncryptedElementType_.c_child_order[:]
c_cardinality = EncryptedElementType_.c_cardinality.copy()
def encrypted_attribute_from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedAttribute, xml_string)
class BaseID(BaseIDAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:BaseID element """
c_tag = 'BaseID'
c_namespace = NAMESPACE
c_children = BaseIDAbstractType_.c_children.copy()
c_attributes = BaseIDAbstractType_.c_attributes.copy()
c_child_order = BaseIDAbstractType_.c_child_order[:]
c_cardinality = BaseIDAbstractType_.c_cardinality.copy()
def base_id_from_string(xml_string):
return saml2.create_class_from_xml_string(BaseID, xml_string)
class NameID(NameIDType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:NameID element
From the Oasis SAML2 Technical Overview:
"The <NameID> element within a <Subject> offers the ability to provide name
identifiers in a number of different formats. SAML's predefined formats
include: Email address, X.509 subject name, Windows domain qualified name,
Kerberos principal name, Entity identifier, Persistent identifier,
Transient identifier."
"""
c_tag = 'NameID'
c_namespace = NAMESPACE
c_children = NameIDType_.c_children.copy()
c_attributes = NameIDType_.c_attributes.copy()
c_child_order = NameIDType_.c_child_order[:]
c_cardinality = NameIDType_.c_cardinality.copy()
def name_id_from_string(xml_string):
return saml2.create_class_from_xml_string(NameID, xml_string)
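# Construction sketch (the qualifier and text values are illustrative):
#   nid = NameID(format=NAMEID_FORMAT_PERSISTENT,
#                sp_name_qualifier='urn:example:sp', text='abc123')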
class SubjectConfirmationData(SubjectConfirmationDataType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationData
element """
c_tag = 'SubjectConfirmationData'
c_namespace = NAMESPACE
c_children = SubjectConfirmationDataType_.c_children.copy()
c_attributes = SubjectConfirmationDataType_.c_attributes.copy()
c_child_order = SubjectConfirmationDataType_.c_child_order[:]
c_cardinality = SubjectConfirmationDataType_.c_cardinality.copy()
def subject_confirmation_data_from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmationData,
xml_string)
class Condition(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Condition element """
c_tag = 'Condition'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
def condition_from_string(xml_string):
return saml2.create_class_from_xml_string(Condition, xml_string)
class AudienceRestrictionType_(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AudienceRestrictionType
element """
c_tag = 'AudienceRestrictionType'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Audience'] = ('audience',
[Audience])
c_cardinality['audience'] = {"min": 1}
c_child_order.extend(['audience'])
def __init__(self,
audience=None,
text=None,
extension_elements=None,
extension_attributes=None):
ConditionAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.audience = audience or []
def audience_restriction_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AudienceRestrictionType_,
xml_string)
class OneTimeUse(OneTimeUseType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:OneTimeUse element """
c_tag = 'OneTimeUse'
c_namespace = NAMESPACE
c_children = OneTimeUseType_.c_children.copy()
c_attributes = OneTimeUseType_.c_attributes.copy()
c_child_order = OneTimeUseType_.c_child_order[:]
c_cardinality = OneTimeUseType_.c_cardinality.copy()
def one_time_use_from_string(xml_string):
return saml2.create_class_from_xml_string(OneTimeUse, xml_string)
class ProxyRestriction(ProxyRestrictionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ProxyRestriction element """
c_tag = 'ProxyRestriction'
c_namespace = NAMESPACE
c_children = ProxyRestrictionType_.c_children.copy()
c_attributes = ProxyRestrictionType_.c_attributes.copy()
c_child_order = ProxyRestrictionType_.c_child_order[:]
c_cardinality = ProxyRestrictionType_.c_cardinality.copy()
def proxy_restriction_from_string(xml_string):
return saml2.create_class_from_xml_string(ProxyRestriction, xml_string)
class Statement(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Statement element """
c_tag = 'Statement'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
def statement_from_string(xml_string):
return saml2.create_class_from_xml_string(Statement, xml_string)
class SubjectLocality(SubjectLocalityType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectLocality element """
c_tag = 'SubjectLocality'
c_namespace = NAMESPACE
c_children = SubjectLocalityType_.c_children.copy()
c_attributes = SubjectLocalityType_.c_attributes.copy()
c_child_order = SubjectLocalityType_.c_child_order[:]
c_cardinality = SubjectLocalityType_.c_cardinality.copy()
def verify(self):
if self.address:
# dotted-decimal IPv4 or RFC3513 IPv6 address
if valid_ipv4(self.address) or valid_ipv6(self.address):
pass
else:
raise ShouldValueError("Not an IPv4 or IPv6 address")
elif self.dns_name:
valid_domain_name(self.dns_name)
return SubjectLocalityType_.verify(self)
def subject_locality_from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectLocality, xml_string)
class AuthnContextType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextType element """
c_tag = 'AuthnContextType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextClassRef'] = (
'authn_context_class_ref', AuthnContextClassRef)
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextDecl'] = (
'authn_context_decl',
AuthnContextDecl)
c_cardinality['authn_context_decl'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextDeclRef'] = (
'authn_context_decl_ref',
AuthnContextDeclRef)
c_cardinality['authn_context_decl_ref'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}AuthenticatingAuthority'] = (
'authenticating_authority', [AuthenticatingAuthority])
c_cardinality['authenticating_authority'] = {"min": 0}
c_child_order.extend(['authn_context_class_ref', 'authn_context_decl',
'authn_context_decl_ref', 'authenticating_authority'])
def __init__(self,
authn_context_class_ref=None,
authn_context_decl=None,
authn_context_decl_ref=None,
authenticating_authority=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.authn_context_class_ref = authn_context_class_ref
self.authn_context_decl = authn_context_decl
self.authn_context_decl_ref = authn_context_decl_ref
self.authenticating_authority = authenticating_authority or []
def verify(self):
if self.authn_context_decl and self.authn_context_decl_ref:
raise Exception(
"Invalid Response: "
"Cannot have both <AuthnContextDecl> and <AuthnContextDeclRef>"
)
return SamlBase.verify(self)
def authn_context_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextType_, xml_string)
class Action(ActionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Action element """
c_tag = 'Action'
c_namespace = NAMESPACE
c_children = ActionType_.c_children.copy()
c_attributes = ActionType_.c_attributes.copy()
c_child_order = ActionType_.c_child_order[:]
c_cardinality = ActionType_.c_cardinality.copy()
def action_from_string(xml_string):
return saml2.create_class_from_xml_string(Action, xml_string)
class AttributeType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeType element """
c_tag = 'AttributeType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'] = (
'attribute_value',
[AttributeValue])
c_cardinality['attribute_value'] = {"min": 0}
c_attributes['Name'] = ('name', 'string', True)
c_attributes['NameFormat'] = ('name_format', 'anyURI', False)
c_attributes['FriendlyName'] = ('friendly_name', 'string', False)
c_child_order.extend(['attribute_value'])
c_any_attribute = {"namespace": "##other", "processContents": "lax"}
def __init__(self,
attribute_value=None,
name=None,
name_format=NAME_FORMAT_URI,
friendly_name=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.attribute_value = attribute_value or []
self.name = name
self.name_format = name_format
self.friendly_name = friendly_name
# when consuming such elements, default to NAME_FORMAT_UNSPECIFIED as NameFormat
def harvest_element_tree(self, tree):
tree.attrib.setdefault('NameFormat', NAME_FORMAT_UNSPECIFIED)
SamlBase.harvest_element_tree(self, tree)
def attribute_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeType_, xml_string)
class SubjectConfirmationType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationType
element """
c_tag = 'SubjectConfirmationType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}BaseID'] = ('base_id',
BaseID)
c_cardinality['base_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}NameID'] = ('name_id',
NameID)
c_cardinality['name_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedID'] = (
'encrypted_id',
EncryptedID)
c_cardinality['encrypted_id'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}SubjectConfirmationData'] = (
'subject_confirmation_data', SubjectConfirmationData)
c_cardinality['subject_confirmation_data'] = {"min": 0, "max": 1}
c_attributes['Method'] = ('method', 'anyURI', True)
c_child_order.extend(['base_id', 'name_id', 'encrypted_id',
'subject_confirmation_data'])
def __init__(self,
base_id=None,
name_id=None,
encrypted_id=None,
subject_confirmation_data=None,
method=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.base_id = base_id
self.name_id = name_id
self.encrypted_id = encrypted_id
self.subject_confirmation_data = subject_confirmation_data
self.method = method
def subject_confirmation_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmationType_,
xml_string)
class AudienceRestriction(AudienceRestrictionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AudienceRestriction element """
c_tag = 'AudienceRestriction'
c_namespace = NAMESPACE
c_children = AudienceRestrictionType_.c_children.copy()
c_attributes = AudienceRestrictionType_.c_attributes.copy()
c_child_order = AudienceRestrictionType_.c_child_order[:]
c_cardinality = AudienceRestrictionType_.c_cardinality.copy()
def audience_restriction_from_string(xml_string):
return saml2.create_class_from_xml_string(AudienceRestriction, xml_string)
class AuthnContext(AuthnContextType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContext element """
c_tag = 'AuthnContext'
c_namespace = NAMESPACE
c_children = AuthnContextType_.c_children.copy()
c_attributes = AuthnContextType_.c_attributes.copy()
c_child_order = AuthnContextType_.c_child_order[:]
c_cardinality = AuthnContextType_.c_cardinality.copy()
def authn_context_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContext, xml_string)
class Attribute(AttributeType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Attribute element """
c_tag = 'Attribute'
c_namespace = NAMESPACE
c_children = AttributeType_.c_children.copy()
c_attributes = AttributeType_.c_attributes.copy()
c_child_order = AttributeType_.c_child_order[:]
c_cardinality = AttributeType_.c_cardinality.copy()
def attribute_from_string(xml_string):
return saml2.create_class_from_xml_string(Attribute, xml_string)
class SubjectConfirmation(SubjectConfirmationType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmation element """
c_tag = 'SubjectConfirmation'
c_namespace = NAMESPACE
c_children = SubjectConfirmationType_.c_children.copy()
c_attributes = SubjectConfirmationType_.c_attributes.copy()
c_child_order = SubjectConfirmationType_.c_child_order[:]
c_cardinality = SubjectConfirmationType_.c_cardinality.copy()
def subject_confirmation_from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmation, xml_string)
class ConditionsType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ConditionsType element """
c_tag = 'ConditionsType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Condition'] = (
'condition',
[Condition])
c_cardinality['condition'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AudienceRestriction'] = (
'audience_restriction',
[AudienceRestriction])
c_cardinality['audience_restriction'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}OneTimeUse'] = (
'one_time_use',
[OneTimeUse])
c_cardinality['one_time_use'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}ProxyRestriction'] = (
'proxy_restriction',
[ProxyRestriction])
c_cardinality['proxy_restriction'] = {"min": 0}
c_attributes['NotBefore'] = ('not_before', 'dateTime', False)
c_attributes['NotOnOrAfter'] = ('not_on_or_after', 'dateTime', False)
c_child_order.extend(['condition', 'audience_restriction', 'one_time_use',
'proxy_restriction'])
def __init__(self,
condition=None,
audience_restriction=None,
one_time_use=None,
proxy_restriction=None,
not_before=None,
not_on_or_after=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.condition = condition or []
self.audience_restriction = audience_restriction or []
self.one_time_use = one_time_use or []
self.proxy_restriction = proxy_restriction or []
self.not_before = not_before
self.not_on_or_after = not_on_or_after
def verify(self):
if self.one_time_use:
if len(self.one_time_use) != 1:
raise Exception("Cannot be used more than once")
if self.proxy_restriction:
if len(self.proxy_restriction) != 1:
raise Exception("Cannot be used more than once")
return SamlBase.verify(self)
def conditions_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ConditionsType_, xml_string)
class AuthnStatementType_(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnStatementType element """
c_tag = 'AuthnStatementType'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}SubjectLocality'] = (
'subject_locality', SubjectLocality)
c_cardinality['subject_locality'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContext'] = (
'authn_context', AuthnContext)
c_attributes['AuthnInstant'] = ('authn_instant', 'dateTime', True)
c_attributes['SessionIndex'] = ('session_index', 'string', False)
c_attributes['SessionNotOnOrAfter'] = ('session_not_on_or_after',
'dateTime', False)
c_child_order.extend(['subject_locality', 'authn_context'])
def __init__(self,
subject_locality=None,
authn_context=None,
authn_instant=None,
session_index=None,
session_not_on_or_after=None,
text=None,
extension_elements=None,
extension_attributes=None):
StatementAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.subject_locality = subject_locality
self.authn_context = authn_context
self.authn_instant = authn_instant
self.session_index = session_index
self.session_not_on_or_after = session_not_on_or_after
def authn_statement_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnStatementType_, xml_string)
class AttributeStatementType_(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeStatementType
element """
c_tag = 'AttributeStatementType'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'] = (
'attribute',
[Attribute])
c_cardinality['attribute'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedAttribute'] = (
'encrypted_attribute',
[EncryptedAttribute])
c_cardinality['encrypted_attribute'] = {"min": 0}
c_child_order.extend(['attribute', 'encrypted_attribute'])
def __init__(self,
attribute=None,
encrypted_attribute=None,
text=None,
extension_elements=None,
extension_attributes=None):
StatementAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.attribute = attribute or []
self.encrypted_attribute = encrypted_attribute or []
def attribute_statement_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeStatementType_,
xml_string)
class SubjectType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectType element """
c_tag = 'SubjectType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}BaseID'] = ('base_id',
BaseID)
c_cardinality['base_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}NameID'] = ('name_id',
NameID)
c_cardinality['name_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedID'] = (
'encrypted_id', EncryptedID)
c_cardinality['encrypted_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}SubjectConfirmation'] = (
'subject_confirmation', [SubjectConfirmation])
c_cardinality['subject_confirmation'] = {"min": 0}
c_child_order.extend(['base_id', 'name_id', 'encrypted_id',
'subject_confirmation'])
def __init__(self,
base_id=None,
name_id=None,
encrypted_id=None,
subject_confirmation=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.base_id = base_id
self.name_id = name_id
self.encrypted_id = encrypted_id
self.subject_confirmation = subject_confirmation or []
def subject_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectType_, xml_string)
class Conditions(ConditionsType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Conditions element """
c_tag = 'Conditions'
c_namespace = NAMESPACE
c_children = ConditionsType_.c_children.copy()
c_attributes = ConditionsType_.c_attributes.copy()
c_child_order = ConditionsType_.c_child_order[:]
c_cardinality = ConditionsType_.c_cardinality.copy()
def conditions_from_string(xml_string):
return saml2.create_class_from_xml_string(Conditions, xml_string)
class AuthnStatement(AuthnStatementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnStatement element """
c_tag = 'AuthnStatement'
c_namespace = NAMESPACE
c_children = AuthnStatementType_.c_children.copy()
c_attributes = AuthnStatementType_.c_attributes.copy()
c_child_order = AuthnStatementType_.c_child_order[:]
c_cardinality = AuthnStatementType_.c_cardinality.copy()
def authn_statement_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnStatement, xml_string)
class AttributeStatement(AttributeStatementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeStatement element """
c_tag = 'AttributeStatement'
c_namespace = NAMESPACE
c_children = AttributeStatementType_.c_children.copy()
c_attributes = AttributeStatementType_.c_attributes.copy()
c_child_order = AttributeStatementType_.c_child_order[:]
c_cardinality = AttributeStatementType_.c_cardinality.copy()
def attribute_statement_from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeStatement, xml_string)
class Subject(SubjectType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Subject element """
c_tag = 'Subject'
c_namespace = NAMESPACE
c_children = SubjectType_.c_children.copy()
c_attributes = SubjectType_.c_attributes.copy()
c_child_order = SubjectType_.c_child_order[:]
c_cardinality = SubjectType_.c_cardinality.copy()
def subject_from_string(xml_string):
return saml2.create_class_from_xml_string(Subject, xml_string)
#..................
# ['AuthzDecisionStatement', 'EvidenceType', 'AdviceType', 'Evidence',
# 'Assertion', 'AssertionType', 'AuthzDecisionStatementType', 'Advice']
class EvidenceType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EvidenceType element """
c_tag = 'EvidenceType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionIDRef'] = (
'assertion_id_ref', [AssertionIDRef])
c_cardinality['assertion_id_ref'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionURIRef'] = (
'assertion_uri_ref', [AssertionURIRef])
c_cardinality['assertion_uri_ref'] = {"min": 0}
c_cardinality['assertion'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedAssertion'] = (
'encrypted_assertion', [EncryptedAssertion])
c_cardinality['encrypted_assertion'] = {"min": 0}
c_child_order.extend(['assertion_id_ref', 'assertion_uri_ref', 'assertion',
'encrypted_assertion'])
def __init__(self,
assertion_id_ref=None,
assertion_uri_ref=None,
assertion=None,
encrypted_assertion=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.assertion_id_ref = assertion_id_ref or []
self.assertion_uri_ref = assertion_uri_ref or []
self.assertion = assertion or []
self.encrypted_assertion = encrypted_assertion or []
def evidence_type__from_string(xml_string):
return saml2.create_class_from_xml_string(EvidenceType_, xml_string)
class Evidence(EvidenceType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Evidence element """
c_tag = 'Evidence'
c_namespace = NAMESPACE
c_children = EvidenceType_.c_children.copy()
c_attributes = EvidenceType_.c_attributes.copy()
c_child_order = EvidenceType_.c_child_order[:]
c_cardinality = EvidenceType_.c_cardinality.copy()
def evidence_from_string(xml_string):
return saml2.create_class_from_xml_string(Evidence, xml_string)
class AuthzDecisionStatementType_(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthzDecisionStatementType
element """
c_tag = 'AuthzDecisionStatementType'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Action'] = (
'action', [Action])
c_cardinality['action'] = {"min": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Evidence'] = (
'evidence', Evidence)
c_cardinality['evidence'] = {"min": 0, "max": 1}
c_attributes['Resource'] = ('resource', 'anyURI', True)
c_attributes['Decision'] = ('decision', DecisionType_, True)
c_child_order.extend(['action', 'evidence'])
def __init__(self,
action=None,
evidence=None,
resource=None,
decision=None,
text=None,
extension_elements=None,
extension_attributes=None):
StatementAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.action = action or []
self.evidence = evidence
self.resource = resource
self.decision = decision
def authz_decision_statement_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthzDecisionStatementType_,
xml_string)
class AuthzDecisionStatement(AuthzDecisionStatementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthzDecisionStatement
element """
c_tag = 'AuthzDecisionStatement'
c_namespace = NAMESPACE
c_children = AuthzDecisionStatementType_.c_children.copy()
c_attributes = AuthzDecisionStatementType_.c_attributes.copy()
c_child_order = AuthzDecisionStatementType_.c_child_order[:]
c_cardinality = AuthzDecisionStatementType_.c_cardinality.copy()
def authz_decision_statement_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthzDecisionStatement,
xml_string)
#..................
# ['Assertion', 'AssertionType', 'AdviceType', 'Advice']
class AssertionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AssertionType element """
c_tag = 'AssertionType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Issuer'] = ('issuer',
Issuer)
c_children['{http://www.w3.org/2000/09/xmldsig#}Signature'] = ('signature',
ds.Signature)
c_cardinality['signature'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Subject'] = ('subject',
Subject)
c_cardinality['subject'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Conditions'] = (
'conditions', Conditions)
c_cardinality['conditions'] = {"min": 0, "max": 1}
c_cardinality['advice'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Statement'] = (
'statement', [Statement])
c_cardinality['statement'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnStatement'] = (
'authn_statement', [AuthnStatement])
c_cardinality['authn_statement'] = {"min": 0}
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}AuthzDecisionStatement'] = (
'authz_decision_statement', [AuthzDecisionStatement])
c_cardinality['authz_decision_statement'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AttributeStatement'] = (
'attribute_statement', [AttributeStatement])
c_cardinality['attribute_statement'] = {"min": 0}
c_attributes['Version'] = ('version', 'string', True)
c_attributes['ID'] = ('id', 'ID', True)
c_attributes['IssueInstant'] = ('issue_instant', 'dateTime', True)
c_child_order.extend(['issuer', 'signature', 'subject', 'conditions',
'advice', 'statement', 'authn_statement',
'authz_decision_statement', 'attribute_statement'])
def __init__(self,
issuer=None,
signature=None,
subject=None,
conditions=None,
advice=None,
statement=None,
authn_statement=None,
authz_decision_statement=None,
attribute_statement=None,
version=None,
id=None,
issue_instant=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.issuer = issuer
self.signature = signature
self.subject = subject
self.conditions = conditions
self.advice = advice
self.statement = statement or []
self.authn_statement = authn_statement or []
self.authz_decision_statement = authz_decision_statement or []
self.attribute_statement = attribute_statement or []
self.version = version
self.id = id
self.issue_instant = issue_instant
def verify(self):
        # An assertion with no statements MUST contain a Subject element
        if self.attribute_statement or self.statement or \
                self.authn_statement or self.authz_decision_statement:
            pass
        elif not self.subject:
            raise MustValueError(
                "An assertion with no statements must contain a Subject "
                "element")
if self.authn_statement and not self.subject:
raise MustValueError(
"An assertion with an AuthnStatement must contain a Subject")
return SamlBase.verify(self)
def assertion_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AssertionType_, xml_string)
class Assertion(AssertionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Assertion element """
c_tag = 'Assertion'
c_namespace = NAMESPACE
c_children = AssertionType_.c_children.copy()
c_attributes = AssertionType_.c_attributes.copy()
c_child_order = AssertionType_.c_child_order[:]
c_cardinality = AssertionType_.c_cardinality.copy()
def assertion_from_string(xml_string):
return saml2.create_class_from_xml_string(Assertion, xml_string)
class AdviceType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AdviceType element """
c_tag = 'AdviceType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionIDRef'] = (
'assertion_id_ref', [AssertionIDRef])
c_cardinality['assertion_id_ref'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionURIRef'] = (
'assertion_uri_ref', [AssertionURIRef])
c_cardinality['assertion_uri_ref'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
c_cardinality['assertion'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedAssertion'] = (
'encrypted_assertion', [EncryptedAssertion])
c_cardinality['encrypted_assertion'] = {"min": 0}
c_child_order.extend(['assertion_id_ref', 'assertion_uri_ref', 'assertion',
'encrypted_assertion'])
c_any = {"namespace": "##other", "processContents": "lax"}
def __init__(self,
assertion_id_ref=None,
assertion_uri_ref=None,
assertion=None,
encrypted_assertion=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.assertion_id_ref = assertion_id_ref or []
self.assertion_uri_ref = assertion_uri_ref or []
self.assertion = assertion or []
self.encrypted_assertion = encrypted_assertion or []
def advice_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AdviceType_, xml_string)
class Advice(AdviceType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Advice element """
c_tag = 'Advice'
c_namespace = NAMESPACE
c_children = AdviceType_.c_children.copy()
c_attributes = AdviceType_.c_attributes.copy()
c_child_order = AdviceType_.c_child_order[:]
c_cardinality = AdviceType_.c_cardinality.copy()
def advice_from_string(xml_string):
return saml2.create_class_from_xml_string(Advice, xml_string)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EvidenceType_.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
Evidence.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
AssertionType_.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Advice'] = (
'advice', Advice)
Assertion.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Advice'] = (
'advice', Advice)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AG_IDNameQualifiers = [
('NameQualifier', 'string', False),
('SPNameQualifier', 'string', False),
]
ELEMENT_FROM_STRING = {
BaseID.c_tag: base_id_from_string,
NameID.c_tag: name_id_from_string,
NameIDType_.c_tag: name_id_type__from_string,
EncryptedElementType_.c_tag: encrypted_element_type__from_string,
EncryptedID.c_tag: encrypted_id_from_string,
Issuer.c_tag: issuer_from_string,
AssertionIDRef.c_tag: assertion_id_ref_from_string,
AssertionURIRef.c_tag: assertion_uri_ref_from_string,
Assertion.c_tag: assertion_from_string,
AssertionType_.c_tag: assertion_type__from_string,
Subject.c_tag: subject_from_string,
SubjectType_.c_tag: subject_type__from_string,
SubjectConfirmation.c_tag: subject_confirmation_from_string,
SubjectConfirmationType_.c_tag: subject_confirmation_type__from_string,
SubjectConfirmationData.c_tag: subject_confirmation_data_from_string,
SubjectConfirmationDataType_.c_tag:
subject_confirmation_data_type__from_string,
KeyInfoConfirmationDataType_.c_tag:
key_info_confirmation_data_type__from_string,
Conditions.c_tag: conditions_from_string,
ConditionsType_.c_tag: conditions_type__from_string,
Condition.c_tag: condition_from_string,
AudienceRestriction.c_tag: audience_restriction_from_string,
AudienceRestrictionType_.c_tag: audience_restriction_type__from_string,
Audience.c_tag: audience_from_string,
OneTimeUse.c_tag: one_time_use_from_string,
OneTimeUseType_.c_tag: one_time_use_type__from_string,
ProxyRestriction.c_tag: proxy_restriction_from_string,
ProxyRestrictionType_.c_tag: proxy_restriction_type__from_string,
Advice.c_tag: advice_from_string,
AdviceType_.c_tag: advice_type__from_string,
EncryptedAssertion.c_tag: encrypted_assertion_from_string,
Statement.c_tag: statement_from_string,
AuthnStatement.c_tag: authn_statement_from_string,
AuthnStatementType_.c_tag: authn_statement_type__from_string,
SubjectLocality.c_tag: subject_locality_from_string,
SubjectLocalityType_.c_tag: subject_locality_type__from_string,
AuthnContext.c_tag: authn_context_from_string,
AuthnContextType_.c_tag: authn_context_type__from_string,
AuthnContextClassRef.c_tag: authn_context_class_ref_from_string,
AuthnContextDeclRef.c_tag: authn_context_decl_ref_from_string,
AuthnContextDecl.c_tag: authn_context_decl_from_string,
AuthenticatingAuthority.c_tag: authenticating_authority_from_string,
AuthzDecisionStatement.c_tag: authz_decision_statement_from_string,
AuthzDecisionStatementType_.c_tag:
authz_decision_statement_type__from_string,
DecisionType_.c_tag: decision_type__from_string,
Action.c_tag: action_from_string,
ActionType_.c_tag: action_type__from_string,
Evidence.c_tag: evidence_from_string,
EvidenceType_.c_tag: evidence_type__from_string,
AttributeStatement.c_tag: attribute_statement_from_string,
AttributeStatementType_.c_tag: attribute_statement_type__from_string,
Attribute.c_tag: attribute_from_string,
AttributeType_.c_tag: attribute_type__from_string,
AttributeValue.c_tag: attribute_value_from_string,
EncryptedAttribute.c_tag: encrypted_attribute_from_string,
}
ELEMENT_BY_TAG = {
'BaseID': BaseID,
'NameID': NameID,
'NameIDType': NameIDType_,
'EncryptedElementType': EncryptedElementType_,
'EncryptedID': EncryptedID,
'Issuer': Issuer,
'AssertionIDRef': AssertionIDRef,
'AssertionURIRef': AssertionURIRef,
'Assertion': Assertion,
'AssertionType': AssertionType_,
'Subject': Subject,
'SubjectType': SubjectType_,
'SubjectConfirmation': SubjectConfirmation,
'SubjectConfirmationType': SubjectConfirmationType_,
'SubjectConfirmationData': SubjectConfirmationData,
'SubjectConfirmationDataType': SubjectConfirmationDataType_,
'KeyInfoConfirmationDataType': KeyInfoConfirmationDataType_,
'Conditions': Conditions,
'ConditionsType': ConditionsType_,
'Condition': Condition,
'AudienceRestriction': AudienceRestriction,
'AudienceRestrictionType': AudienceRestrictionType_,
'Audience': Audience,
'OneTimeUse': OneTimeUse,
'OneTimeUseType': OneTimeUseType_,
'ProxyRestriction': ProxyRestriction,
'ProxyRestrictionType': ProxyRestrictionType_,
'Advice': Advice,
'AdviceType': AdviceType_,
'EncryptedAssertion': EncryptedAssertion,
'Statement': Statement,
'AuthnStatement': AuthnStatement,
'AuthnStatementType': AuthnStatementType_,
'SubjectLocality': SubjectLocality,
'SubjectLocalityType': SubjectLocalityType_,
'AuthnContext': AuthnContext,
'AuthnContextType': AuthnContextType_,
'AuthnContextClassRef': AuthnContextClassRef,
'AuthnContextDeclRef': AuthnContextDeclRef,
'AuthnContextDecl': AuthnContextDecl,
'AuthenticatingAuthority': AuthenticatingAuthority,
'AuthzDecisionStatement': AuthzDecisionStatement,
'AuthzDecisionStatementType': AuthzDecisionStatementType_,
'DecisionType': DecisionType_,
'Action': Action,
'ActionType': ActionType_,
'Evidence': Evidence,
'EvidenceType': EvidenceType_,
'AttributeStatement': AttributeStatement,
'AttributeStatementType': AttributeStatementType_,
'Attribute': Attribute,
'AttributeType': AttributeType_,
'AttributeValue': AttributeValue,
'EncryptedAttribute': EncryptedAttribute,
'BaseIDAbstractType': BaseIDAbstractType_,
'ConditionAbstractType': ConditionAbstractType_,
'StatementAbstractType': StatementAbstractType_,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
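# Usage sketch (illustrative attribute values, not taken from any SAML
# document): factory() simply looks the tag up in ELEMENT_BY_TAG and forwards
# the keyword arguments to that class's constructor.
if __name__ == '__main__':
    assertion = factory('Assertion', version='2.0', id='id-1234',
                        issue_instant='2014-01-01T00:00:00Z')
    print(assertion.c_tag)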
| 1.125 | 1 |
examples/web/handlers.py | nicoddemus/aioworkers | 45 | 1871 |
async def handler(context):
return await context.data
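# Note: `context.data` is awaited above, so it must be an awaitable resource;
# presumably it is declared in the accompanying aioworkers YAML config and the
# handler is referenced there by dotted path (assumption, not verified here).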
| 0.609375 | 1 |
neutron/db/models/l3ha.py | cleo4zheng/neutron | 4 | 1879 | # Copyright (C) 2014 eNovance SAS <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.common import constants as n_const
from neutron.db.models import agent as agent_model
from neutron.db import models_v2
class L3HARouterAgentPortBinding(model_base.BASEV2):
"""Represent agent binding state of a HA router port.
A HA Router has one HA port per agent on which it is spawned.
This binding table stores which port is used for a HA router by a
L3 agent.
"""
__tablename__ = 'ha_router_agent_port_bindings'
__table_args__ = (
sa.UniqueConstraint(
'router_id', 'l3_agent_id',
name='uniq_ha_router_agent_port_bindings0port_id0l3_agent_id'),
model_base.BASEV2.__table_args__
)
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
ondelete='CASCADE'),
nullable=False, primary_key=True)
port = orm.relationship(models_v2.Port)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id',
ondelete='CASCADE'),
nullable=False)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id",
ondelete='CASCADE'))
agent = orm.relationship(agent_model.Agent)
state = sa.Column(sa.Enum(n_const.HA_ROUTER_STATE_ACTIVE,
n_const.HA_ROUTER_STATE_STANDBY,
name='l3_ha_states'),
default=n_const.HA_ROUTER_STATE_STANDBY,
server_default=n_const.HA_ROUTER_STATE_STANDBY)
class L3HARouterNetwork(model_base.BASEV2, model_base.HasProjectPrimaryKey):
"""Host HA network for a tenant.
One HA Network is used per tenant, all HA router ports are created
on this network.
"""
__tablename__ = 'ha_router_networks'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False, primary_key=True)
network = orm.relationship(models_v2.Network)
class L3HARouterVRIdAllocation(model_base.BASEV2):
"""VRID allocation per HA network.
Keep a track of the VRID allocations per HA network.
"""
__tablename__ = 'ha_router_vrid_allocations'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False, primary_key=True)
vr_id = sa.Column(sa.Integer(), nullable=False, primary_key=True)
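# Illustrative sketch (not part of Neutron itself): how the L3 HA machinery
# might construct a binding row, assuming an open SQLAlchemy session and valid
# UUIDs for the foreign keys.
def _example_bind_router(session, router_id, port_id, l3_agent_id):
    binding = L3HARouterAgentPortBinding(
        router_id=router_id,
        port_id=port_id,
        l3_agent_id=l3_agent_id,
        # new bindings start standby; keepalived later promotes one to active
        state=n_const.HA_ROUTER_STATE_STANDBY,
    )
    session.add(binding)
    return binding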
| 1.351563 | 1 |
genlist.py | truckli/technotes | 0 | 1887 | #!/usr/bin/env python
import shutil, re, os, sys
file_model = "Model.template"
bookname = "TechNotes"
file_bibtex = "thebib.bib"
folder_target = "../pdf/"
#if name is a chapter, return its sections
def get_sections(name):
if not os.path.isdir(name):
return []
files = os.listdir(name)
sections = []
for section in files:
        if re.match(r'.*\.tex$', section) and not re.match(".*lmz0610.*", section):
sections.append(name + "/" + section)
return sections
def is_updated(pdffile, texfiles):
def depend_modified(fname, ims):
depend_mtime = os.path.getmtime(fname)
if depend_mtime > ims:
            print(pdffile, ' mtime: ', ims)
            print(fname, ' mtime: ', depend_mtime)
return True
return False
old_pdffile = folder_target + pdffile
if not os.path.isfile(old_pdffile):
return False
pdf_mtime = os.path.getmtime(old_pdffile)
#if depend_modified(sys.argv[0], pdf_mtime):
#return False
#if depend_modified(file_model, pdf_mtime):
#return False
for section in texfiles:
if depend_modified(section, pdf_mtime):
return False
return True
def remove_tmp(tmpname):
if os.path.isfile(tmpname):
os.remove(tmpname)
def remove_latex_tmps(texname):
remove_tmp(texname + ".pdf")
remove_tmp(texname + ".tex")
remove_tmp(texname + ".blg")
remove_tmp(texname + ".bbl")
remove_tmp(texname + ".out")
remove_tmp(texname + ".toc")
remove_tmp(texname + ".aux")
remove_tmp(texname + ".idx")
remove_tmp(texname + ".log")
remove_tmp(texname + ".lof")
remove_tmp(texname + ".lot")
def read_bbl_file(object_name):
file_bbl = object_name + ".bbl"
if not os.path.isfile(file_bbl):
return ""
with open(file_bbl, 'r') as f:
return f.read()
#if depend_files contains citation
def need_bibtex(object_name, depend_files):
#if a file contains latex citation command \cite{}
def contain_citation(section_name):
with open(section_name, "r") as f:
content_section = f.read()
if content_section.find("\\cite{") == -1:
return False
return True
for section in depend_files:
if contain_citation(section):
return True
return False
def gen_pdf(object_name):
object_pdf = object_name + ".pdf"
if object_name == bookname:
depend_files = book_sections
targets = [folder_target + object_pdf, folder_target + "AAAAAAAAAAA.pdf"]
chapter_start_counter = 0
else:
depend_files = chap_sections[object_name]
targets = [folder_target + object_pdf]
chapter_start_counter = book_chapters.index(object_name)
# if is_updated(object_pdf, depend_files):
# print(object_pdf + " is updated")
# return False
obj_need_bibtex = need_bibtex(object_name, depend_files)
model = ''
with open(file_model) as model_file:
model = model_file.read()
model = model.replace("OBJECTNAME", object_name)
if object_name == 'Report':
model = model.replace("CHAPTERSTART", "0")
model = model.replace("\\tableofcontents", "%\\tableofcontents")
model = model.replace("ctexrep", "ctexart")
model = model.replace("\\setcounter{chapter}", "%\\setcounter{chapter}")
else:
model = model.replace("CHAPTERSTART", str(chapter_start_counter))
insert_word = "TOADD"
insert_pos = model.find(insert_word)
latex_text = model[:insert_pos] + insert_word
for section in depend_files:
latex_text = latex_text + "\n\\input{"+ section + "}"
#prepend text encoding mode line
section_text = ""
with open(section, 'r') as f:
line = f.readline()
if line[:6] != '%!Mode':
section_text = '%!Mode:: "TeX:UTF-8"\n' + line + f.read()
if section_text != "":
with open(section, 'w') as f:
f.write(section_text)
if obj_need_bibtex:
latex_text = latex_text + "\n\n"
latex_text = latex_text + "\\bibliographystyle{unsrt}\n"
latex_text = latex_text + "\\bibliography{thebib}\n"
latex_text = latex_text + model[insert_pos+len(insert_word):]
object_tex = object_name + ".tex"
with open(object_tex, "w") as f:
f.write(latex_text)
# os.system("xelatex " + object_name)
# if len(sys.argv) < 3 or sys.argv[2] != "fast":
# if obj_need_bibtex:
# old_bbl = read_bbl_file(object_name)
# os.system("bibtex " + object_name)
# if old_bbl != read_bbl_file(object_name):
# os.system("xelatex " + object_name)
# os.system("xelatex " + object_name)
#
# if os.path.isfile(object_pdf):
# for target in targets:
# shutil.copy(object_pdf, target)
return True
#trim trailing slash
def trim_chap_name(name):
if name[len(name) - 1] == '/':
name = name[:len(name)-1]
return name
def merge_chapter_pdfs():
mergecmd = 'pdftk '
for chap in book_chapters:
chappdf = folder_target + chap + '.pdf'
if os.path.isfile(chappdf):
mergecmd += chappdf + ' '
mergecmd += 'cat output ' + folder_target + 'AAABBBBBBBB.pdf'
    print(mergecmd)
os.system(mergecmd)
##################################################
#now work starts
files = os.listdir('.')
chap_sections = {}
book_sections = []
book_chapters = []
for chap in files:
sections = get_sections(chap)
if len(sections):
chap_sections[chap] = sections
book_sections.extend(sections)
book_chapters.append(chap)
cmd = "one"
if cmd == "one":
gen_pdf(bookname)
elif cmd == "all":
modified = False
for chap in chap_sections:
modified = gen_pdf(chap) or modified
if modified:
merge_chapter_pdfs()
elif cmd == "clean":
for chap in chap_sections:
remove_latex_tmps(chap)
remove_latex_tmps(bookname)
else:
chap = trim_chap_name(cmd)
if chap in book_sections:
#chap is actually a section
section = chap
chap = 'Report'
chap_sections[chap] = [section]
book_chapters.append(chap)
    if chap not in chap_sections:
print(chap + " is not a valid chapter name")
sys.exit(1)
modified = gen_pdf(chap)
if modified and chap != 'Report':
merge_chapter_pdfs()
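# Usage sketch: `cmd` above is hardcoded to "one", but the commented-out
# sys.argv checks and the "all"/"clean"/<chapter> branches suggest it was
# meant to come from the command line, e.g.:
#   cmd = sys.argv[1] if len(sys.argv) > 1 else "one"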
| 1.757813 | 2 |
python_survey/finished_files/main.py | trenton3983/PyCharmProjects | 0 | 1895 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from finished_files.survey_data_dictionary import DATA_DICTIONARY
# Load data
# We want to take the names list from our data dictionary
names = [x.name for x in DATA_DICTIONARY]
# Generate the list of names to import
usecols = [x.name for x in DATA_DICTIONARY if x.usecol]
# dtypes should be a dict of 'col_name' : dtype
dtypes = {x.name : x.dtype for x in DATA_DICTIONARY if x.dtype}
# same for converters
converters = {x.name : x.converter for x in DATA_DICTIONARY if x.converter}
df = pd.read_csv('data/survey.csv',
header=0,
names=names,
dtype=dtypes,
converters=converters,
usecols=usecols)
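# For reference: the comprehensions above assume each DATA_DICTIONARY entry
# exposes .name, .usecol, .dtype and .converter attributes (the actual
# definitions live in survey_data_dictionary.py, not shown here).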
#%% Clean up data: remove disqualified users
# In the survey, any user who selected they don't use Python was then
# disqualified from the rest of the survey. So let's drop them here.
df = df[df['python_main'] != 'No, I don’t use Python for my current projects']
# Considering we now only have two categories left:
# - Yes
# - No, I use Python for secondary projects only
# Let's turn it into a bool
df['python_main'] = df['python_main'] == 'Yes'
#%% Plot the web dev / data scientist ratio
# In the survey, respondents were asked to estimate the ratio between
# the amount of web developers vs the amount of data scientists. Afterwards
# they were asked what they thought the most popular answer would be.
# Let's see if there's a difference!
# This is a categorical data point, and it's already ordered in the data
# dictionary. So we shouldn't sort it after counting the values.
ratio_self = df['webdev_science_ratio_self'].value_counts(sort=False)
ratio_others = df['webdev_science_ratio_others'].value_counts(sort=False)
# Let's draw a bar chart comparing the distributions
fig = plt.figure()
ax = fig.add_subplot(111)
RATIO_COUNT = ratio_self.count()
x = np.arange(RATIO_COUNT)
WIDTH = 0.4
self_bars = ax.bar(x-WIDTH, ratio_self, width=WIDTH, color='b', align='center')
others_bars = ax.bar(x, ratio_others, width=WIDTH, color='g', align='center')
ax.set_xlabel('Ratios')
ax.set_ylabel('Observations')
labels = [str(lbl) for lbl in ratio_self.index]
ax.set_xticks(x - 0.5 * WIDTH)
ax.set_xticklabels(labels)
ax.legend((self_bars[0], others_bars[0]),
('Self', 'Most popular'))
plt.show()
#%% Calculate the predicted totals
# Let's recode the ratios to numbers, and calculate the means
CONVERSION = {
'10:1': 10,
'5:1' : 5,
'2:1' : 2,
'1:1' : 1,
'1:2' : 0.5,
'1:5' : 0.2,
'1:10': 0.1
}
self_numeric = df['webdev_science_ratio_self'] \
.replace(CONVERSION.keys(), CONVERSION.values())
others_numeric = df['webdev_science_ratio_others'] \
.replace(CONVERSION.keys(), CONVERSION.values())
print(f'Self:\t\t{self_numeric.mean().round(2)} web devs / scientist')
print(f'Others:\t\t{others_numeric.mean().round(2)} web devs / scientist')
#%% Is the difference statistically significant?
result = scipy.stats.chisquare(ratio_self, ratio_others)
# The null hypothesis is that they're the same. Let's see if we can reject it
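# A hedged interpretation sketch before dumping the raw result below; the 5%
# significance level is our choice, not something stated by the survey.
ALPHA = 0.05
if result.pvalue < ALPHA:
    print('The two distributions differ significantly (reject H0)')
else:
    print('No significant difference detected (fail to reject H0)')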
print(result) | 2.359375 | 2 |
loldib/getratings/models/NA/na_talon/na_talon_jng.py | koliupy/loldib | 0 | 1903 | from getratings.models.ratings import Ratings
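# Each class below is an empty marker subclass of Ratings, one per champion
# for the NA Talon jungle matchup table; presumably the plugin resolves these
# classes by name at runtime (assumption inferred from the naming scheme).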
class NA_Talon_Jng_Aatrox(Ratings):
pass
class NA_Talon_Jng_Ahri(Ratings):
pass
class NA_Talon_Jng_Akali(Ratings):
pass
class NA_Talon_Jng_Alistar(Ratings):
pass
class NA_Talon_Jng_Amumu(Ratings):
pass
class NA_Talon_Jng_Anivia(Ratings):
pass
class NA_Talon_Jng_Annie(Ratings):
pass
class NA_Talon_Jng_Ashe(Ratings):
pass
class NA_Talon_Jng_AurelionSol(Ratings):
pass
class NA_Talon_Jng_Azir(Ratings):
pass
class NA_Talon_Jng_Bard(Ratings):
pass
class NA_Talon_Jng_Blitzcrank(Ratings):
pass
class NA_Talon_Jng_Brand(Ratings):
pass
class NA_Talon_Jng_Braum(Ratings):
pass
class NA_Talon_Jng_Caitlyn(Ratings):
pass
class NA_Talon_Jng_Camille(Ratings):
pass
class NA_Talon_Jng_Cassiopeia(Ratings):
pass
class NA_Talon_Jng_Chogath(Ratings):
pass
class NA_Talon_Jng_Corki(Ratings):
pass
class NA_Talon_Jng_Darius(Ratings):
pass
class NA_Talon_Jng_Diana(Ratings):
pass
class NA_Talon_Jng_Draven(Ratings):
pass
class NA_Talon_Jng_DrMundo(Ratings):
pass
class NA_Talon_Jng_Ekko(Ratings):
pass
class NA_Talon_Jng_Elise(Ratings):
pass
class NA_Talon_Jng_Evelynn(Ratings):
pass
class NA_Talon_Jng_Ezreal(Ratings):
pass
class NA_Talon_Jng_Fiddlesticks(Ratings):
pass
class NA_Talon_Jng_Fiora(Ratings):
pass
class NA_Talon_Jng_Fizz(Ratings):
pass
class NA_Talon_Jng_Galio(Ratings):
pass
class NA_Talon_Jng_Gangplank(Ratings):
pass
class NA_Talon_Jng_Garen(Ratings):
pass
class NA_Talon_Jng_Gnar(Ratings):
pass
class NA_Talon_Jng_Gragas(Ratings):
pass
class NA_Talon_Jng_Graves(Ratings):
pass
class NA_Talon_Jng_Hecarim(Ratings):
pass
class NA_Talon_Jng_Heimerdinger(Ratings):
pass
class NA_Talon_Jng_Illaoi(Ratings):
pass
class NA_Talon_Jng_Irelia(Ratings):
pass
class NA_Talon_Jng_Ivern(Ratings):
pass
class NA_Talon_Jng_Janna(Ratings):
pass
class NA_Talon_Jng_JarvanIV(Ratings):
pass
class NA_Talon_Jng_Jax(Ratings):
pass
class NA_Talon_Jng_Jayce(Ratings):
pass
class NA_Talon_Jng_Jhin(Ratings):
pass
class NA_Talon_Jng_Jinx(Ratings):
pass
class NA_Talon_Jng_Kalista(Ratings):
pass
class NA_Talon_Jng_Karma(Ratings):
pass
class NA_Talon_Jng_Karthus(Ratings):
pass
class NA_Talon_Jng_Kassadin(Ratings):
pass
class NA_Talon_Jng_Katarina(Ratings):
pass
class NA_Talon_Jng_Kayle(Ratings):
pass
class NA_Talon_Jng_Kayn(Ratings):
pass
class NA_Talon_Jng_Kennen(Ratings):
pass
class NA_Talon_Jng_Khazix(Ratings):
pass
class NA_Talon_Jng_Kindred(Ratings):
pass
class NA_Talon_Jng_Kled(Ratings):
pass
class NA_Talon_Jng_KogMaw(Ratings):
pass
class NA_Talon_Jng_Leblanc(Ratings):
pass
class NA_Talon_Jng_LeeSin(Ratings):
pass
class NA_Talon_Jng_Leona(Ratings):
pass
class NA_Talon_Jng_Lissandra(Ratings):
pass
class NA_Talon_Jng_Lucian(Ratings):
pass
class NA_Talon_Jng_Lulu(Ratings):
pass
class NA_Talon_Jng_Lux(Ratings):
pass
class NA_Talon_Jng_Malphite(Ratings):
pass
class NA_Talon_Jng_Malzahar(Ratings):
pass
class NA_Talon_Jng_Maokai(Ratings):
pass
class NA_Talon_Jng_MasterYi(Ratings):
pass
class NA_Talon_Jng_MissFortune(Ratings):
pass
class NA_Talon_Jng_MonkeyKing(Ratings):
pass
class NA_Talon_Jng_Mordekaiser(Ratings):
pass
class NA_Talon_Jng_Morgana(Ratings):
pass
class NA_Talon_Jng_Nami(Ratings):
pass
class NA_Talon_Jng_Nasus(Ratings):
pass
class NA_Talon_Jng_Nautilus(Ratings):
pass
class NA_Talon_Jng_Nidalee(Ratings):
pass
class NA_Talon_Jng_Nocturne(Ratings):
pass
class NA_Talon_Jng_Nunu(Ratings):
pass
class NA_Talon_Jng_Olaf(Ratings):
pass
class NA_Talon_Jng_Orianna(Ratings):
pass
class NA_Talon_Jng_Ornn(Ratings):
pass
class NA_Talon_Jng_Pantheon(Ratings):
pass
class NA_Talon_Jng_Poppy(Ratings):
pass
class NA_Talon_Jng_Quinn(Ratings):
pass
class NA_Talon_Jng_Rakan(Ratings):
pass
class NA_Talon_Jng_Rammus(Ratings):
pass
class NA_Talon_Jng_RekSai(Ratings):
pass
class NA_Talon_Jng_Renekton(Ratings):
pass
class NA_Talon_Jng_Rengar(Ratings):
pass
class NA_Talon_Jng_Riven(Ratings):
pass
class NA_Talon_Jng_Rumble(Ratings):
pass
class NA_Talon_Jng_Ryze(Ratings):
pass
class NA_Talon_Jng_Sejuani(Ratings):
pass
class NA_Talon_Jng_Shaco(Ratings):
pass
class NA_Talon_Jng_Shen(Ratings):
pass
class NA_Talon_Jng_Shyvana(Ratings):
pass
class NA_Talon_Jng_Singed(Ratings):
pass
class NA_Talon_Jng_Sion(Ratings):
pass
class NA_Talon_Jng_Sivir(Ratings):
pass
class NA_Talon_Jng_Skarner(Ratings):
pass
class NA_Talon_Jng_Sona(Ratings):
pass
class NA_Talon_Jng_Soraka(Ratings):
pass
class NA_Talon_Jng_Swain(Ratings):
pass
class NA_Talon_Jng_Syndra(Ratings):
pass
class NA_Talon_Jng_TahmKench(Ratings):
pass
class NA_Talon_Jng_Taliyah(Ratings):
pass
class NA_Talon_Jng_Talon(Ratings):
pass
class NA_Talon_Jng_Taric(Ratings):
pass
class NA_Talon_Jng_Teemo(Ratings):
pass
class NA_Talon_Jng_Thresh(Ratings):
pass
class NA_Talon_Jng_Tristana(Ratings):
pass
class NA_Talon_Jng_Trundle(Ratings):
pass
class NA_Talon_Jng_Tryndamere(Ratings):
pass
class NA_Talon_Jng_TwistedFate(Ratings):
pass
class NA_Talon_Jng_Twitch(Ratings):
pass
class NA_Talon_Jng_Udyr(Ratings):
pass
class NA_Talon_Jng_Urgot(Ratings):
pass
class NA_Talon_Jng_Varus(Ratings):
pass
class NA_Talon_Jng_Vayne(Ratings):
pass
class NA_Talon_Jng_Veigar(Ratings):
pass
class NA_Talon_Jng_Velkoz(Ratings):
pass
class NA_Talon_Jng_Vi(Ratings):
pass
class NA_Talon_Jng_Viktor(Ratings):
pass
class NA_Talon_Jng_Vladimir(Ratings):
pass
class NA_Talon_Jng_Volibear(Ratings):
pass
class NA_Talon_Jng_Warwick(Ratings):
pass
class NA_Talon_Jng_Xayah(Ratings):
pass
class NA_Talon_Jng_Xerath(Ratings):
pass
class NA_Talon_Jng_XinZhao(Ratings):
pass
class NA_Talon_Jng_Yasuo(Ratings):
pass
class NA_Talon_Jng_Yorick(Ratings):
pass
class NA_Talon_Jng_Zac(Ratings):
pass
class NA_Talon_Jng_Zed(Ratings):
pass
class NA_Talon_Jng_Ziggs(Ratings):
pass
class NA_Talon_Jng_Zilean(Ratings):
pass
class NA_Talon_Jng_Zyra(Ratings):
pass
| 0.75 | 1 |
src/unittest/python/merciful_elo_limit_tests.py | mgaertne/minqlx-plugin-tests | 4 | 1927 | from minqlx_plugin_test import *
import logging
import unittest
from mockito import *
from mockito.matchers import *
from hamcrest import *
from redis import Redis
from merciful_elo_limit import *
class MercifulEloLimitTests(unittest.TestCase):
def setUp(self):
setup_plugin()
setup_cvars({
"qlx_mercifulelo_minelo": "800",
"qlx_mercifulelo_applicationgames": "10",
"qlx_mercifulelo_abovegames": "10",
"qlx_mercifulelo_daysbanned": "30",
"qlx_owner": "42"
})
setup_game_in_progress()
self.plugin = merciful_elo_limit()
self.reply_channel = mocked_channel()
self.plugin.database = Redis
self.db = mock(Redis)
self.plugin._db_instance = self.db
when(self.db).__getitem__(any).thenReturn("42")
def tearDown(self):
unstub()
def setup_balance_ratings(self, player_elos):
gametype = None
if len(player_elos) > 0:
gametype = self.plugin.game.type_short
ratings = {}
for player, elo in player_elos:
ratings[player.steam_id] = {gametype: {'elo': elo}}
self.plugin._loaded_plugins["balance"] = mock({'ratings': ratings})
def setup_no_balance_plugin(self):
if "balance" in self.plugin._loaded_plugins:
del self.plugin._loaded_plugins["balance"]
def setup_exception_list(self, players):
mybalance_plugin = mock(Plugin)
mybalance_plugin.exceptions = [player.steam_id for player in players]
self.plugin._loaded_plugins["mybalance"] = mybalance_plugin
def test_handle_map_change_resets_tracked_player_ids(self):
connected_players()
self.setup_balance_ratings([])
self.plugin.tracked_player_sids = [123, 455]
self.plugin.handle_map_change("campgrounds", "ca")
assert_that(self.plugin.tracked_player_sids, is_([]))
def test_handle_map_change_resets_announced_player_ids(self):
connected_players()
self.setup_balance_ratings([])
self.plugin.announced_player_elos = [123, 455]
self.plugin.handle_map_change("campgrounds", "ca")
assert_that(self.plugin.announced_player_elos, is_([]))
def test_handle_map_change_fetches_elos_of_connected_players(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 1200)})
self.plugin.handle_map_change("thunderstruck", "ca")
verify(self.plugin._loaded_plugins["balance"]).add_request(
{player1.steam_id: 'ca', player2.steam_id: 'ca'},
self.plugin.callback_ratings, CHAT_CHANNEL
)
def test_handle_player_connect_fetches_elo_of_connecting_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connecting_player = fake_player(789, "Connecting Player")
connected_players(player1, player2, connecting_player)
self.setup_balance_ratings({(player1, 900), (player2, 1200), (connecting_player, 1542)})
self.plugin.handle_player_connect(connecting_player)
verify(self.plugin._loaded_plugins["balance"]).add_request(
{connecting_player.steam_id: 'ca'},
self.plugin.callback_ratings, CHAT_CHANNEL
)
def test_fetch_elos_of_players_with_no_game_setup(self):
setup_no_game()
self.setup_balance_ratings({})
self.plugin.fetch_elos_of_players([])
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_fetch_elos_of_players_with_unsupported_gametype(self):
setup_game_in_progress("unsupported")
self.setup_balance_ratings({})
self.plugin.fetch_elos_of_players([])
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_fetch_elos_of_player_with_no_balance_plugin(self):
mocked_logger = mock(spec=logging.Logger)
spy2(minqlx.get_logger)
when(minqlx).get_logger(self.plugin).thenReturn(mocked_logger)
self.setup_no_balance_plugin()
self.plugin.fetch_elos_of_players([])
verify(mocked_logger).warning(matches("Balance plugin not found.*"))
def test_handle_round_countdown_with_no_game(self):
setup_no_game()
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({})
self.plugin.handle_round_countdown(1)
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_handle_round_countdown_fetches_elos_of_players_in_teams(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({(player1, 900), (player2, 1200), (player3, 1600)})
self.plugin.handle_round_countdown(4)
verify(self.plugin._loaded_plugins["balance"]).add_request(
{player1.steam_id: 'ca', player2.steam_id: 'ca'},
self.plugin.callback_ratings, CHAT_CHANNEL
)
def test_callback_ratings_with_no_game_running(self):
setup_no_game()
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({})
self.plugin.callback_ratings([], minqlx.CHAT_CHANNEL)
verify(self.db, times=0).get(any)
def test_callback_ratings_with_unsupported_game_type(self):
setup_game_in_progress("unsupported")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Speccing Player", team="spectator")
connected_players(player1, player2, player3)
self.setup_balance_ratings({})
self.plugin.callback_ratings([], minqlx.CHAT_CHANNEL)
verify(self.db, times=0).get(any)
def test_callback_ratings_warns_low_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
verify(player2, times=12).center_print(matches(".*Skill warning.*8.*matches left.*"))
verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*8.*of 10 application matches.*"))
def test_callback_ratings_announces_information_to_other_players(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
assert_plugin_sent_to_console(matches("Fake Player2.*is below.*, but has.*8.*application matches left.*"))
def test_callback_ratings_announces_information_to_other_players_just_once_per_connect(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
self.plugin.announced_player_elos = [456]
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
assert_plugin_sent_to_console(matches("Player.*is below.*, but has 8 application matches left.*"), times=0)
def test_callback_ratings_makes_exception_for_player_in_exception_list(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="red")
connected_players(player1, player2, player3)
self.setup_balance_ratings({(player1, 900), (player2, 799), (player3, 600)})
self.setup_exception_list([player3])
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn("2")
self.plugin.callback_ratings([player1, player2, player3], minqlx.CHAT_CHANNEL)
verify(player2, times=12).center_print(matches(".*Skill warning.*8.*matches left.*"))
verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*8.*of 10 application matches.*"))
verify(player3, times=0).center_print(any)
verify(player3, times=0).tell(any)
def test_callback_ratings_warns_low_elo_player_when_application_games_not_set(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
patch(minqlx.next_frame, lambda func: func)
patch(minqlx.thread, lambda func: func)
patch(time.sleep, lambda int: None)
when(self.db).get(any).thenReturn(None)
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
verify(player2, times=12).center_print(matches(".*Skill warning.*10.*matches left.*"))
verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*10.*of 10 application matches.*"))
def test_callback_ratings_bans_low_elo_players_that_used_up_their_application_games(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("11")
spy2(minqlx.COMMANDS.handle_input)
when2(minqlx.COMMANDS.handle_input, any, any, any).thenReturn(None)
patch(minqlx.PlayerInfo, lambda *args: mock(spec=minqlx.PlayerInfo))
patch(minqlx.next_frame, lambda func: func)
when(self.db).delete(any).thenReturn(None)
self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
verify(minqlx.COMMANDS).handle_input(any, any, any)
verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
verify(self.db).delete("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
def test_handle_round_start_increases_application_games_for_untracked_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
def test_handle_round_start_makes_exception_for_player_in_exception_list(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="red")
connected_players(player1, player2, player3)
self.setup_balance_ratings({(player1, 900), (player2, 799), (player3, 600)})
self.setup_exception_list([player3])
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
verify(self.db, times=0).incr("minqlx:players:{}:minelo:freegames".format(player3.steam_id))
def test_handle_round_start_starts_tracking_for_low_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
assert_that(self.plugin.tracked_player_sids, has_item(player2.steam_id))
def test_handle_round_start_resets_above_games_for_low_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
def test_handle_round_start_increases_above_games_for_application_games_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
def test_handle_round_start_increases_above_games_for_application_games_player_with_no_above_games_set(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("1")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).incr("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
def test_handle_round_start_starts_tracking_of_above_elo_players_for_application_games_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("3")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
assert_that(self.plugin.tracked_player_sids, has_item(player2.steam_id))
def test_handle_round_start_removes_minelo_db_entries_for_above_elo_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({(player1, 900), (player2, 801)})
when(self.db).get(any).thenReturn("11")
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(True)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db).delete("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
def test_handle_round_start_skips_already_tracked_player(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.plugin.tracked_player_sids.append(player2.steam_id)
self.setup_balance_ratings({(player1, 900), (player2, 799)})
when(self.db).get(any).thenReturn(3)
when(self.db).delete(any).thenReturn(None)
when(self.db).exists(any).thenReturn(False)
when(self.db).incr(any).thenReturn(None)
self.plugin.handle_round_start(1)
verify(self.db, times=0).delete(any)
verify(self.db, times=0).incr(any)
def test_handle_round_start_with_unsupported_gametype(self):
setup_game_in_progress("unsupported")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
self.setup_balance_ratings({})
self.plugin.handle_round_start(2)
verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)
def test_handle_round_start_with_no_balance_plugin(self):
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
connected_players(player1, player2)
mocked_logger = mock(spec=logging.Logger)
spy2(minqlx.get_logger)
when(minqlx).get_logger(self.plugin).thenReturn(mocked_logger)
self.setup_no_balance_plugin()
self.plugin.handle_round_start(5)
verify(mocked_logger, atleast=1).warning(matches("Balance plugin not found.*"))
def test_cmd_mercis_shows_currently_connected_merciful_players(self):
player = fake_player(666, "Cmd using Player")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="blue")
connected_players(player, player1, player2, player3)
self.setup_balance_ratings({(player, 1400), (player1, 801), (player2, 799), (player3, 900)})
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player1.steam_id)).thenReturn("2")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player2.steam_id)).thenReturn("3")
when(self.db).get("minqlx:players:{}:minelo:abovegames".format(player1.steam_id)).thenReturn("6")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player.steam_id)).thenReturn(None)
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player3.steam_id)).thenReturn(None)
self.plugin.cmd_mercis(player, ["!mercis"], self.reply_channel)
assert_channel_was_replied(self.reply_channel, matches(r"Fake Player1 \(elo: 801\):.*8.*application matches "
r"left,.*6.*matches above.*"))
assert_channel_was_replied(self.reply_channel, matches(r"Fake Player2 \(elo: 799\):.*7.*application matches "
r"left"))
def test_cmd_mercis_replies_to_main_channel_instead_of_team_chat(self):
self.addCleanup(self.reset_chat_channel, minqlx.CHAT_CHANNEL)
minqlx.CHAT_CHANNEL = mocked_channel()
player = fake_player(666, "Cmd using Player")
player1 = fake_player(123, "Fake Player1", team="red")
player2 = fake_player(456, "Fake Player2", team="blue")
player3 = fake_player(789, "Fake Player3", team="blue")
connected_players(player, player1, player2, player3)
self.setup_balance_ratings({(player, 1400), (player1, 801), (player2, 799), (player3, 900)})
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player1.steam_id)).thenReturn("2")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player2.steam_id)).thenReturn("3")
when(self.db).get("minqlx:players:{}:minelo:abovegames".format(player1.steam_id)).thenReturn("6")
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player.steam_id)).thenReturn(None)
when(self.db).get("minqlx:players:{}:minelo:freegames".format(player3.steam_id)).thenReturn(None)
self.plugin.cmd_mercis(player, ["!mercis"], minqlx.BLUE_TEAM_CHAT_CHANNEL)
assert_channel_was_replied(minqlx.CHAT_CHANNEL, matches(r"Fake Player1 \(elo: 801\):.*8.*application matches "
r"left,.*6.*matches above.*"))
assert_channel_was_replied(minqlx.CHAT_CHANNEL, matches(r"Fake Player2 \(elo: 799\):.*7.*application matches "
r"left"))
def reset_chat_channel(self, original_chat_channel):
minqlx.CHAT_CHANNEL = original_chat_channel
def test_cmd_mercis_shows_no_mercis_if_no_player_using_their_application_matches(self):
player = fake_player(666, "Cmd using Player")
connected_players(player)
self.setup_balance_ratings({(player, 1400)})
when(self.db).get(any).thenReturn(None)
self.plugin.cmd_mercis(player, ["!mercis"], minqlx.CHAT_CHANNEL)
assert_plugin_sent_to_console(any, times=0)
| 1.460938 | 1 |
UI/ControlSlider/__init__.py | peerke88/SkinningTools | 7 | 1951 | # -*- coding: utf-8 -*-
# SkinWeights command and component editor
# Copyright (C) 2018 <NAME>
# Website: http://www.trevorius.com
#
# pyqt attribute sliders
# Copyright (C) 2018 <NAME>
# Website: http://danieleniero.com/
#
# neighbour finding algorythm
# Copyright (C) 2018 <NAME>
# Website: http://www.janpijpers.com/
#
# skinningTools and UI
# Copyright (C) 2018 <NAME>
# Website: http://www.perryleijten.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See http://www.gnu.org/licenses/gpl.html for a copy of the GNU General
# Public License.
# --------------------------------------------------------------------------------------
| 1.234375 | 1 |
scripts/viewStokespat.py | David-McKenna/AntPat | 5 | 1959 | #!/usr/bin/env python
"""A simple viewer for Stokes patterns based on two far-field pattern files.
(Possibly based on one FF pattern file if it has two requests: one for each
polarization channel.)"""
import os
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.tvecfun import TVecFields
from antpat.radfarfield import RadFarField
from antpat.dualpolelem import DualPolElem
FEKOsuffix = 'ffe'
GRASPsuffix = 'swe'
NECsuffix = 'out'
def Jones2Stokes(Jones):
"""Convert Jones matrix to Stokes vector. This assumes dual-pol antenna receiving unpolarized unit
valued radiation i.e. incoming Stokes = (1,0,0,0)."""
brightmat = numpy.matmul(Jones, numpy.swapaxes(numpy.conjugate(Jones),-1,-2))
StokesI = numpy.real(brightmat[...,0,0]+brightmat[...,1,1])
StokesQ = numpy.real(brightmat[...,0,0]-brightmat[...,1,1])
StokesU = numpy.real(brightmat[...,0,1]+brightmat[...,1,0])
StokesV = numpy.imag(brightmat[...,0,1]-brightmat[...,1,0])
return StokesI, StokesQ, StokesU, StokesV
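# Illustrative sanity check (a sketch added for clarity, not part of the
# original script): an identity Jones matrix at every direction models an
# ideal dual-pol antenna, so the Stokes vector should be (2, 0, 0, 0).
def _jones2stokes_sanity_check():
    jones = numpy.tile(numpy.eye(2), (3, 3, 1, 1))  # hypothetical 3x3 grid of directions
    StokesI, StokesQ, StokesU, StokesV = Jones2Stokes(jones)
    assert numpy.allclose(StokesI, 2.0)
    assert numpy.allclose(StokesQ, 0.0)
    assert numpy.allclose(StokesU, 0.0)
    assert numpy.allclose(StokesV, 0.0)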
def plotStokes_fromFEKOfiles(p_chan_file, q_chan_file, freq):
(tvf_p, tvf_q) = (TVecFields(), TVecFields())
tvf_p.load_ffe(p_chan_file)
tvf_q.load_ffe(q_chan_file)
(ant_p, ant_q) = (RadFarField(tvf_p), RadFarField(tvf_q))
(p_chan_name, q_chan_name) = (os.path.basename(p_chan_file), os.path.basename(q_chan_file))
(ant_p.name, ant_q.name) = (p_chan_name, q_chan_name)
dualpolAnt = DualPolElem(ant_p, ant_q)
THETA, PHI, Jones = dualpolAnt.getJonesPat(freq)
(StokesI, StokesQ, StokesU, StokesV) = Jones2Stokes(Jones)
x = THETA*numpy.cos(PHI)
y = THETA*numpy.sin(PHI)
#x= THETA
#y=PHI
xyNames = ('theta*cos(phi)','theta*sin(phi)')
fig = plt.figure()
ax1 = fig.add_subplot(221)
plt.pcolormesh(x, y, 10*numpy.log10(StokesI), label="I")
#plt.pcolormesh(x, y, StokesI, label="I")
plt.colorbar()
ax1.set_title('I (dB)')
ax2 = fig.add_subplot(222)
plt.pcolormesh(x, y, StokesQ/StokesI, label="Q")
plt.colorbar()
ax2.set_title('Q/I')
ax3 = fig.add_subplot(223)
plt.pcolormesh(x, y, StokesU/StokesI, label="U")
plt.colorbar()
ax3.set_title('U/I')
ax4 = fig.add_subplot(224)
plt.pcolormesh(x, y, StokesV/StokesI, label="V")
plt.colorbar()
ax4.set_title('V/I')
fig.suptitle('Stokes (azimuthal-equidistant proj) @ ' +str(freq/1e9)+' GHz')
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("p_chan_file",
help='Filename of polarization channel p')
parser.add_argument("q_chan_file",
help='Filename of polarization channel q')
parser.add_argument("freq", nargs='?', type=float,
help="Frequency in Hertz")
args = parser.parse_args()
if args.p_chan_file.endswith(FEKOsuffix):
plotStokes_fromFEKOfiles(args.p_chan_file, args.q_chan_file, args.freq)
elif args.p_chan_file.endswith(GRASPsuffix):
print("Not implemented yet.")
elif args.p_chan_file.endswith(NECsuffix):
print("Not implemented yet.")
else:
print("Far-field pattern file type not known")
exit(1)
| 2.296875 | 2 |
func-button/klSigmode.py | xcgoo/uiKLine | 232 | 1983 | # coding: utf-8
"""
Import all required libraries and functions
"""
#----------------------------------------------------------------------
def klSigmode(self):
"""查找模式"""
if self.mode == 'deal':
self.canvas.updateSig(self.signalsOpen)
self.mode = 'dealOpen'
else:
self.canvas.updateSig(self.signals)
self.mode = 'deal'
| 0.992188 | 1 |
mmdnn/conversion/caffe/writer.py | 2yz/MMdnn | 3,442 | 1991 | import base64
from google.protobuf import json_format
from importlib import import_module
import json
import numpy as np
import os
import sys
from mmdnn.conversion.caffe.errors import ConversionError
from mmdnn.conversion.caffe.common_graph import fetch_attr_value
from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name
class JsonFormatter(object):
'''Dump a DL graph into a JSON file.'''
def __init__(self, graph):
self.graph_def = graph.as_graph_def()
def dump(self, json_path):
json_txt = json_format.MessageToJson(self.graph_def)
parsed = json.loads(json_txt)
formatted = json.dumps(parsed, indent=4, sort_keys=True)
with open(json_path, 'w') as f:
f.write(formatted)
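# Illustrative usage (a sketch, not part of the original module; `graph` is
# any object exposing as_graph_def(), as assumed elsewhere in this file):
#   JsonFormatter(graph).dump('/tmp/graph.json')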
class PyWriter(object):
'''Dump a DL graph into a Python script.'''
def __init__(self, graph, data, target):
self.graph = graph
self.data = data
self.tab = ' ' * 4
self.prefix = ''
target = target.lower()
if target == 'tensorflow':
self.target = target
self.net = 'TensorFlowNetwork'
elif target == 'keras':
self.target = target
self.net = 'KerasNetwork'
elif target == 'caffe':
self.target = target
self.net = 'CaffeNetwork'
else:
raise ConversionError('Target %s is not supported yet.' % target)
def indent(self):
self.prefix += self.tab
def outdent(self):
self.prefix = self.prefix[:-len(self.tab)]
def statement(self, s):
return self.prefix + s + '\n'
def emit_imports(self):
return self.statement('from dlconv.%s import %s\n' % (self.target, self.net))
def emit_class_def(self, name):
return self.statement('class %s(%s):' % (name, self.net))
def emit_setup_def(self):
return self.statement('def setup(self):')
def emit_node(self, node):
'''Emits the Python source for this node.'''
def pair(key, value):
return '%s=%s' % (key, value)
args = []
for input in node.input:
input = input.strip().split(':')
name = ''.join(input[:-1])
idx = int(input[-1])
assert name in self.graph.node_dict
parent = self.graph.get_node(name)
args.append(parent.output[idx])
#FIXME:
output = [node.output[0]]
# output = node.output
for k, v in node.attr:
if k == 'cell_type':
args.append(pair(k, "'" + fetch_attr_value(v) + "'"))
else:
args.append(pair(k, fetch_attr_value(v)))
args.append(pair('name', "'" + node.name + "'")) # Set the node name
args = ', '.join(args)
return self.statement('%s = self.%s(%s)' % (', '.join(output), node.op, args))
def dump(self, code_output_dir):
if not os.path.exists(code_output_dir):
os.makedirs(code_output_dir)
file_name = get_lower_case(self.graph.name)
code_output_path = os.path.join(code_output_dir, file_name + '.py')
data_output_path = os.path.join(code_output_dir, file_name + '.npy')
with open(code_output_path, 'w') as f:
f.write(self.emit())
with open(data_output_path, 'wb') as f:
np.save(f, self.data)
return code_output_path, data_output_path
def emit(self):
# Decompose DAG into chains
chains = []
for node in self.graph.topologically_sorted():
attach_to_chain = None
if len(node.input) == 1:
parent = get_real_name(node.input[0])
for chain in chains:
if chain[-1].name == parent: # Node is part of an existing chain.
attach_to_chain = chain
break
if attach_to_chain is None: # Start a new chain for this node.
attach_to_chain = []
chains.append(attach_to_chain)
attach_to_chain.append(node)
# Generate Python code line by line
source = self.emit_imports()
source += self.emit_class_def(self.graph.name)
self.indent()
source += self.emit_setup_def()
self.indent()
blocks = []
for chain in chains:
b = ''
for node in chain:
b += self.emit_node(node)
blocks.append(b[:-1])
source += '\n\n'.join(blocks)
return source
class ModelSaver(object):
def __init__(self, code_output_path, data_output_path):
self.code_output_path = code_output_path
self.data_output_path = data_output_path
def dump(self, model_output_dir):
'''Return the file path containing graph in generated model files.'''
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
sys.path.append(os.path.dirname(self.code_output_path))
file_name = os.path.splitext(os.path.basename(self.code_output_path))[0]
module = import_module(file_name)
class_name = get_upper_case(file_name)
net = getattr(module, class_name)
return net.dump(self.data_output_path, model_output_dir)
class GraphDrawer(object):
def __init__(self, toolkit, meta_path):
self.toolkit = toolkit.lower()
self.meta_path = meta_path
def dump(self, graph_path):
if self.toolkit == 'tensorflow':
from dlconv.tensorflow.visualizer import TensorFlowVisualizer
if self._is_web_page(graph_path):
TensorFlowVisualizer(self.meta_path).dump_html(graph_path)
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
elif self.toolkit == 'keras':
from dlconv.keras.visualizer import KerasVisualizer
png_path, html_path = (None, None)
if graph_path.endswith('.png'):
png_path = graph_path
elif self._is_web_page(graph_path):
png_path = graph_path + ".png"
html_path = graph_path
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
KerasVisualizer(self.meta_path).dump_png(png_path)
if html_path:
self._png_to_html(png_path, html_path)
os.remove(png_path)
else:
raise NotImplementedError('Visualization of %s is unsupported!' % self.toolkit)
def _is_web_page(self, path):
return path.split('.')[-1] in ('html', 'htm')
def _png_to_html(self, png_path, html_path):
with open(png_path, "rb") as f:
encoded = base64.b64encode(f.read()).decode('utf-8')
source = """<!DOCTYPE>
<html>
<head>
<meta charset="utf-8">
<title>Keras</title>
</head>
<body>
<img alt="Model Graph" src="data:image/png;base64,{base64_str}" />
</body>
</html>""".format(base64_str=encoded)
with open(html_path, 'w', encoding='utf-8') as f:
f.write(source) | 1.546875 | 2 |
10_days_of_statistics_8_1.py | sercangul/HackerRank | 0 | 2015 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:26:47 2019
@author: sercangul
"""
n = 5
xy = [map(int, input().split()) for _ in range(n)]
sx, sy, sx2, sxy = map(sum, zip(*[(x, y, x**2, x * y) for x, y in xy]))
b = (n * sxy - sx * sy) / (n * sx2 - sx**2)
a = (sy / n) - b * (sx / n)
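# Closed-form simple linear regression (note added for clarity):
#   b = (n*sum(xy) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2)
#   a = mean(y) - b*mean(x)
# The final line predicts the y-value at x = 80 via a + b*80.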
print('{:.3f}'.format(a + b * 80)) | 2.015625 | 2 |
util/headers.py | giuseppe/quay | 2,027 | 2023 | import base64
def parse_basic_auth(header_value):
"""
Attempts to parse the given header value as a Base64-encoded Basic auth header.
"""
if not header_value:
return None
parts = header_value.split(" ")
if len(parts) != 2 or parts[0].lower() != "basic":
return None
try:
# b64decode returns bytes under Python 3, so decode before splitting
basic_parts = base64.b64decode(parts[1]).decode("utf-8").split(":", 1)
if len(basic_parts) != 2:
return None
return basic_parts
except (ValueError, UnicodeDecodeError):
return None
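# Illustrative usage (a sketch, not part of the original module):
#   header = "Basic " + base64.b64encode(b"alice:secret").decode("ascii")
#   parse_basic_auth(header)        # -> ["alice", "secret"]
#   parse_basic_auth("Bearer abc")  # -> None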
| 1.84375 | 2 |
build/scripts-3.6/fit_background_model.py | stahlberggroup/umierrorcorrect | 0 | 2031 | #!python
import numpy as np
from numpy import inf
from numpy import nan
from scipy.optimize import fmin
from scipy.stats import beta
from scipy.special import beta as B
from scipy.special import comb
import argparse
import sys
import glob
def parseArgs():
'''Function for parsing arguments'''
parser = argparse.ArgumentParser(description="Pipeline for analyzing barcoded amplicon \
sequencing data with Unique molecular \
identifiers (UMI)")
parser.add_argument('-cons', '--cons_file', dest='cons_file', help='Path to cons file, for fitting parameters of the bgmodel')
parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile',
help='Path to file with non-background positions')
parser.add_argument('-out', '--out_file',dest='out_file',help="name of output file, default = %(default)s]",default="bgmodel.params")
parser.add_argument('-f','--fsize',dest='fsize', help='Family size cutoff (consensus cutoff) for variant calling. [default = %(default)s]', default=3)
args = parser.parse_args(sys.argv[1:])
return(args)
def parse_cons_file(filename,fsize=3):
n1=[]
f1=[]
c1=[]
posx=[]
data=[]
with open(filename) as f:
for line in f:
if not line.startswith('Sample Name'):
line=line.rstrip('\n')
parts=line.split('\t')
pos=parts[1]+':'+parts[2]
name=parts[3]
#print(name)
if name not in "":
famsize=parts[-4]
if int(famsize)==fsize:
frac=float(parts[-2])
alt=parts[-1]
count=parts[-3]
if frac > 0 and alt not in 'N':
cov=int(parts[-5])
f1.append(float(frac))
n1.append(int(cov))
c1.append(int(count))
posx.append(pos)
data.append(line)
#print(name)
#print(famsize)
return(f1,n1,c1,posx,data)
def betaNLL(params,*args):
a,b = params
data = np.array(args[0])
pdf=beta.pdf(data,a,b,loc=0,scale=1)
lg=np.log(pdf)
#lg=np.where(lg==-np.inf,0,lg)
mask = np.isfinite(lg)
# sum only over finite log-densities; the unmasked sum would be -inf
# wherever the pdf underflows to zero
nll = -lg[mask].sum()
return(nll)
def get_beta_parameters(data):
m=np.mean(data)
v=np.var(data)
a0=m*(m * (1-m) / v-1 )
b0=(1-m)*(m * (1-m) / v-1 )
result=fmin(betaNLL,[a0,b0],args=(data,))
return(result)
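# Illustrative check (a sketch, not part of the original script): fitting
# samples drawn from a known Beta(2, 5) should recover parameters close to
# (2, 5); fmin is seeded with the method-of-moments estimates a0, b0.
#   data = np.random.beta(2, 5, size=10000)
#   a_hat, b_hat = get_beta_parameters(data)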
def run_fit_bgmodel(args):
spikepositions=[178952085,55599321,7577558,7577547,7577538,7577120]
if args.nonbgposfile:
nonbgpos=[]
with open(args.nonbgposfile) as f:
for line in f:
line=line.rstrip()
nonbgpos.append(line)
else:
nonbgpos=spikepositions
if not args.cons_file:
args.cons_file=glob.glob(args.output_path+'/*cons.tsv')[0]
args.fsize=int(args.fsize)
f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize)
f1 = np.array(f1)
n1 = np.array(n1)
a1 = np.array(a1)
pos = np.array(pos)
data = np.array(data)
result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True])
#a=prob_bb(n1,a1,result[0],result[1])
print(pos,nonbgpos,np.isin(pos,nonbgpos))
with open(args.out_file,'w') as g:
g.write('{}\n'.format(result[0]))
g.write('{}\n'.format(result[1]))
#a[a==inf]=1e-10
#a[np.isnan(a)]=1e-10
#Q = -10*np.log10(a)
#data=np.array(data)
#plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png')
#if args.vc_method.lower()=='bbmodel':
# rout=data[Q >= float(args.qvalue_threshold)]
# Qsig=Q[Q >= float(args.qvalue_threshold)]
#else:
# rout=data[a1 >= float(args.count_cutoff)]
# Qsig=Q[a1 >= float(args.count_cutoff)]
#outfilename=args.output_path+'/'+args.sample_name+'2.vcf'
#write_vcf(outfilename,rout,Qsig,args.reference_file)
if __name__=='__main__':
args=parseArgs()
run_fit_bgmodel(args)
| 1.882813 | 2 |
course_catalog/etl/conftest.py | mitodl/open-discussions | 12 | 2095 | """Common ETL test fixtures"""
import json
import pytest
@pytest.fixture(autouse=True)
def mitx_settings(settings):
"""Test settings for MITx import"""
settings.EDX_API_CLIENT_ID = "fake-client-id"
settings.EDX_API_CLIENT_SECRET = "fake-client-secret"
settings.EDX_API_ACCESS_TOKEN_URL = "http://localhost/fake/access/token/url"
settings.EDX_API_URL = "http://localhost/fake/api/url"
settings.MITX_BASE_URL = "http://localhost/fake/base/url"
settings.MITX_ALT_URL = "http://localhost/fake/alt/url"
return settings
@pytest.fixture(autouse=True)
def oll_settings(settings):
"""Test settings for MITx import"""
settings.OLL_API_CLIENT_ID = "fake-client-id"
settings.OLL_API_CLIENT_SECRET = "fake-client-secret"
settings.OLL_API_ACCESS_TOKEN_URL = "http://localhost/fake/access/token/url"
settings.OLL_API_URL = "http://localhost/fake/api/url"
settings.OLL_BASE_URL = "http://localhost/fake/base/url"
settings.OLL_ALT_URL = "http://localhost/fake/alt/url"
return settings
@pytest.fixture
def mitx_course_data():
"""Catalog data fixture"""
with open("./test_json/test_mitx_course.json", "r") as f:
yield json.loads(f.read())
@pytest.fixture
def non_mitx_course_data():
"""Catalog data fixture"""
with open("./test_json/test_non_mitx_course.json", "r") as f:
yield json.loads(f.read())
| 1.296875 | 1 |
pmdarima/preprocessing/endog/boxcox.py | tuomijal/pmdarima | 736 | 2103 | # -*- coding: utf-8 -*-
from scipy import stats
import numpy as np
import warnings
from ...compat import check_is_fitted, pmdarima as pm_compat
from .base import BaseEndogTransformer
__all__ = ['BoxCoxEndogTransformer']
class BoxCoxEndogTransformer(BaseEndogTransformer):
r"""Apply the Box-Cox transformation to an endogenous array
The Box-Cox transformation is applied to non-normal data to coerce it more
towards a normal distribution. It's specified as::
(((y + lam2) ** lam1) - 1) / lam1, if lmbda != 0, else
log(y + lam2)
Parameters
----------
lmbda : float or None, optional (default=None)
The lambda value for the Box-Cox transformation, if known. If not
specified, it will be estimated via MLE.
lmbda2 : float, optional (default=0.)
The value to add to ``y`` to make it non-negative. If, after adding
``lmbda2``, there are still negative values, a ValueError will be
raised.
neg_action : str, optional (default="raise")
How to respond if any values in ``y <= 0`` after adding ``lmbda2``.
One of ('raise', 'warn', 'ignore'). If anything other than 'raise',
values <= 0 will be truncated to the value of ``floor``.
floor : float, optional (default=1e-16)
A positive value to truncate values to if there are values in ``y``
that are zero or negative and ``neg_action`` is not 'raise'. Note that
if values are truncated, invertibility will not be preserved, and the
transformed array may not be perfectly inverse-transformed.
"""
def __init__(self, lmbda=None, lmbda2=0, neg_action="raise", floor=1e-16):
self.lmbda = lmbda
self.lmbda2 = lmbda2
self.neg_action = neg_action
self.floor = floor
def fit(self, y, X=None, **kwargs): # TODO: kwargs go away
"""Fit the transformer
Learns the value of ``lmbda``, if not specified in the constructor.
If defined in the constructor, is not re-learned.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
"""
lam1 = self.lmbda
lam2 = self.lmbda2
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
if lam2 < 0:
raise ValueError("lmbda2 must be a non-negative scalar value")
if lam1 is None:
y, _ = self._check_y_X(y, X)
_, lam1 = stats.boxcox(y + lam2, lmbda=None, alpha=None)
self.lam1_ = lam1
self.lam2_ = lam2
return self
def transform(self, y, X=None, **kwargs):
"""Transform the new array
Apply the Box-Cox transformation to the array after learning the
lambda parameter.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y_transform : array-like or None
The Box-Cox transformed y array
X : array-like or None
The X array
"""
check_is_fitted(self, "lam1_")
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
lam1 = self.lam1_
lam2 = self.lam2_
y, exog = self._check_y_X(y, X)
y += lam2
neg_mask = y <= 0.
if neg_mask.any():
action = self.neg_action
msg = "Negative or zero values present in y"
if action == "raise":
raise ValueError(msg)
elif action == "warn":
warnings.warn(msg, UserWarning)
y[neg_mask] = self.floor
if lam1 == 0:
return np.log(y), exog
return (y ** lam1 - 1) / lam1, exog
def inverse_transform(self, y, X=None, **kwargs): # TODO: kwargs go away
"""Inverse transform a transformed array
Inverse the Box-Cox transformation on the transformed array. Note that
if truncation happened in the ``transform`` method, invertibility will
not be preserved, and the transformed array may not be perfectly
inverse-transformed.
Parameters
----------
y : array-like or None, shape=(n_samples,)
The transformed endogenous (time-series) array.
X : array-like or None, shape=(n_samples, n_features), optional
The exogenous array of additional covariates. Not used for
endogenous transformers. Default is None, and non-None values will
serve as pass-through arrays.
Returns
-------
y : array-like or None
The inverse-transformed y array
X : array-like or None
The inverse-transformed X array
"""
check_is_fitted(self, "lam1_")
# Temporary shim until we remove `exogenous` support completely
X, _ = pm_compat.get_X(X, **kwargs)
lam1 = self.lam1_
lam2 = self.lam2_
y, exog = self._check_y_X(y, X)
if lam1 == 0:
return np.exp(y) - lam2, exog
numer = y * lam1 # remove denominator
numer += 1. # add 1 back to it
de_exp = numer ** (1. / lam1) # de-exponentiate
return de_exp - lam2, exog
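# Illustrative usage (a sketch, not part of pmdarima itself; assumes the
# scikit-learn-style fit/transform API of the base transformer class):
#
#   import numpy as np
#   y = np.random.exponential(scale=2.0, size=100)
#   trans = BoxCoxEndogTransformer(lmbda2=1e-6)
#   y_t, _ = trans.fit(y).transform(y)
#   y_back, _ = trans.inverse_transform(y_t)
#   np.allclose(y, y_back)  # True up to floating-point error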
| 2.71875 | 3 |
CV Model/Model - JupyterNotebook/mrcnn/tfliteconverter.py | fcsiba/Smart-Cart | 0 | 2119 | import tensorflow as tf
# Convert the model. Note: from_saved_model expects the path to a SavedModel
# directory (as produced by tf.saved_model.save), not a Python source file.
converter = tf.lite.TFLiteConverter.from_saved_model('model.py')
tflite_model = converter.convert()
open("trash_ai.tflite", "wb").write(tflite_model) | 1.085938 | 1 |
csat/django/fields.py | GaretJax/csat | 0 | 2127 | from lxml import etree
from django import forms
from django.db import models
class XMLFileField(models.FileField):
def __init__(self, *args, **kwargs):
self.schema = kwargs.pop('schema')
super(XMLFileField, self).__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
data = super(XMLFileField, self).clean(*args, **kwargs)
with data as fh:
doc = etree.parse(fh)
with open(self.schema) as fh:
schema = etree.XMLSchema(etree.parse(fh))
if not schema.validate(doc):
raise forms.ValidationError('The XML file failed to validate '
'against the supplied schema.')
return data
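# Illustrative usage (a sketch; the model and paths below are hypothetical):
#
#   class GraphUpload(models.Model):
#       data = XMLFileField(upload_to='graphs', schema='schemas/graph.xsd')
#
# The required `schema` argument points at an XSD file that every uploaded
# XML document is validated against during cleaning.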
| 1.296875 | 1 |
joulescope_ui/meter_widget.py | Axel-Jacobsen/pyjoulescope_ui | 1 | 2143 | # Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2 import QtCore, QtWidgets
from . import joulescope_rc
from .meter_value_widget import MeterValueWidget
import logging
log = logging.getLogger(__name__)
FIELDS = [
('current', 'A', 'Amps'),
('voltage', 'V', 'Volts'),
('power', 'W', 'Watts'),
('energy', 'J', 'Joules'),
]
class MeterWidget(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
QtWidgets.QWidget.__init__(self, *args, **kwargs)
self.verticalLayout = QtWidgets.QVBoxLayout(self)
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout.setSpacing(0)
self.controlWidget = QtWidgets.QWidget(self)
self.controlLayout = QtWidgets.QHBoxLayout(self.controlWidget)
self.verticalLayout.addWidget(self.controlWidget)
self.accumulateButton = QtWidgets.QPushButton(self.controlWidget)
self.accumulateButton.setCheckable(True)
self.accumulateButton.setObjectName("accumulateButton")
self.controlLayout.addWidget(self.accumulateButton)
self.accumulateButton.toggled.connect(self.on_accumulate_toggled)
self.controlSpacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.controlLayout.addItem(self.controlSpacer)
self.values = {}
for name, units_short, units_long in FIELDS:
w = MeterValueWidget(self)
w.setStyleSheet("QWidget { background-color : black; color : green; }")
w.configure(name.capitalize(), units_short, units_long)
self.values[name] = w
w.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.addWidget(w)
self.values['energy'].configure_energy()
self.sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.sizePolicy.setHorizontalStretch(0)
self.sizePolicy.setVerticalStretch(0)
self.setSizePolicy(self.sizePolicy)
self.retranslateUi()
@QtCore.Slot(bool)
def on_accumulate_toggled(self, checked):
self.values['current'].accumulate_enable = checked
self.values['voltage'].accumulate_enable = checked
self.values['power'].accumulate_enable = checked
def update(self, statistics):
"""Update the multimeter display
:param statistics: The statistics data structure
"""
for name, field in statistics['signals'].items():
d = field['statistics']
self.values[name].update_value(mean=d['μ'], variance=d['σ2'], v_min=d['min'], v_max=d['max'])
energy = statistics['accumulators']['energy']['value']
charge = statistics['accumulators']['charge']['value']
self.values['energy'].update_energy(energy, charge)
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.accumulateButton.setText(_translate("meter_widget", "Accumulate"))
| 1.859375 | 2 |
api/tests/ver1/test_base.py | codacy-badger/politico-api | 0 | 2151 | import unittest
from api import create_app
class TestBase(unittest.TestCase):
"""Default super class for api ver 1 tests"""
# setup testing
def setUp(self):
self.app = create_app('testing')
self.client = self.app.test_client()
self.item_list = []
# deconstructs test elements
def tearDown(self):
self.app = None
self.item_list.clear()
| 1.257813 | 1 |
hail/python/test/hail/helpers.py | mitochon/hail | 0 | 2183 | import os
from timeit import default_timer as timer
import unittest
import pytest
from decorator import decorator
from hail.utils.java import Env
import hail as hl
from hail.backend.local_backend import LocalBackend
_initialized = False
def startTestHailContext():
global _initialized
if not _initialized:
backend_name = os.environ.get('HAIL_QUERY_BACKEND', 'spark')
if backend_name == 'spark':
hl.init(master='local[1]', min_block_size=0, quiet=True)
else:
Env.hc() # force initialization
_initialized = True
def stopTestHailContext():
pass
_test_dir = os.environ.get('HAIL_TEST_RESOURCES_DIR', '../src/test/resources')
_doctest_dir = os.environ.get('HAIL_DOCTEST_DATA_DIR', 'hail/docs/data')
def resource(filename):
return os.path.join(_test_dir, filename)
def doctest_resource(filename):
return os.path.join(_doctest_dir, filename)
def schema_eq(x, y):
x_fds = dict(x)
y_fds = dict(y)
return x_fds == y_fds
def convert_struct_to_dict(x):
if isinstance(x, hl.Struct):
return {k: convert_struct_to_dict(v) for k, v in x._fields.items()}
elif isinstance(x, list):
return [convert_struct_to_dict(elt) for elt in x]
elif isinstance(x, tuple):
return tuple([convert_struct_to_dict(elt) for elt in x])
elif isinstance(x, dict):
return {k: convert_struct_to_dict(v) for k, v in x.items()}
else:
return x
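# Illustrative behaviour (a sketch, not part of the original helpers):
#   convert_struct_to_dict(hl.Struct(a=1, b=[hl.Struct(c=2)]))
#   -> {'a': 1, 'b': [{'c': 2}]}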
_dataset = None
def get_dataset():
global _dataset
if _dataset is None:
_dataset = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf'))).cache()
return _dataset
def assert_time(f, max_duration):
start = timer()
x = f()
end = timer()
assert (end - start) < max_duration
print(f'took {end - start:.3f}')
return x
def create_all_values():
return hl.struct(
f32=hl.float32(3.14),
i64=hl.int64(-9),
m=hl.null(hl.tfloat64),
astruct=hl.struct(a=hl.null(hl.tint32), b=5.5),
mstruct=hl.null(hl.tstruct(x=hl.tint32, y=hl.tstr)),
aset=hl.set(['foo', 'bar', 'baz']),
mset=hl.null(hl.tset(hl.tfloat64)),
d=hl.dict({hl.array(['a', 'b']): 0.5, hl.array(['x', hl.null(hl.tstr), 'z']): 0.3}),
md=hl.null(hl.tdict(hl.tint32, hl.tstr)),
h38=hl.locus('chr22', 33878978, 'GRCh38'),
ml=hl.null(hl.tlocus('GRCh37')),
i=hl.interval(
hl.locus('1', 999),
hl.locus('1', 1001)),
c=hl.call(0, 1),
mc=hl.null(hl.tcall),
t=hl.tuple([hl.call(1, 2, phased=True), 'foo', hl.null(hl.tstr)]),
mt=hl.null(hl.ttuple(hl.tlocus('GRCh37'), hl.tbool)),
nd=hl.nd.arange(0, 10).reshape((2, 5)),
)
def prefix_struct(s, prefix):
return hl.struct(**{prefix + k: s[k] for k in s})
def create_all_values_table():
all_values = create_all_values()
return (hl.utils.range_table(5, n_partitions=3)
.annotate_globals(**prefix_struct(all_values, 'global_'))
.annotate(**all_values)
.cache())
def create_all_values_matrix_table():
all_values = create_all_values()
return (hl.utils.range_matrix_table(3, 2, n_partitions=2)
.annotate_globals(**prefix_struct(all_values, 'global_'))
.annotate_rows(**prefix_struct(all_values, 'row_'))
.annotate_cols(**prefix_struct(all_values, 'col_'))
.annotate_entries(**prefix_struct(all_values, 'entry_'))
.cache())
def create_all_values_datasets():
return (create_all_values_table(), create_all_values_matrix_table())
def skip_unless_spark_backend():
from hail.backend.spark_backend import SparkBackend
@decorator
def wrapper(func, *args, **kwargs):
if isinstance(hl.utils.java.Env.backend(), SparkBackend):
return func(*args, **kwargs)
else:
raise unittest.SkipTest('requires Spark')
return wrapper
fails_local_backend = pytest.mark.xfail(
os.environ.get('HAIL_QUERY_BACKEND') == 'local',
reason="doesn't yet work on local backend",
strict=True)
def run_with_cxx_compile():
@decorator
def wrapper(func, *args, **kwargs):
return
return wrapper
def assert_evals_to(e, v):
res = hl.eval(e)
if res != v:
raise ValueError(f' actual: {res}\n expected: {v}')
def assert_all_eval_to(*expr_and_expected):
exprs, expecteds = zip(*expr_and_expected)
assert_evals_to(hl.tuple(exprs), expecteds)
def lower_only():
@decorator
def wrapper(func, *args, **kwargs):
flags = hl._get_flags()
prev_lower = flags.get('lower')
prev_lower_only = flags.get('lower_only')
hl._set_flags(lower='1', lower_only='1')
try:
return func(*args, **kwargs)
finally:
hl._set_flags(lower=prev_lower, lower_only=prev_lower_only)
return wrapper | 1.375 | 1 |
models/utils.py | wyshi/Unsupervised-Structure-Learning | 34 | 2191 | # Original work Copyright (C) 2017 <NAME>, Carnegie Mellon University
# Modified work Copyright 2018 <NAME>.
import tensorflow as tf
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
def get_bleu_stats(ref, hyps):
scores = []
for hyp in hyps:
try:
scores.append(sentence_bleu([ref], hyp, smoothing_function=SmoothingFunction().method7,
weights=[1./3, 1./3,1./3]))
except:
scores.append(0.0)
return np.max(scores), np.mean(scores)
def gaussian_kld(recog_mu, recog_logvar, prior_mu, prior_logvar):
kld = -0.5 * tf.reduce_sum(1 + (recog_logvar - prior_logvar)
- tf.div(tf.pow(prior_mu - recog_mu, 2), tf.exp(prior_logvar))
- tf.div(tf.exp(recog_logvar), tf.exp(prior_logvar)), reduction_indices=1)
return kld
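def gaussian_kld_numpy(recog_mu, recog_logvar, prior_mu, prior_logvar):
    """Sketch added for illustration (not in the original file): a NumPy
    reference for the closed-form KL divergence between two diagonal
    Gaussians, mirroring gaussian_kld above; useful for unit tests."""
    return -0.5 * np.sum(
        1 + (recog_logvar - prior_logvar)
        - np.power(prior_mu - recog_mu, 2) / np.exp(prior_logvar)
        - np.exp(recog_logvar) / np.exp(prior_logvar),
        axis=1)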
def norm_log_liklihood(x, mu, logvar):
return -0.5*tf.reduce_sum(tf.log(2*np.pi) + logvar + tf.div(tf.pow((x-mu), 2), tf.exp(logvar)), reduction_indices=1)
def sample_gaussian(mu, logvar):
epsilon = tf.random_normal(tf.shape(logvar), name="epsilon")
std = tf.exp(0.5 * logvar)
z= mu + tf.multiply(std, epsilon)
return z
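# Reparameterization-trick note (added comment): writing z = mu + sigma * eps
# with eps ~ N(0, I) keeps the sample differentiable w.r.t. mu and logvar,
# e.g. sample_gaussian(tf.zeros([4, 8]), tf.zeros([4, 8])) draws standard normals.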
def get_bow(embedding, avg=False):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
"""
embedding_size = embedding.get_shape()[2].value
if avg:
return tf.reduce_mean(embedding, reduction_indices=[1]), embedding_size
else:
return tf.reduce_sum(embedding, reduction_indices=[1]), embedding_size
def get_rnn_encode(embedding, cell, length_mask=None, scope=None, reuse=None):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
The padding should have zero
"""
with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
if length_mask is None:
length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1)
length_mask = tf.to_int32(length_mask)
_, encoded_input = tf.nn.dynamic_rnn(cell, embedding, sequence_length=length_mask, dtype=tf.float32)
return encoded_input, cell.state_size
def get_bi_rnn_encode(embedding, f_cell, b_cell, length_mask=None, scope=None, reuse=None):
"""
Assumption, the last dimension is the embedding
The second last dimension is the sentence length. The rank must be 3
The padding should have zero
"""
with tf.variable_scope(scope, 'RnnEncoding', reuse=reuse):
if length_mask is None:
length_mask = tf.reduce_sum(tf.sign(tf.reduce_max(tf.abs(embedding), reduction_indices=2)),reduction_indices=1)
length_mask = tf.to_int32(length_mask)
_, encoded_input = tf.nn.bidirectional_dynamic_rnn(f_cell, b_cell, embedding, sequence_length=length_mask, dtype=tf.float32)
encoded_input = tf.concat(encoded_input, 1)
return encoded_input, f_cell.state_size+b_cell.state_size
def get_prob_for_one_sent(vocab_prob, sent, length_mask=None):
"""
:param vocab_prob: per-token probabilities (unused in this snippet)
:param sent: batch of token ids, reshaped to (-1, 50)
:param length_mask: true sentence lengths, used to mask out padding
:return: the entries of `sent` that fall inside the true sentence lengths
"""
# `usr_input_sent` in the original snippet was undefined; `sent` is the
# intended tensor (a maximum sentence length of 50 is assumed)
return tf.boolean_mask(tf.reshape(sent, [-1, 50]), tf.sequence_mask(length_mask, 50))
def tf_repeat(tensor, repeats):
"""
Tile a tensor element-wise.
:param tensor: input tensor
:param repeats: per-axis repeat counts (one entry per axis of `tensor`)
:return: tensor of shape tf.shape(tensor) * repeats
"""
with tf.variable_scope("repeat"):
expanded_tensor = tf.expand_dims(tensor, -1)
multiples = [1] + repeats
tiled_tensor = tf.tile(expanded_tensor, multiples=multiples)
repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
return repeated_tensor | 2.140625 | 2 |
pydeap/feature_extraction/_time_domain_features.py | Wlgls/pyDEAP | 0 | 2199 | # -*- encoding: utf-8 -*-
'''
@File :_time_domain_features.py
@Time :2021/04/16 20:02:55
@Author :wlgls
@Version :1.0
'''
import numpy as np
def statistics(data, combined=True):
"""Statistical features, include Power, Mean, Std, 1st differece, Normalized 1st difference, 2nd difference, Normalized 2nd difference.
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [13]: d.shape, l.shape
Out[13]: ((40, 32, 8064), (40, 1))
In [14]: statistics(d, combined=False).shape
Out[14]: (40, 32, 7)
"""
# Power
power = np.mean(data**2, axis=-1)
# Mean
ave = np.mean(data, axis=-1)
# Standard Deviation
std = np.std(data, axis=-1)
# the mean of the absolute values of the 1st difference
diff_1st = np.mean(np.abs(np.diff(data,n=1, axis=-1)), axis=-1)
# the mean of the absolute values of Normalized 1st difference
normal_diff_1st = diff_1st / std
# the mean of the absolute values of the 2nd difference
diff_2nd = np.mean(np.abs(data[..., 2:] - data[..., :-2]), axis=-1)
# the mean of the absolute values of Normalized 2nd difference
normal_diff_2nd = diff_2nd / std
# Features.append(np.concatenate((Power, Mean, Std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=2))
f = np.stack((power, ave, std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))  # flatten channels x features into one axis
return f
def hjorth(data, combined=True):
"""Solving Hjorth features, include activity, mobility, complexity
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [15]: d.shape, l.shape
Out[15]: ((40, 32, 8064), (40, 1))
In [16]: hjorth(d, combined=False).shape
Out[16]: (40, 32, 3)
"""
data = np.array(data)
ave = np.mean(data, axis=-1)[..., np.newaxis]
diff_1st = np.diff(data, n=1, axis=-1)
# print(diff_1st.shape)
diff_2nd = data[..., 2:] - data[..., :-2]
# Activity
activity = np.mean((data-ave)**2, axis=-1)
# print(Activity.shape)
# Mobility
varfdiff = np.var(diff_1st, axis=-1)
# print(varfdiff.shape)
mobility = np.sqrt(varfdiff / activity)
# Complexity
varsdiff = np.var(diff_2nd, axis=-1)
complexity = np.sqrt(varsdiff/varfdiff) / mobility
f = np.stack((activity, mobility, complexity), axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))
return f
def higher_order_crossing(data, k=10, combined=True):
"""Solving the feature of hoc. Hoc is a high order zero crossing quantity.
Parameters
----------
data : array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
k : int, optional
Order, by default 10
Return
----------
nzc:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [4]: d, l = load_deap(path, 0)
In [5]: higher_order_crossing(d, k=10, combined=False).shape
Out[5]: (40, 32, 10)
In [6]: higher_order_crossing(d, k=5, combined=False).shape
Out[6]: (40, 32, 5)
"""
nzc = []
for i in range(k):
curr_diff = np.diff(data, n=i)
x_t = curr_diff >= 0
x_t = np.diff(x_t)
x_t = np.abs(x_t)
count = np.count_nonzero(x_t, axis=-1)
nzc.append(count)
f = np.stack(nzc, axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))
return f
def sevcik_fd(data, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
The Sevcik method is used here because it is easier to implement
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
In [8]: sevcik_fd(d, combined=False).shape
Out[8]: (40, 32, 1)
"""
points = data.shape[-1]
x = np.arange(1, points+1)
x_ = x / np.max(x)
miny = np.expand_dims(np.min(data, axis=-1), axis=-1)
maxy = np.expand_dims(np.max(data, axis=-1), axis=-1)
y_ = (data-miny) / (maxy-miny)
L = np.expand_dims(np.sum(np.sqrt(np.diff(y_, axis=-1)**2 + np.diff(x_)**2), axis=-1), axis=-1)
f = 1 + np.log(L) / np.log(2 * (points-1))
# print(FD.shape)
if combined:
f = f.reshape((*f.shape[:-2], -1))
return f
def calc_L(X, k, m):
"""
Return Lm(k) as the length of the curve.
"""
N = X.shape[-1]
n = np.floor((N-m)/k).astype(np.int64)
norm = (N-1) / (n*k)
ss = np.sum(np.abs(np.diff(X[..., m::k], n=1)), axis=-1)
Lm = (ss*norm) / k
return Lm
def calc_L_average(X, k):
"""
Return <L(k)> as the average value over k sets of Lm(k).
"""
calc_L_series = np.frompyfunc(lambda m: calc_L(X, k, m), 1, 1)
L_average = np.average(calc_L_series(np.arange(1, k+1)))
return L_average
def higuchi_fd(data, k_max, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
The higuchi method is used here because it is easier to implement
Parameters
----------
data array
data, for DEAP dataset, It's shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature, It's shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
In [8]: higuchi_fd(d, k_max=10, combined=False).shape
Out[8]: (40, 32, 1)
"""
calc_L_average_series = np.frompyfunc(lambda k: calc_L_average(data, k), 1, 1)
k = np.arange(1, k_max+1)
L = calc_L_average_series(k)
L = np.stack(L, axis=-1)
fd = np.zeros(data.shape[:-1])
for ind in np.argwhere(L[..., 0]):
tmp = L[ind[0], ind[1], ind[2]]
D, _= np.polyfit(np.log2(k), np.log2(tmp), 1)
fd[ind[0], ind[1], ind[2]] = - D
f = np.expand_dims(fd, axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))
return f
| 2.703125 | 3 |
python/fill_na_v2.py | fredmell/CS229Project | 0 | 2207 | """
Fill na with most common of the whole column
"""
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
from datetime import datetime
import re
from collections import Counter
from statistics import median
from tqdm import tqdm
def find_most_common_value(element_list):
for element in element_list:
if not pd.isna(element):
break
if pd.isna(element):
return np.nan
elif isinstance(element, np.double):
array = np.array(element_list)
array = array[~np.isnan(array)]
if len(array) == 0:
return np.nan
else:
array = array.astype(np.int)
return np.double(np.bincount(array).argmax())
elif isinstance(element, str):
# count over the values passed in, not the global dataframe column
count = Counter(element_list)
try:
del count[np.nan]
except KeyError:
pass
if count == dict():
return np.nan
else:
return count.most_common(1)[0][0]
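# Illustrative behaviour (a sketch, not part of the original script):
#   find_most_common_value([1.0, 2.0, 2.0, np.nan])  -> 2.0
#   find_most_common_value(['a', 'b', 'a', np.nan])  -> 'a'
#   find_most_common_value([np.nan, np.nan])         -> nan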
file = '/home/nicolasbievre/yelp_data.pkl'
file_na = '/home/nicolasbievre/yelp_data_no_na.pkl'
df = pd.read_pickle(file)
categories = list(set(df['categories'].values))
n = len(categories)
for i in tqdm(range(len(df.columns))):
col = df.columns[i]
if not col in {'review_id', 'business_id', 'user_id', 'postal_code'}:
df_col = df[col].values
na = sum(pd.isna(df_col))
if na > 0:
most_commom_term = find_most_common_value(df_col)
if not pd.isna(most_commom_term):
df.loc[(pd.isna(df_col)), col] = most_commom_term
if i % 35 == 0 and i > 0:
df.to_pickle(file_na)
df.to_pickle(file_na)
| 2.3125 | 2 |
Hedge/Shell.py | RonaldoAPSD/Hedge | 2 | 2215 | import Hedge
while True:
text = input('Hedge > ')
if text.strip() == "":
continue
result, error = Hedge.run('<stdin>', text)
if (error):
print(error.asString())
elif result:
if len(result.elements) == 1:
print(repr(result.elements[0]))
else:
print(repr(result)) | 1.40625 | 1 |
ribbon/exceptions.py | cloutiertyler/RibbonGraph | 2 | 2223 | from rest_framework.exceptions import APIException
from rest_framework import status
class GraphAPIError(APIException):
"""Base class for exceptions in this module."""
pass
class NodeNotFoundError(GraphAPIError):
status_code = status.HTTP_404_NOT_FOUND
def __init__(self, id):
self.id = id
super(NodeNotFoundError, self).__init__("Node with id '{}' does not exist.".format(id))
class NodeTypeNotFoundError(GraphAPIError):
status_code = status.HTTP_404_NOT_FOUND
def __init__(self, node_type):
self.node_type = node_type
super(NodeTypeNotFoundError, self).__init__("Node type '{}' does not exist.".format(node_type))
class MissingNodeTypeError(GraphAPIError):
""" Creating a node requires a type. """
status_code = status.HTTP_400_BAD_REQUEST
class MalformedUpdateDictionaryError(GraphAPIError):
status_code = status.HTTP_400_BAD_REQUEST
class InvalidPropertyError(GraphAPIError):
status_code = status.HTTP_400_BAD_REQUEST
class InvalidValueError(GraphAPIError):
status_code = status.HTTP_400_BAD_REQUEST
class PermissionDenied(GraphAPIError):
status_code = status.HTTP_403_FORBIDDEN
default_detail = 'Insufficient permissions for the request.'
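# Illustrative usage (a sketch): raised inside a DRF view, each exception
# maps to its status code, e.g.
#   raise NodeNotFoundError(42)  # -> HTTP 404, "Node with id '42' does not exist."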
| 1.585938 | 2 |
archive/data-processing/archive/features/sd1.py | FloFincke/affective-chat | 0 | 2231 | #!/usr/bin/env python
import math
import numpy as np
def sd1(rr):
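    # Poincare-plot SD1 note (added comment): the canonical definition uses
    # the std of successive RR differences (SDSD), i.e.
    # sqrt(0.5) * np.std(np.diff(rr)); this version derives SD1 from the
    # SDNN of the raw intervals instead.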
sdnn = np.std(rr)
return math.sqrt(0.5 * sdnn * sdnn) | 0.960938 | 1 |
caseworker/open_general_licences/enums.py | code-review-doctor/lite-frontend-1 | 0 | 2239 | from lite_content.lite_internal_frontend.open_general_licences import (
OGEL_DESCRIPTION,
OGTCL_DESCRIPTION,
OGTL_DESCRIPTION,
)
from lite_forms.components import Option
class OpenGeneralExportLicences:
class OpenGeneralLicence:
def __init__(self, id, name, description, acronym):
self.id = id
self.name = name
self.description = description
self.acronym = acronym
open_general_export_licence = OpenGeneralLicence(
"00000000-0000-0000-0000-000000000002",
"Open General Export Licence",
OGEL_DESCRIPTION,
"OGEL",
)
open_general_trade_control_licence = OpenGeneralLicence(
"00000000-0000-0000-0000-000000000013",
"Open General Trade Control Licence",
OGTCL_DESCRIPTION,
"OGTCL",
)
open_general_transhipment_licence = OpenGeneralLicence(
"00000000-0000-0000-0000-000000000014",
"Open General Transhipment Licence",
OGTL_DESCRIPTION,
"OGTL",
)
@classmethod
def all(cls):
return [
cls.open_general_export_licence,
cls.open_general_trade_control_licence,
cls.open_general_transhipment_licence,
]
@classmethod
def as_options(cls):
return [
Option(key=ogl.id, value=f"{ogl.name} ({ogl.acronym})", description=ogl.description) for ogl in cls.all()
]
@classmethod
def get_by_id(cls, id):
return next(ogl for ogl in cls.all() if ogl.id == id)
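# Illustrative usage (a sketch, not part of the original module):
#   ogl = OpenGeneralExportLicences.get_by_id("00000000-0000-0000-0000-000000000002")
#   ogl.acronym  # -> "OGEL"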
| 1.210938 | 1 |
grocery/migrations/0003_alter_item_comments.py | akshay-kapase/shopping | 0 | 2271 | # Generated by Django 3.2.6 on 2021-09-03 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grocery', '0002_alter_item_comments'),
]
operations = [
migrations.AlterField(
model_name='item',
name='comments',
field=models.CharField(blank=True, default='null', max_length=200),
preserve_default=False,
),
]
| 0.855469 | 1 |
pydlm/tests/base/testKalmanFilter.py | onnheimm/pydlm | 423 | 2279 | import numpy as np
import unittest
from pydlm.modeler.trends import trend
from pydlm.modeler.seasonality import seasonality
from pydlm.modeler.builder import builder
from pydlm.base.kalmanFilter import kalmanFilter
class testKalmanFilter(unittest.TestCase):
def setUp(self):
self.kf1 = kalmanFilter(discount=[1])
self.kf0 = kalmanFilter(discount=[1e-10])
self.kf11 = kalmanFilter(discount=[1, 1])
self.trend0 = trend(degree=0, discount=1, w=1.0)
self.trend0_90 = trend(degree=0, discount=0.9, w=1.0)
self.trend0_98 = trend(degree=0, discount=0.98, w=1.0, name='a')
self.trend1 = trend(degree=1, discount=1, w=1.0)
def testForwardFilter(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
        # the prior on the mean is zero, but we observe 1; with
        # discount = 1, one should expect the filtered mean to be 0.5
self.kf1.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
self.assertAlmostEqual(dlm.model.sysVar, 0.375)
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0.5)
dlm.initialize()
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
        # the prior on the mean is zero, but we observe 1; with discount = 0
        # one should expect the filtered mean to be close to 1
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 0)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.5)
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 1)
def testForwardFilterMultiDim(self):
dlm = builder()
dlm.add(seasonality(period=2, discount=1, w=1.0))
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], 0.33333333333)
self.assertAlmostEqual(dlm.model.state[1][0, 0], -0.33333333333)
self.kf11.forwardFilter(dlm.model, -1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], -0.5)
self.assertAlmostEqual(dlm.model.state[1][0, 0], 0.5)
def testBackwardSmoother(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
        # with the mean being 0 and observing 1 and 0 consecutively, one shall
        # expect the smoothed mean at time 1 to be 1/3, for discount = 1
self.kf1.forwardFilter(dlm.model, 1)
self.kf1.forwardFilter(dlm.model, 0)
        self.kf1.backwardSmoother(dlm.model,
                                  np.matrix([[0.5]]),
                                  np.matrix([[0.375]]))
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0/3)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.18518519)
# second order trend with discount = 1. The smoothed result should be
# equal to a direct fit on the three data points, 0, 1, -1. Thus, the
# smoothed observation should be 0.0
def testBackwardSmootherMultiDim(self):
dlm = builder()
dlm.add(self.trend1)
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
state1 = dlm.model.state
cov1 = dlm.model.sysVar
self.kf11.forwardFilter(dlm.model, -1)
        self.kf11.backwardSmoother(dlm.model,
                                   rawState=state1,
                                   rawSysVar=cov1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingData(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0], 1.0)
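        # None appears to be treated as a missing observation: the filter
        # inflates the observation variance to the order of 1e10 (effectively
        # uninformative), so the filtered mean is carried forward unchanged.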
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, 0)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingEvaluation(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
dlm.model.evaluation = np.matrix([[None]])
self.kf1.forwardFilter(dlm.model, 1.0, dealWithMissingEvaluation = True)
self.assertAlmostEqual(dlm.model.obs, 0.0)
self.assertAlmostEqual(dlm.model.transition, 1.0)
def testEvolveMode(self):
dlm = builder()
dlm.add(self.trend0_90)
dlm.add(self.trend0_98)
dlm.initialize()
kf2 = kalmanFilter(discount=[0.9, 0.98],
updateInnovation='component',
index=dlm.componentIndex)
kf2.forwardFilter(dlm.model, 1.0)
self.assertAlmostEqual(dlm.model.innovation[0, 1], 0.0)
self.assertAlmostEqual(dlm.model.innovation[1, 0], 0.0)
if __name__ == '__main__':
unittest.main()
| 1.890625 | 2 |
tests/test_bmipy.py | visr/bmi-python | 14 | 2287 | import pytest
from bmipy import Bmi
class EmptyBmi(Bmi):
def __init__(self):
pass
def initialize(self, config_file):
pass
def update(self):
pass
def update_until(self, then):
pass
def finalize(self):
pass
def get_var_type(self, var_name):
pass
def get_var_units(self, var_name):
pass
def get_var_nbytes(self, var_name):
pass
def get_var_itemsize(self, name):
pass
def get_var_location(self, name):
pass
def get_var_grid(self, var_name):
pass
def get_grid_rank(self, grid_id):
pass
def get_grid_size(self, grid_id):
pass
def get_value_ptr(self, var_name):
pass
def get_value(self, var_name):
pass
def get_value_at_indices(self, var_name, indices):
pass
def set_value(self, var_name, src):
pass
def set_value_at_indices(self, var_name, src, indices):
pass
def get_component_name(self):
pass
def get_input_item_count(self):
pass
def get_output_item_count(self):
pass
def get_input_var_names(self):
pass
def get_output_var_names(self):
pass
def get_grid_shape(self, grid_id):
pass
def get_grid_spacing(self, grid_id):
pass
def get_grid_origin(self, grid_id):
pass
def get_grid_type(self, grid_id):
pass
def get_start_time(self):
pass
def get_end_time(self):
pass
def get_current_time(self):
pass
def get_time_step(self):
pass
def get_time_units(self):
pass
def get_grid_edge_count(self, grid):
pass
def get_grid_edge_nodes(self, grid, edge_nodes):
pass
def get_grid_face_count(self, grid):
pass
def get_grid_face_nodes(self, grid, face_nodes):
pass
def get_grid_face_edges(self, grid, face_edges):
pass
def get_grid_node_count(self, grid):
pass
def get_grid_nodes_per_face(self, grid, nodes_per_face):
pass
def get_grid_x(self, grid, x):
pass
def get_grid_y(self, grid, y):
pass
def get_grid_z(self, grid, z):
pass
def test_bmi_not_implemented():
class MyBmi(Bmi):
pass
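    # Bmi is an abstract base class, so instantiating it while any abstract
    # method is left unimplemented raises TypeError.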
with pytest.raises(TypeError):
Bmi()
def test_bmi_implemented():
assert isinstance(EmptyBmi(), Bmi)
| 1.195313 | 1 |
chirun/plastex/color/__init__.py | sthagen/chirun-ncl-chirun | 5 | 2327 | from plasTeX import Command, Environment
def ProcessOptions(options, document):
colors = {}
document.userdata.setPath('packages/color/colors', colors)
colors['red'] = latex2htmlcolor('1,0,0')
colors['green'] = latex2htmlcolor('0,1,0')
colors['blue'] = latex2htmlcolor('0,0,1')
colors['cyan'] = latex2htmlcolor('0,1,1')
colors['magenta'] = latex2htmlcolor('1,0,1')
colors['yellow'] = latex2htmlcolor('1,1,0')
colors['white'] = latex2htmlcolor('1')
colors['black'] = latex2htmlcolor('0')
colors['gray'] = latex2htmlcolor('0.9')
colors['darkred'] = latex2htmlcolor('0.8,0,0')
colors['middlered'] = latex2htmlcolor('0.9,0,0')
colors['lightred'] = latex2htmlcolor('1,0,0')
colors['darkgreen'] = latex2htmlcolor('0,0.6,0')
colors['middlegreen'] = latex2htmlcolor('0,0.8,0')
colors['lightgreen'] = latex2htmlcolor('0,1,0')
colors['darkblue'] = latex2htmlcolor('0,0,0.8')
colors['middleblue'] = latex2htmlcolor('0,0,0.9')
colors['lightblue'] = latex2htmlcolor('0,0,1')
colors['darkcyan'] = latex2htmlcolor('0.6,0.8,0.8')
colors['middlecyan'] = latex2htmlcolor('0,0.8,0.8')
colors['darkmagenta'] = latex2htmlcolor('0.8,0.6,0.8')
colors['middlemagenta'] = latex2htmlcolor('1,0,0.6')
colors['darkyellow'] = latex2htmlcolor('0.8,0.8,0.6')
colors['middleyellow'] = latex2htmlcolor('1,1,0.2')
colors['darkgray'] = latex2htmlcolor('0.5')
colors['middlegray'] = latex2htmlcolor('0.7')
colors['lightgray'] = latex2htmlcolor('0.9')
def latex2htmlcolor(arg, model='rgb', named=None):
named = named or {}
if model == 'named':
return named.get(arg, '')
if ',' in arg:
parts = [float(x) for x in arg.split(',')]
# rgb
if len(parts) == 3:
red, green, blue = parts
red = min(int(red * 255), 255)
green = min(int(green * 255), 255)
blue = min(int(blue * 255), 255)
# cmyk
elif len(parts) == 4:
c, m, y, k = parts
red, green, blue = [int(255 * x) for x in [1 - c * (1 - k) - k, 1 - m * (1 - k) - k, 1 - y * (1 - k) - k]]
else:
return arg.strip()
else:
try:
            red = green = blue = min(float(arg) * 255, 255)  # scale 0-1 grayscale to the 0-255 range used above
except ValueError:
try:
return named[arg]
except KeyError:
return arg.strip()
return '#%.2X%.2X%.2X' % (int(red), int(green), int(blue))
class definecolor(Command):
args = 'name:str model:str color:str'
def invoke(self, tex):
a = self.parse(tex)
u = self.ownerDocument.userdata
colors = u.getPath('packages/color/colors')
colors[a['name']] = latex2htmlcolor(a['color'], a['model'], colors)
class textcolor(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class color(Environment):
args = '[ model:str ] color:str'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class pagecolor(Command):
args = '[ model:str ] color:str'
class colorbox(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class fcolorbox(Command):
args = '[ model:str ] bordercolor:str color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
self.style['border'] = ('1px solid %s'
% latex2htmlcolor(a['bordercolor'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors')))
class normalcolor(Command):
pass
| 1.09375 | 1 |
tests/test_sqlite_wrapper.py | Privex/python-db | 1 | 2383 | """
Tests related to :class:`.SqliteWrapper` / :class:`.ExampleWrapper`
"""
# from unittest import TestCase
from tests.base import *
class TestSQLiteWrapper(PrivexDBTestBase):
def test_tables_created(self):
w = self.wrp
self.assertEqual(w.db, ':memory:')
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
def test_tables_drop(self):
w = self.wrp
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
w.drop_schemas()
tables = w.list_tables()
self.assertNotIn('users', tables)
self.assertNotIn('items', tables)
def test_insert_find_user(self):
w = self.wrp
w.query_mode = 'flat'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user[1], 'John')
self.assertEqual(user[2], 'Doe')
def test_action_update(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
last_id = res.lastrowid
rows = w.action("UPDATE users SET last_name = ? WHERE first_name = ?", ['Smith', 'John'])
self.assertEqual(rows, 1)
john = w.find_user(last_id)
self.assertEqual(john['last_name'], 'Smith')
def test_find_user_dict_mode(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'John')
self.assertEqual(user['last_name'], 'Doe')
def test_find_user_nonexistent(self):
w = self.wrp
user = w.find_user(99)
self.assertIsNone(user)
def test_get_users_tuple(self):
w = self.wrp
w.query_mode = 'flat'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0][1], 'John')
self.assertEqual(users[1][1], 'Jane')
self.assertEqual(users[1][2], 'Doe')
self.assertEqual(users[2][2], 'Johnson')
def test_get_users_dict(self):
w = self.wrp
w.query_mode = 'dict'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0]['first_name'], 'John')
self.assertEqual(users[1]['first_name'], 'Jane')
self.assertEqual(users[1]['last_name'], 'Doe')
self.assertEqual(users[2]['last_name'], 'Johnson')
def test_insert_helper(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert('users', first_name='Dave', last_name='Johnson')
self.assertEqual(res.lastrowid, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'Dave')
self.assertEqual(user['last_name'], 'Johnson')
| 1.648438 | 2 |
examples/laser.py | MPI-IS/reactive_pepper | 0 | 2391 | import time
import pepper_interface
IP = "192.168.0.147"
PORT = 9559
simulation = False
with pepper_interface.get(IP,PORT,simulation) as pepper:
time.sleep(1.0)
values,time_stamp = pepper.laser.get()
    print()
    print("Front")
    print(values["Front"])
    print()
    print("Left")
    print(values["Left"])
    print()
    print("Right")
    print(values["Right"])
    print()
| 1.398438 | 1 |
bellmanford.py | asmodehn/aiokraken | 0 | 2407 | """
Bellman Ford Arbitrage implementation over websocket API.
"""
from __future__ import annotations
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
from math import log
import pandas as pd
import numpy as np
import asyncio
import typing
from aiokraken.model.assetpair import AssetPair
from aiokraken.rest import AssetPairs, Assets
from aiokraken.model.asset import Asset
from aiokraken.rest.client import RestClient
from aiokraken.websockets.publicapi import ticker
import networkx as nx
client = RestClient()
async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix):
    # For required pairs, get ticker updates
if isinstance(pairs, AssetPairs): # TODO : we need to unify iterable of pairs somehow...
properpairs = pairs
pairs = [p for p in pairs.values()]
else:
properpairs = AssetPairs({p.wsname: p for p in pairs})
tkrs = await client.ticker(pairs=[p for p in pairs])
# TODO : build price matrix
for p, tk in tkrs.items():
# retrieve the actual pair
pair = properpairs[p]
fee = pair.fees[0].get('fee')
# TODO : pick the right fee depending on total traded volume !
await pmatrix(base=pair.base, quote=pair.quote, ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee)
# TODO : 2 levels :
# - slow updates with wide list of pairs and potential interest (no fees - small data for quick compute)
# - websockets with potential arbitrage (including fees - detailed data & precise compute)
async for upd in ticker(pairs=pairs, restclient=client):
print(f"wss ==> tick: {upd}")
# update pricematrix
base = upd.pairname.base
quote = upd.pairname.quote
fee = properpairs[upd.pairname].fees[0].get('fee')
await pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee)
class PriceMatrix:
# Note This matrix is square
# since we want to do arbitrage and find cycles...
df: pd.DataFrame
# we also need to be careful that only one writer can modify data at a time...
wlock: asyncio.Lock
assets: typing.Optional[Assets]
def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]):
self.wlock = asyncio.Lock()
if isinstance(assets, Assets):
assets = [a for a in assets.values()]
self.df = pd.DataFrame(data={c.restname: {c.restname: None for c in assets} for c in assets}, columns=[c.restname for c in assets], dtype='float64')
self.assets = None
async def __call__(self, base: Asset, ask_price: Decimal, quote: Asset, bid_price: Decimal, fee_pct: Decimal):
if self.assets is None: # retrieve assets for filtering calls params, only once.
self.assets = await client.retrieve_assets()
async with self.wlock: # careful with concurrent control.
if not isinstance(base, Asset):
base = self.assets[base].restname
if not isinstance(quote, Asset):
quote = self.assets[quote].restname
# These are done with decimal, but stored as numpy floats for faster compute
self.df[quote][base] = bid_price * ((100 - fee_pct) /100) # bid price to get: quote_curr -- (buy_price - fee) --> base_curr
self.df[base][quote] = ((100 - fee_pct)/100) / ask_price # ask price to get: base_curr -- (sell_price - fee) --> quote_curr
def __getitem__(self, item):
if item not in self.df.columns:
raise KeyError(f"{item} not found")
if item not in self.df:
            return pd.Series(dtype='float64')  # empty column; pd.dtype('decimal') is not a valid pandas dtype
return self.df[item]
def __len__(self):
return len(self.df.columns)
def __str__(self):
return self.df.to_string()
def neglog(self):
if not self.assets:
return False
newpm = PriceMatrix(assets=[self.assets[c] for c in self.df.columns])
# copy all values and take -log()
for c in self.df.columns:
# TODO : fix this : is it on row, or columns ? which is best ??
newpm.df[c] = np.negative(np.log(self.df[c]))
return newpm
def to_graph(self):
G = nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph)
# from bokeh.io import output_file, show
# from bokeh.plotting import figure, from_networkx
#
# plot = figure(title="Networkx Integration Demonstration", x_range=(-1.1, 1.1), y_range=(-1.1, 1.1),
# tools="", toolbar_location=None)
#
# graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0))
# plot.renderers.append(graph)
#
# output_file("networkx_graph.html")
# show(plot)
return G
def test_pricematrix_mapping():
# testing with string for simplicity for now
pm = PriceMatrix(["EUR", "BTC"])
pm["EUR"]["BTC"] = Decimal(1.234)
pm["BTC"]["EUR"] = Decimal(4.321)
assert pm["EUR"]["BTC"] == Decimal(1.234)
assert pm["BTC"]["EUR"] == Decimal(4.321)
async def arbiter(user_assets):
assets = await client.retrieve_assets()
proper_userassets = Assets(assets_as_dict={assets[a].restname: assets[a] for a in user_assets})
assetpairs = await client.retrieve_assetpairs()
proper_userpairs = AssetPairs(assetpairs_as_dict={p.wsname:p for p in assetpairs.values()
if p.wsname is not None and (
p.base in proper_userassets or p.quote in proper_userassets
)})
# retrieving widely related assets
related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote] for p in proper_userpairs.values())
proper_related_assets = Assets({a.restname: a for a in related_assets})
pmtx = PriceMatrix(assets=proper_related_assets)
# running ticker updates in background
bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx))
try:
# observe pricematrix changes
while True:
# TODO : efficient TUI lib !
# print(pmtx)
# pricegraph = pmtx.to_graph() # display...
neglog = pmtx.neglog()
if neglog:
negcycle = bellmanford(neglog)
if len(negcycle):
amnt = 1 # arbitrary starting amount
pred = negcycle[-1]
dscr = f"{amnt} {pred}"
for cn in reversed(negcycle[:-1]):
amnt = amnt * pmtx[pred][cn]
pred = cn
dscr = dscr + f" -> {amnt} {pred}"
print(f"ARBITRAGE POSSIBLE: {dscr}")
# TODO : from these we can extract market making opportunities ??
# Another way :
# negloggraph = neglog.to_graph()
#
# negcycle = list()
#
# if nx.negative_edge_cycle(negloggraph):
# # find it !
# print("NEGATIVE CYCLE FOUND !")
#
# # Now find it
# print(f"computing cycles... {datetime.now()}")
#
# for cycle in nx.simple_cycles(negloggraph):
# # for cycle in nx.cycle_basis(negloggraph): # NOT implemented !
# # find negative weight sum (cycle need to be more than one node)
# if sum(negloggraph[n][m].get('weight') for n, m in zip(cycle, cycle[1:])) < 0:
# print(f"Found one: {cycle}")
# negcycle.append(cycle)
# print(negcycle)
# print(f"computing cycles DONE ! {datetime.now()}")
await asyncio.sleep(5)
finally:
# in every case cancel the background task now
bgtsk.cancel()
# TODO: react !
def bellmanford(pmatrix_neglog: PriceMatrix, source='ZEUR'):
n = len(pmatrix_neglog)
min_dist = {source: 0}
min_pred = {}
# Relax edges |V - 1| times
for i in range(n - 1): # iterations
for v in pmatrix_neglog.df.columns: # vertex source
if v in min_dist.keys(): # otherwise distance infinite until we know it...
for w in pmatrix_neglog.df.columns: # vertex target
if w not in min_dist.keys() or min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
min_dist[w] = min_dist[v] + pmatrix_neglog[v][w]
min_pred[w] = v
# If we can still relax edges, then we have a negative cycle
for v in pmatrix_neglog.df.columns:
if v in min_dist.keys(): # otherwise node is not yet relevant here
for w in pmatrix_neglog.df.columns:
if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
# print(f"{min_dist[w]} > {min_dist[v]} + {pmatrix_neglog[v][w]}")
path = (w, min_pred[w])
while len(set(path)) == len(path): # while no duplicates, cycle is not complete...
path = (*path, min_pred[path[-1]])
# First cycle retrieved is *likely* (?) to be the minimal one -> the only one we are interested in
return path[path.index(path[-1]):]
return ()
if __name__ == '__main__':
asyncio.run(arbiter(user_assets=["XTZ", "ETH", "XBT", "EUR"]), debug=True)
| 2.25 | 2 |
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py | AdamCoscia/eve-trajectory-mining | 0 | 2439 | # -*- coding: utf-8 -*-
"""Computes distance between killmails by text similarity.
Edit Distance Metrics
- Levenshtein Distance
- Damerau-Levenshtein Distance
- Jaro Distance
- Jaro-Winkler Distance
- Match Rating Approach Comparison
- Hamming Distance
Vector Distance Metrics
- Jaccard Similarity
- Cosine Distance
Written By: <NAME>
Updated On: 11/09/2019
"""
# Start timing
import time
start = time.time()
total = 0
def lap(msg):
"""Records time elapsed."""
global start, total
elapsed = (time.time() - start) - total
total = time.time() - start
if elapsed > 3600:
print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')
elif elapsed > 60:
if total > 3600:
print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')
else:
print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')
else:
if total > 3600:
print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')
elif total > 60:
print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')
else:
print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')
lap("Importing modules...")
from ast import literal_eval
from functools import reduce
import os
import sys
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def get_long_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of long text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
def get_short_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of short text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical and 0
        being completely different.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
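
# Quick sanity sketch (hypothetical item tuples, not from the dataset):
# identical bags of words should score ~1.0 under either metric, e.g.
#   los = [('Large Shield Extender II', 'Shield Extender')]
#   assert abs(get_long_text_cosine_distance(los, los) - 1.0) < 1e-9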
# Load CSV from local file
lap("Loading CSV data from local file...")
df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8')
df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill'])
df = df.dropna()
# Convert items column to correct data type
lap("Converting 'item' column value types...")
df['items'] = df['items'].apply(literal_eval)
# Group DataFrame by character_id and compute distance series for each group
lap("Computing cosine distances and change in kd by grouping character_id's...")
groupby = df.groupby('character_id') # group dataframe by character_id
num_groups = len(groupby) # get number of groups
count = 0 # current group number out of number of groups
groups = [] # list to append modified group dataframes to
for name, gp in groupby:
# Order the observations and prepare the dataframe
gp = (gp.sort_values(by=['killmail_id'])
.reset_index()
.drop('index', axis=1))
# Generate change in kills over change in deaths and change in kd ratio
kills1 = gp['k_count']
kills2 = gp['k_count'].shift()
deaths1 = gp['d_count']
deaths2 = gp['d_count'].shift()
idx = len(gp.columns)
gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1))
gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift())
# Generate pairs of observations sequentially to compare
pairs = []
items1 = gp['items']
items2 = gp['items'].shift()
for i in range(1, len(gp)): # Start from 1 to avoid adding nan pair
los1 = items1.iloc[i]
los2 = items2.iloc[i]
pairs.append((los2, los1))
# Generate distance series using pairs list and different metrics
# start distance series with nan due to starting range at 1
cos_dist_lt = [np.nan] # cosine distance b/w long text BoW
cos_dist_st = [np.nan] # cosine distance b/w short text BoW
for pair in pairs:
cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1]))
cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1]))
idx = len(gp.columns)
gp.insert(idx, 'cos_dist_lt', cos_dist_lt)
gp.insert(idx, 'cos_dist_st', cos_dist_st)
groups.append(gp)
# Record progress
count += 1
print(f"Progress {count/num_groups:2.1%}", end="\r")
lap("Concatenating resulting groups and writing to file...")
df_res = pd.concat(groups)
df_res.to_csv(f'data/useable_victims_distancesAndKD.csv')
lap("Exit")
| 2.765625 | 3 |
hitnet/hitnet.py | AchintyaSrivastava/HITNET-Stereo-Depth-estimation | 38 | 2447 | import tensorflow as tf
import numpy as np
import time
import cv2
from hitnet.utils_hitnet import *
drivingStereo_config = CameraConfig(0.546, 1000)
class HitNet():
def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config):
self.fps = 0
self.timeLastPrediction = time.time()
self.frameCounter = 0
self.camera_config = camera_config
# Initialize model
self.model = self.initialize_model(model_path, model_type)
def __call__(self, left_img, right_img):
return self.estimate_disparity(left_img, right_img)
def initialize_model(self, model_path, model_type):
self.model_type = model_type
with tf.io.gfile.GFile(model_path, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())  # populate the frozen GraphDef in place
# Wrap frozen graph to ConcreteFunctions
if self.model_type == ModelType.flyingthings:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs=["reference_output_disparity:0","secondary_output_disparity:0"])
else:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs="reference_output_disparity:0")
return model
def estimate_disparity(self, left_img, right_img):
input_tensor = self.prepare_input(left_img, right_img)
# Perform inference on the image
if self.model_type == ModelType.flyingthings:
left_disparity, right_disparity = self.inference(input_tensor)
self.disparity_map = left_disparity
else:
self.disparity_map = self.inference(input_tensor)
return self.disparity_map
def get_depth(self):
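        # Standard pinhole stereo relation: depth = focal_length * baseline / disparity.
        # With the DrivingStereo config above (baseline 0.546 m, f = 1000 px),
        # the result is a depth map in meters.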
return self.camera_config.f*self.camera_config.baseline/self.disparity_map
def prepare_input(self, left_img, right_img):
if (self.model_type == ModelType.eth3d):
# Shape (1, None, None, 2)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
left_img = np.expand_dims(left_img,2)
right_img = np.expand_dims(right_img,2)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
else:
# Shape (1, None, None, 6)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32)
def inference(self, input_tensor):
output = self.model(input_tensor)
return np.squeeze(output)
| 1.765625 | 2 |
kelas_2b/echa.py | barizraihan/belajarpython | 0 | 2455 | import csv
class echa:
def werehousing(self):
with open('kelas_2b/echa.csv', 'r') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
for row in csv_reader:
print("menampilkan data barang:", row[0], row[1], row[2], row[3], row[4])
| 1.4375 | 1 |
weasyl/emailer.py | akash143143/weasyl | 0 | 2471 | from __future__ import absolute_import
import re
from email.mime.text import MIMEText
from smtplib import SMTP
from weasyl import define, macro
EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z")
def normalize_address(address):
"""
Converts an e-mail address to a consistent representation.
Returns None if the given address is not considered valid.
"""
address = address.strip()
if not EMAIL_ADDRESS.match(address):
return None
local, domain = address.split("@", 1)
return "%s@%s" % (local, domain.lower())
def send(mailto, subject, content):
"""Send an e-mail.
`mailto` must be a normalized e-mail address to send this e-mail to. The
system email will be designated as the sender.
"""
message = MIMEText(content.strip())
message["To"] = mailto
message["From"] = macro.MACRO_EMAIL_ADDRESS
message["Subject"] = subject
# smtp.sendmail() only converts CR and LF (produced by MIMEText and our templates) to CRLF in Python 3. In Python 2, we need this:
msg_crlf = re.sub(r"\r\n|[\r\n]", "\r\n", message.as_string())
smtp = SMTP(define.config_read_setting('host', "localhost", section='smtp'))
try:
smtp.sendmail(
from_addr=macro.MACRO_EMAIL_ADDRESS,
to_addrs=[mailto],
msg=msg_crlf,
)
finally:
smtp.quit()
define.metric('increment', 'emails')
| 1.945313 | 2 |
examples/test_network.py | Charles-Peeke/gwu_nn | 4 | 2479 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from gwu_nn.gwu_network import GWUNetwork
from gwu_nn.layers import Dense
from gwu_nn.activation_layers import Sigmoid
np.random.seed(8)
num_obs = 8000
# Create our features to draw from two distinct 2D normal distributions
x1 = np.random.multivariate_normal([0, 0], [[1, .75],[.75, 1]], num_obs)
x2 = np.random.multivariate_normal([3, 8], [[1, .25],[.25, 1]], num_obs)
# Stack our inputs into one feature space
X = np.vstack((x1, x2))
print(X.shape)
y = np.hstack((np.zeros(num_obs), np.ones(num_obs)))
print(y.shape)
# colors = ['red'] * num_obs + ['blue'] * num_obs
# plt.figure(figsize=(12,8))
# plt.scatter(X[:, 0], X[:, 1], c = colors, alpha = 0.5)
# Lets randomly split things into training and testing sets so we don't cheat
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Create our model
network = GWUNetwork()
network.add(Dense(2, 1, True, 'sigmoid'))
network.add(Sigmoid())
#network.set_loss('mse')
network.compile('log_loss', 0.001)
network.fit(X_train, y_train, epochs=100)
from scipy.special import logit
colors = ['red'] * num_obs + ['blue'] * num_obs
plt.figure(figsize=(12, 8))
plt.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.5)
# Range of our X values
start_x1 = -5
end_x1 = 7
weights = network.layers[0].weights.reshape(-1).tolist()
bias = network.layers[0].bias[0][0]
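# The plotted line is the decision boundary: sigmoid(w0*x + w1*y + b) = 0.5,
# i.e. w0*x + w1*y + b = logit(0.5) = 0; solving for y at both ends of the
# x-range gives the two endpoints below.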
start_y = (bias + start_x1 * weights[0] - logit(0.5)) / - weights[1]
end_y = (bias + end_x1 * weights[0] - logit(0.5)) / -weights[1]
plt.plot([start_x1, end_x1], [start_y, end_y], color='grey') | 2.140625 | 2 |
pgarchives/loader/load_message.py | WeilerWebServices/PostgreSQL | 0 | 2511 | #!/usr/bin/env python3
#
# load_message.py - takes a single email or mbox formatted
# file on stdin or in a file and reads it into the database.
#
import os
import sys
from optparse import OptionParser
from configparser import ConfigParser
import psycopg2
from lib.storage import ArchivesParserStorage
from lib.mbox import MailboxBreakupParser
from lib.exception import IgnorableException
from lib.log import log, opstatus
from lib.varnish import VarnishPurger
def log_failed_message(listid, srctype, src, msg, err):
try:
msgid = msg.msgid
except Exception:
msgid = "<unknown>"
log.error("Failed to load message (msgid %s) from %s, spec %s: %s" % (msgid, srctype, src, err))
# We also put the data in the db. This happens in the main transaction
# so if the whole script dies, it goes away...
conn.cursor().execute("INSERT INTO loaderrors (listid, msgid, srctype, src, err) VALUES (%(listid)s, %(msgid)s, %(srctype)s, %(src)s, %(err)s)", {
'listid': listid,
'msgid': msgid,
'srctype': srctype,
'src': src,
        'err': str(err).encode('us-ascii', 'replace').decode('us-ascii'),  # Python 3: coerce to us-ascii with replacement
})
if __name__ == "__main__":
optparser = OptionParser()
optparser.add_option('-l', '--list', dest='list', help='Name of list to load message for')
optparser.add_option('-d', '--directory', dest='directory', help='Load all messages in directory')
optparser.add_option('-m', '--mbox', dest='mbox', help='Load all messages in mbox')
optparser.add_option('-i', '--interactive', dest='interactive', action='store_true', help='Prompt after each message')
optparser.add_option('-v', '--verbose', dest='verbose', action='store_true', help='Verbose output')
optparser.add_option('--force-date', dest='force_date', help='Override date (used for dates that can\'t be parsed)')
optparser.add_option('--filter-msgid', dest='filter_msgid', help='Only process message with given msgid')
(opt, args) = optparser.parse_args()
if (len(args)):
print("No bare arguments accepted")
optparser.print_usage()
sys.exit(1)
if not opt.list:
print("List must be specified")
optparser.print_usage()
sys.exit(1)
if opt.directory and opt.mbox:
print("Can't specify both directory and mbox!")
optparser.print_usage()
sys.exit(1)
if opt.force_date and (opt.directory or opt.mbox) and not opt.filter_msgid:
print("Can't use force_date with directory or mbox - only individual messages")
optparser.print_usage()
sys.exit(1)
if opt.filter_msgid and not (opt.directory or opt.mbox):
print("filter_msgid makes no sense without directory or mbox!")
optparser.print_usage()
sys.exit(1)
log.set(opt.verbose)
cfg = ConfigParser()
cfg.read('%s/archives.ini' % os.path.realpath(os.path.dirname(sys.argv[0])))
try:
connstr = cfg.get('db', 'connstr')
except Exception:
connstr = 'need_connstr'
conn = psycopg2.connect(connstr)
curs = conn.cursor()
# Take an advisory lock to force serialization.
# We could do this "properly" by reordering operations and using ON CONFLICT,
# but concurrency is not that important and this is easier...
try:
curs.execute("SET statement_timeout='30s'")
curs.execute("SELECT pg_advisory_xact_lock(8059944559669076)")
except Exception as e:
print(("Failed to wait on advisory lock: %s" % e))
sys.exit(1)
# Get the listid we're working on
curs.execute("SELECT listid FROM lists WHERE listname=%(list)s", {
'list': opt.list
})
r = curs.fetchall()
if len(r) != 1:
log.error("List %s not found" % opt.list)
conn.close()
sys.exit(1)
listid = r[0][0]
purges = set()
if opt.directory:
# Parse all files in directory
for x in os.listdir(opt.directory):
log.status("Parsing file %s" % x)
with open(os.path.join(opt.directory, x)) as f:
ap = ArchivesParserStorage()
ap.parse(f)
if opt.filter_msgid and not ap.is_msgid(opt.filter_msgid):
continue
try:
ap.analyze(date_override=opt.force_date)
except IgnorableException as e:
log_failed_message(listid, "directory", os.path.join(opt.directory, x), ap, e)
opstatus.failed += 1
continue
ap.store(conn, listid)
purges.update(ap.purges)
if opt.interactive:
print("Interactive mode, committing transaction")
conn.commit()
print("Proceed to next message with Enter, or input a period (.) to stop processing")
x = input()
if x == '.':
print("Ok, aborting!")
break
print("---------------------------------")
elif opt.mbox:
if not os.path.isfile(opt.mbox):
print("File %s does not exist" % opt.mbox)
sys.exit(1)
mboxparser = MailboxBreakupParser(opt.mbox)
while not mboxparser.EOF:
ap = ArchivesParserStorage()
msg = next(mboxparser)
if not msg:
break
ap.parse(msg)
if opt.filter_msgid and not ap.is_msgid(opt.filter_msgid):
continue
try:
ap.analyze(date_override=opt.force_date)
except IgnorableException as e:
log_failed_message(listid, "mbox", opt.mbox, ap, e)
opstatus.failed += 1
continue
ap.store(conn, listid)
purges.update(ap.purges)
if mboxparser.returncode():
log.error("Failed to parse mbox:")
log.error(mboxparser.stderr_output())
sys.exit(1)
else:
# Parse single message on stdin
ap = ArchivesParserStorage()
ap.parse(sys.stdin.buffer)
try:
ap.analyze(date_override=opt.force_date)
except IgnorableException as e:
log_failed_message(listid, "stdin", "", ap, e)
conn.close()
sys.exit(1)
ap.store(conn, listid)
purges.update(ap.purges)
if opstatus.stored:
log.log("Stored message with message-id %s" % ap.msgid)
conn.commit()
conn.close()
opstatus.print_status()
VarnishPurger(cfg).purge(purges)
| 1.726563 | 2 |
peter_sslers/web/lib/form_utils.py | aptise/peter_sslers | 35 | 2535 | # pypi
import six
# local
from ...lib import db as lib_db
from ...lib import utils
from ...model import objects as model_objects
from ...model import utils as model_utils
from . import formhandling
# ==============================================================================
def decode_args(getcreate_args):
"""
support for Python2/3
"""
if six.PY3:
for (k, v) in list(getcreate_args.items()):
if isinstance(v, bytes):
getcreate_args[k] = v.decode("utf8")
return getcreate_args
# standardized mapping for `model_utils.DomainsChallenged` to a formStash
DOMAINS_CHALLENGED_FIELDS = {
"http-01": "domain_names_http01",
"dns-01": "domain_names_dns01",
}
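# e.g. the challenge types arrive split across two form fields:
#   formStash.results["domain_names_http01"] -> domains_challenged["http-01"]
#   formStash.results["domain_names_dns01"]  -> domains_challenged["dns-01"]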
class AcmeAccountUploadParser(object):
"""
An AcmeAccount may be uploaded multiple ways:
* a single PEM file
* an intra-associated three file triplet from a Certbot installation
This parser operates on a validated FormEncode results object (via `pyramid_formencode_classic`)
"""
# overwritten in __init__
getcreate_args = None
formStash = None
# tracked
acme_account_provider_id = None
account_key_pem = None
le_meta_jsons = None
le_pkey_jsons = None
le_reg_jsons = None
private_key_cycle_id = None
private_key_technology_id = None
upload_type = None # pem OR letsencrypt
def __init__(self, formStash):
self.formStash = formStash
self.getcreate_args = {}
def require_new(self, require_contact=None, require_technology=True):
"""
routine for creating a NEW AcmeAccount (peter_sslers generates the credentials)
:param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic
:param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic
"""
formStash = self.formStash
acme_account_provider_id = formStash.results.get(
"acme_account_provider_id", None
)
if acme_account_provider_id is None:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="acme_account_provider_id", message="No provider submitted."
)
private_key_cycle = formStash.results.get("account__private_key_cycle", None)
if private_key_cycle is None:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="account__private_key_cycle",
message="No PrivateKey cycle submitted.",
)
private_key_cycle_id = model_utils.PrivateKeyCycle.from_string(
private_key_cycle
)
private_key_technology_id = None
private_key_technology = formStash.results.get(
"account__private_key_technology", None
)
if private_key_technology:
private_key_technology_id = model_utils.KeyTechnology.from_string(
private_key_technology
)
if not private_key_technology_id and require_technology:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="account__private_key_technology",
message="No PrivateKey technology submitted.",
)
contact = formStash.results.get("account__contact", None)
if not contact and require_contact:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="account__contact",
message="`account__contact` is required.",
)
getcreate_args = {}
self.contact = getcreate_args["contact"] = contact
self.acme_account_provider_id = getcreate_args[
"acme_account_provider_id"
] = acme_account_provider_id
self.private_key_cycle_id = getcreate_args[
"private_key_cycle_id"
] = private_key_cycle_id
self.private_key_technology_id = getcreate_args[
"private_key_technology_id"
] = private_key_technology_id
self.getcreate_args = decode_args(getcreate_args)
def require_upload(self, require_contact=None, require_technology=None):
"""
        routine for uploading an existing AcmeAccount+AcmeAccountKey
:param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic
:param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic
"""
formStash = self.formStash
# -------------------
# do a quick parse...
requirements_either_or = (
(
"account_key_file_pem",
# "acme_account_provider_id",
),
(
"account_key_file_le_meta",
"account_key_file_le_pkey",
"account_key_file_le_reg",
),
)
failures = []
passes = []
for idx, option_set in enumerate(requirements_either_or):
option_set_results = [
True if formStash.results[option_set_item] is not None else False
for option_set_item in option_set
]
# if we have any item, we need all of them
if any(option_set_results):
if not all(option_set_results):
failures.append(
"If any of %s is provided, all must be provided."
% str(option_set)
)
else:
passes.append(idx)
if (len(passes) != 1) or failures:
# `formStash.fatal_form()` will raise `FormInvalid()`
formStash.fatal_form(
"You must upload `account_key_file_pem` or all of (`account_key_file_le_meta`, `account_key_file_le_pkey`, `account_key_file_le_reg`)."
)
# -------------------
# validate the provider option
# will be None unless a pem is uploaded
# required for PEM, ignored otherwise
acme_account_provider_id = formStash.results.get(
"acme_account_provider_id", None
)
private_key_cycle = formStash.results.get("account__private_key_cycle", None)
if private_key_cycle is None:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="account__private_key_cycle",
message="No PrivateKey cycle submitted.",
)
private_key_cycle_id = model_utils.PrivateKeyCycle.from_string(
private_key_cycle
)
private_key_technology_id = None
private_key_technology = formStash.results.get(
"account__private_key_technology", None
)
if private_key_technology is not None:
private_key_technology_id = model_utils.KeyTechnology.from_string(
private_key_technology
)
if not private_key_technology_id and require_technology:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="account__private_key_technology",
message="No PrivateKey technology submitted.",
)
# require `contact` when uploading a PEM file
if formStash.results["account_key_file_pem"] is not None:
require_contact = True
contact = formStash.results.get("account__contact")
if not contact and require_contact:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="account__contact",
message="`account__contact` is required.",
)
getcreate_args = {}
self.contact = getcreate_args["contact"] = contact
self.private_key_cycle_id = getcreate_args[
"private_key_cycle_id"
] = private_key_cycle_id
self.private_key_technology_id = getcreate_args[
"private_key_technology_id"
] = private_key_technology_id
if formStash.results["account_key_file_pem"] is not None:
if acme_account_provider_id is None:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="acme_account_provider_id", message="No provider submitted."
)
self.upload_type = "pem"
self.acme_account_provider_id = getcreate_args[
"acme_account_provider_id"
] = acme_account_provider_id
self.account_key_pem = getcreate_args[
"key_pem"
] = formhandling.slurp_file_field(formStash, "account_key_file_pem")
else:
# note that we use `jsonS` to indicate a string
self.le_meta_jsons = getcreate_args[
"le_meta_jsons"
] = formhandling.slurp_file_field(formStash, "account_key_file_le_meta")
self.le_pkey_jsons = getcreate_args[
"le_pkey_jsons"
] = formhandling.slurp_file_field(formStash, "account_key_file_le_pkey")
self.le_reg_jsons = getcreate_args[
"le_reg_jsons"
] = formhandling.slurp_file_field(formStash, "account_key_file_le_reg")
self.getcreate_args = decode_args(getcreate_args)
class _PrivateKeyUploadParser(object):
"""
A PrivateKey is not a complex upload to parse itself
This code exists to mimic the AcmeAccount uploading.
"""
# overwritten in __init__
getcreate_args = None
formStash = None
# tracked
private_key_pem = None
upload_type = None # pem
def __init__(self, formStash):
self.formStash = formStash
self.getcreate_args = {}
def require_upload(self):
"""
        routine for uploading an existing PrivateKey
"""
formStash = self.formStash
getcreate_args = {}
if formStash.results["private_key_file_pem"] is not None:
self.upload_type = "pem"
self.private_key_pem = getcreate_args[
"key_pem"
] = formhandling.slurp_file_field(formStash, "private_key_file_pem")
self.getcreate_args = decode_args(getcreate_args)
class _AcmeAccountSelection(object):
"""
Class used to manage an uploaded AcmeAccount
"""
selection = None
upload_parsed = None # instance of AcmeAccountUploadParser or None
AcmeAccount = None
class _PrivateKeySelection(object):
selection = None
upload_parsed = None # instance of AcmeAccountUploadParser or None
private_key_strategy__requested = None
PrivateKey = None
@property
def private_key_strategy_id__requested(self):
return model_utils.PrivateKeyStrategy.from_string(
self.private_key_strategy__requested
)
def parse_AcmeAccountSelection(
request,
formStash,
account_key_option=None,
allow_none=None,
require_contact=None,
):
"""
:param formStash: an instance of `pyramid_formencode_classic.FormStash`
:param account_key_option:
:param allow_none:
:param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic
"""
account_key_pem = None
account_key_pem_md5 = None
dbAcmeAccount = None
is_global_default = None
# handle the explicit-option
acmeAccountSelection = _AcmeAccountSelection()
if account_key_option == "account_key_file":
# this will handle form validation and raise errors.
parser = AcmeAccountUploadParser(formStash)
# this will have: `contact`, `private_key_cycle`, `private_key_technology`
parser.require_upload(require_contact=require_contact)
# update our object
acmeAccountSelection.selection = "upload"
acmeAccountSelection.upload_parsed = parser
return acmeAccountSelection
else:
if account_key_option == "account_key_global_default":
acmeAccountSelection.selection = "global_default"
account_key_pem_md5 = formStash.results["account_key_global_default"]
is_global_default = True
elif account_key_option == "account_key_existing":
acmeAccountSelection.selection = "existing"
account_key_pem_md5 = formStash.results["account_key_existing"]
elif account_key_option == "account_key_reuse":
acmeAccountSelection.selection = "reuse"
account_key_pem_md5 = formStash.results["account_key_reuse"]
elif account_key_option == "none":
if not allow_none:
# `formStash.fatal_form()` will raise `FormInvalid()`
formStash.fatal_form(
"This form does not support no AcmeAccount selection."
)
# note the lowercase "none"; this is an explicit "no item" selection
# only certain routes allow this
acmeAccountSelection.selection = "none"
account_key_pem_md5 = None
return acmeAccountSelection
else:
formStash.fatal_form(
message="Invalid `account_key_option`",
)
if not account_key_pem_md5:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field=account_key_option, message="You did not provide a value"
)
dbAcmeAccount = lib_db.get.get__AcmeAccount__by_pemMd5(
request.api_context, account_key_pem_md5, is_active=True
)
if not dbAcmeAccount:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field=account_key_option,
message="The selected AcmeAccount is not enrolled in the system.",
)
if is_global_default and not dbAcmeAccount.is_global_default:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field=account_key_option,
message="The selected AcmeAccount is not the current default.",
)
acmeAccountSelection.AcmeAccount = dbAcmeAccount
return acmeAccountSelection
# `formStash.fatal_form()` will raise `FormInvalid()`
formStash.fatal_form("There was an error validating your form.")
def parse_PrivateKeySelection(request, formStash, private_key_option=None):
private_key_pem = None
private_key_pem_md5 = None
PrivateKey = None # :class:`model.objects.PrivateKey`
# handle the explicit-option
privateKeySelection = _PrivateKeySelection()
if private_key_option == "private_key_file":
# this will handle form validation and raise errors.
parser = _PrivateKeyUploadParser(formStash)
parser.require_upload()
# update our object
privateKeySelection.selection = "upload"
privateKeySelection.upload_parsed = parser
privateKeySelection.private_key_strategy__requested = (
model_utils.PrivateKeySelection_2_PrivateKeyStrategy["upload"]
)
return privateKeySelection
else:
if private_key_option == "private_key_existing":
privateKeySelection.selection = "existing"
privateKeySelection.private_key_strategy__requested = (
model_utils.PrivateKeySelection_2_PrivateKeyStrategy["existing"]
)
private_key_pem_md5 = formStash.results["private_key_existing"]
elif private_key_option == "private_key_reuse":
privateKeySelection.selection = "reuse"
privateKeySelection.private_key_strategy__requested = (
model_utils.PrivateKeySelection_2_PrivateKeyStrategy["reuse"]
)
private_key_pem_md5 = formStash.results["private_key_reuse"]
elif private_key_option in (
"private_key_generate",
"private_key_for_account_key",
):
dbPrivateKey = lib_db.get.get__PrivateKey__by_id(request.api_context, 0)
if not dbPrivateKey:
formStash.fatal_field(
field=private_key_option,
message="Could not load the placeholder PrivateKey.",
)
privateKeySelection.PrivateKey = dbPrivateKey
if private_key_option == "private_key_generate":
privateKeySelection.selection = "generate"
privateKeySelection.private_key_strategy__requested = (
model_utils.PrivateKeySelection_2_PrivateKeyStrategy["generate"]
)
elif private_key_option == "private_key_for_account_key":
privateKeySelection.selection = "private_key_for_account_key"
privateKeySelection.private_key_strategy__requested = (
model_utils.PrivateKeySelection_2_PrivateKeyStrategy[
"private_key_for_account_key"
]
)
return privateKeySelection
else:
# `formStash.fatal_form()` will raise `FormInvalid()`
formStash.fatal_form("Invalid `private_key_option`")
if not private_key_pem_md5:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field=private_key_option, message="You did not provide a value"
)
dbPrivateKey = lib_db.get.get__PrivateKey__by_pemMd5(
request.api_context, private_key_pem_md5, is_active=True
)
if not dbPrivateKey:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field=private_key_option,
message="The selected PrivateKey is not enrolled in the system.",
)
privateKeySelection.PrivateKey = dbPrivateKey
return privateKeySelection
# `formStash.fatal_form()` will raise `FormInvalid()`
formStash.fatal_form("There was an error validating your form.")
def form_key_selection(request, formStash, require_contact=None):
"""
:param formStash: an instance of `pyramid_formencode_classic.FormStash`
:param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic
"""
acmeAccountSelection = parse_AcmeAccountSelection(
request,
formStash,
account_key_option=formStash.results["account_key_option"],
require_contact=require_contact,
)
if acmeAccountSelection.selection == "upload":
key_create_args = acmeAccountSelection.upload_parsed.getcreate_args
key_create_args["event_type"] = "AcmeAccount__insert"
key_create_args[
"acme_account_key_source_id"
] = model_utils.AcmeAccountKeySource.from_string("imported")
(dbAcmeAccount, _is_created,) = lib_db.getcreate.getcreate__AcmeAccount(
request.api_context, **key_create_args
)
acmeAccountSelection.AcmeAccount = dbAcmeAccount
privateKeySelection = parse_PrivateKeySelection(
request,
formStash,
private_key_option=formStash.results["private_key_option"],
)
if privateKeySelection.selection == "upload":
key_create_args = privateKeySelection.upload_parsed.getcreate_args
key_create_args["event_type"] = "PrivateKey__insert"
key_create_args[
"private_key_source_id"
] = model_utils.PrivateKeySource.from_string("imported")
key_create_args["private_key_type_id"] = model_utils.PrivateKeyType.from_string(
"standard"
)
(
dbPrivateKey,
_is_created,
) = lib_db.getcreate.getcreate__PrivateKey__by_pem_text(
request.api_context, **key_create_args
)
privateKeySelection.PrivateKey = dbPrivateKey
elif privateKeySelection.selection == "generate":
dbPrivateKey = lib_db.get.get__PrivateKey__by_id(request.api_context, 0)
if not dbPrivateKey:
formStash.fatal_field(
field="private_key_option",
message="Could not load the placeholder PrivateKey for autogeneration.",
)
privateKeySelection.PrivateKey = dbPrivateKey
return (acmeAccountSelection, privateKeySelection)
def form_domains_challenge_typed(request, formStash, http01_only=False):
domains_challenged = model_utils.DomainsChallenged()
domain_names_all = []
try:
# 1: iterate over the submitted domains by segment
for (target_, source_) in DOMAINS_CHALLENGED_FIELDS.items():
submitted_ = formStash.results.get(source_)
if submitted_:
# this function checks the domain names match a simple regex
# it will raise a `ValueError("invalid domain")` on the first invalid domain
submitted_ = utils.domains_from_string(submitted_)
if submitted_:
domain_names_all.extend(submitted_)
domains_challenged[target_] = submitted_
# 2: ensure there are domains
if not domain_names_all:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="Error_Main",
message="no domain names submitted",
)
# 3: ensure there is no overlap
domain_names_all_set = set(domain_names_all)
if len(domain_names_all) != len(domain_names_all_set):
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="Error_Main",
message="a domain name can only be associated to one challenge type",
)
# 4: maybe we only want http01 domains submitted?
if http01_only:
for (k, v) in domains_challenged.items():
if k == "http-01":
continue
if v:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="Error_Main",
message="only http-01 domains are accepted by this form",
)
except ValueError as exc:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="Error_Main", message="invalid domain names detected"
)
return domains_challenged
def form_single_domain_challenge_typed(request, formStash, challenge_type="http-01"):
domains_challenged = model_utils.DomainsChallenged()
# this function checks the domain names match a simple regex
domain_names = utils.domains_from_string(formStash.results["domain_name"])
if not domain_names:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(field="domain_name", message="Found no domain names")
if len(domain_names) != 1:
# `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
formStash.fatal_field(
field="domain_name",
message="This endpoint currently supports only 1 domain name",
)
domains_challenged[challenge_type] = domain_names
return domains_challenged
| 1.578125 | 2 |
tests/zone_api_test/core/zone_manager_test.py | yfaway/zone-apis | 1 | 2551 | from zone_api.core.zone_manager import ZoneManager
from zone_api import platform_encapsulator as pe
from zone_api.core.zone import Zone
from zone_api.core.zone_event import ZoneEvent
from zone_api.core.devices.dimmer import Dimmer
from zone_api.core.devices.switch import Fan, Light, Switch
from zone_api.core.devices.illuminance_sensor import IlluminanceSensor
from zone_api.core.devices.motion_sensor import MotionSensor
from zone_api.core.actions.turn_on_switch import TurnOnSwitch
from zone_api_test.core.device_test import DeviceTest
ILLUMINANCE_THRESHOLD_IN_LUX = 8
INVALID_ITEM_NAME = 'invalid item name'
class ZoneManagerTest(DeviceTest):
""" Unit tests for zone_manager.py. """
def setUp(self):
items = [pe.create_switch_item('TestLightName'),
pe.create_switch_item('TestMotionSensorName'),
pe.create_number_item('IlluminanceSensorName'),
pe.create_string_item('AstroSensorName'),
pe.create_dimmer_item('TestDimmerName'),
pe.create_switch_item('TestFanName'),
]
self.set_items(items)
super(ZoneManagerTest, self).setUp()
[self.lightItem, self.motionSensorItem,
self.illuminanceSensorItem, self.astroSensorItem, self.dimmerItem,
self.fanItem] = items
self.illuminanceSensor = IlluminanceSensor(self.illuminanceSensorItem)
self.light = Light(self.lightItem, 2,
ILLUMINANCE_THRESHOLD_IN_LUX)
self.motionSensor = MotionSensor(self.motionSensorItem)
self.dimmer = Dimmer(self.dimmerItem, 2, 100, "0-23:59")
self.fan = Fan(self.fanItem, 2)
self.zm = ZoneManager()
def tearDown(self):
self.zm.stop_auto_report_watch_dog()
self.fan._cancel_timer()
self.dimmer._cancel_timer()
self.light._cancel_timer()
super(ZoneManagerTest, self).tearDown()
def testAddZone_validZone_zoneAdded(self):
zone1 = Zone('ff')
self.zm.add_zone(zone1)
self.assertEqual(1, len(self.zm.get_zones()))
zone2 = Zone('2f')
self.zm.add_zone(zone2)
self.assertEqual(2, len(self.zm.get_zones()))
def testGetZoneById_validZoneId_returnValidZone(self):
zone1 = Zone('ff')
self.zm.add_zone(zone1)
zone2 = Zone('2f')
self.zm.add_zone(zone2)
self.assertEqual(zone1.get_name(),
self.zm.get_zone_by_id(zone1.get_id()).get_name())
self.assertEqual(zone2.get_name(),
self.zm.get_zone_by_id(zone2.get_id()).get_name())
def testGetZoneById_invalidZoneId_returnNone(self):
self.assertTrue(self.zm.get_zone_by_id('invalid zone id') is None)
def testRemoveZone_validZone_zoneRemoved(self):
zone1 = Zone('ff')
self.zm.add_zone(zone1)
zone2 = Zone('2f')
self.zm.add_zone(zone2)
self.assertEqual(2, len(self.zm.get_zones()))
self.zm.remove_zone(zone1)
self.assertEqual(1, len(self.zm.get_zones()))
self.zm.remove_zone(zone2)
self.assertEqual(0, len(self.zm.get_zones()))
def testContainingZone_validDevice_returnsCorrectZone(self):
zone1 = Zone('ff').add_device(self.light)
zone2 = Zone('sf').add_device(self.fan)
self.zm.add_zone(zone1)
self.zm.add_zone(zone2)
self.assertEqual(zone1,
self.zm.get_immutable_instance().get_containing_zone(self.light))
self.assertEqual(zone2,
self.zm.get_immutable_instance().get_containing_zone(self.fan))
def testContainingZone_invalidDevice_returnsNone(self):
zone1 = Zone('ff').add_device(self.light)
self.zm.add_zone(zone1)
self.assertEqual(None,
self.zm.get_immutable_instance().get_containing_zone(self.fan))
def testGetDevicesByType_variousScenarios_returnsCorrectList(self):
zone1 = Zone('ff').add_device(self.light)
zone2 = Zone('sf').add_device(self.fan)
self.zm.add_zone(zone1)
self.zm.add_zone(zone2)
self.assertEqual(2, len(self.zm.get_zones()))
self.assertEqual(1, len(self.zm.get_devices_by_type(Light)))
self.assertEqual(2, len(self.zm.get_devices_by_type(Switch)))
self.assertEqual(0, len(self.zm.get_devices_by_type(Dimmer)))
def testOnMotionSensorTurnedOn_noZone_returnsFalse(self):
self.assertFalse(self.zm.get_immutable_instance().dispatch_event(
ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, pe.create_string_item(INVALID_ITEM_NAME)))
def testOnMotionSensorTurnedOn_withNonApplicableZone_returnsFalse(self):
zone = Zone('ff', [self.light, self.motionSensor])
self.zm.add_zone(zone)
self.assertFalse(self.zm.get_immutable_instance().dispatch_event(
ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, pe.create_string_item(INVALID_ITEM_NAME)))
def testOnMotionSensorTurnedOn_withApplicableZone_returnsTrue(self):
self.assertFalse(self.light.is_on())
pe.set_number_value(self.illuminanceSensorItem, ILLUMINANCE_THRESHOLD_IN_LUX - 1)
zone = Zone('ff', [self.light, self.motionSensor, self.illuminanceSensor])
zone = zone.add_action(TurnOnSwitch())
self.zm.add_zone(zone)
self.assertTrue(self.zm.get_immutable_instance().dispatch_event(
ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, self.motionSensor.get_item()))
def testOnSwitchTurnedOn_noZone_returnsFalse(self):
self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_on(
pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME)))
def testOnSwitchTurnedOn_withNonApplicableZone_returnsFalse(self):
zone = Zone('ff', [self.light, self.motionSensor])
self.zm.add_zone(zone)
self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_on(
pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME)))
def testOnSwitchTurnedOn_withApplicableZone_returnsTrue(self):
zone = Zone('ff', [self.light, self.motionSensor])
self.zm.add_zone(zone)
self.assertTrue(self.zm.get_immutable_instance().on_switch_turned_on(
pe.get_event_dispatcher(), self.light, self.light.get_item()))
def testOnSwitchTurnedOff_noZone_returnsFalse(self):
self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_off(
pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME)))
def testOnSwitchTurnedOff_withNonApplicableZone_returnsFalse(self):
zone = Zone('ff', [self.light, self.motionSensor])
self.zm.add_zone(zone)
self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_off(
pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME)))
def testOnSwitchTurnedOff_withApplicableZone_returnsTrue(self):
zone = Zone('ff', [self.light, self.motionSensor])
self.zm.add_zone(zone)
self.assertTrue(self.zm.get_immutable_instance().on_switch_turned_off(
pe.get_event_dispatcher(), self.light, self.light.get_item()))
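# A minimal end-to-end sketch distilled from the tests above. Item creation
# may depend on the DeviceTest scaffolding in setUp, so treat this as an API
# illustration rather than standalone code.
def _zone_manager_sketch():
    zm = ZoneManager()
    light = Light(pe.create_switch_item('SketchLight'), 2, ILLUMINANCE_THRESHOLD_IN_LUX)
    zm.add_zone(Zone('kitchen').add_device(light))
    return zm.get_devices_by_type(Switch)  # -> [light]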
| 1.625 | 2 |
microbitAnim.py | SaitoYutaka/microbitAnim | 0 | 2559 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Aug 8 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class MyFrame1
###########################################################################
class MyFrame1 ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.Point( 0,0 ), size = wx.Size( 767,507 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
gbSizer1 = wx.GridBagSizer( 0, 0 )
gbSizer1.SetFlexibleDirection( wx.BOTH )
gbSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_button00 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button00.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button00, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button01 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button01.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button01, wx.GBPosition( 0, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button02 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button02.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button02, wx.GBPosition( 0, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button03 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button03.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button03, wx.GBPosition( 0, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button04 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button04.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button04, wx.GBPosition( 0, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button10 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button10.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button10, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button11 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button11.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button11, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button12 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button12.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button12, wx.GBPosition( 1, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button13 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button13.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button13, wx.GBPosition( 1, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button14 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button14.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button14, wx.GBPosition( 1, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button20 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button20.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button20, wx.GBPosition( 2, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button21 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button21.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button21, wx.GBPosition( 2, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button22 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button22.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button22, wx.GBPosition( 2, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button23 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button23.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button23, wx.GBPosition( 2, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button24 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button24.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button24, wx.GBPosition( 2, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button30 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button30.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button30, wx.GBPosition( 3, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button31 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button31.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button31, wx.GBPosition( 3, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button32 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button32.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button32, wx.GBPosition( 3, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button33 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button33.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button33, wx.GBPosition( 3, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button34 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button34.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button34, wx.GBPosition( 3, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button40 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button40.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button40, wx.GBPosition( 4, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button41 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button41.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button41, wx.GBPosition( 4, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button42 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button42.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button42, wx.GBPosition( 4, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button43 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button43.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button43, wx.GBPosition( 4, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_button44 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 )
self.m_button44.SetBackgroundColour( wx.Colour( 255, 0, 0 ) )
gbSizer1.Add( self.m_button44, wx.GBPosition( 4, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.SetSizer( gbSizer1 )
self.Layout()
self.m_menubar1 = wx.MenuBar( 0 )
self.m_menu1 = wx.Menu()
self.m_menuItem3 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Open", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.Append( self.m_menuItem3 )
self.m_menuItem1 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Save", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.Append( self.m_menuItem1 )
self.m_menuItem2 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"quit", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.Append( self.m_menuItem2 )
self.m_menubar1.Append( self.m_menu1, u"File" )
self.m_menu2 = wx.Menu()
self.m_menuItem4 = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"python", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.m_menuItem4 )
self.m_menubar1.Append( self.m_menu2, u"export" )
self.SetMenuBar( self.m_menubar1 )
self.Centre( wx.BOTH )
# Connect Events
self.m_button00.Bind( wx.EVT_BUTTON, self.onButton00Click )
self.m_button01.Bind( wx.EVT_BUTTON, self.onButton01Click )
self.m_button02.Bind( wx.EVT_BUTTON, self.onButton02Click )
self.m_button03.Bind( wx.EVT_BUTTON, self.onButton03Click )
self.m_button04.Bind( wx.EVT_BUTTON, self.onButton04Click )
self.m_button10.Bind( wx.EVT_BUTTON, self.onButton10Click )
self.m_button11.Bind( wx.EVT_BUTTON, self.onButton11Click )
self.m_button12.Bind( wx.EVT_BUTTON, self.onButton12Click )
self.m_button13.Bind( wx.EVT_BUTTON, self.onButton13Click )
self.m_button14.Bind( wx.EVT_BUTTON, self.onButton14Click )
self.m_button20.Bind( wx.EVT_BUTTON, self.onButton20Click )
self.m_button21.Bind( wx.EVT_BUTTON, self.onButton21Click )
self.m_button22.Bind( wx.EVT_BUTTON, self.onButton22Click )
self.m_button23.Bind( wx.EVT_BUTTON, self.onButton23Click )
self.m_button24.Bind( wx.EVT_BUTTON, self.onButton24Click )
self.m_button30.Bind( wx.EVT_BUTTON, self.onButton30Click )
self.m_button31.Bind( wx.EVT_BUTTON, self.onButton31Click )
self.m_button32.Bind( wx.EVT_BUTTON, self.onButton32Click )
self.m_button33.Bind( wx.EVT_BUTTON, self.onButton33Click )
self.m_button34.Bind( wx.EVT_BUTTON, self.onButton34Click )
self.m_button40.Bind( wx.EVT_BUTTON, self.onButton40Click )
self.m_button41.Bind( wx.EVT_BUTTON, self.onButton41Click )
self.m_button42.Bind( wx.EVT_BUTTON, self.onButton42Click )
self.m_button43.Bind( wx.EVT_BUTTON, self.onButton43Click )
self.m_button44.Bind( wx.EVT_BUTTON, self.onButton44Click )
self.Bind( wx.EVT_MENU, self.OnMenuOpenSelect, id = self.m_menuItem3.GetId() )
self.Bind( wx.EVT_MENU, self.OnMenuSaveSelect, id = self.m_menuItem1.GetId() )
self.Bind( wx.EVT_MENU, self.OnMenuQuitSelect, id = self.m_menuItem2.GetId() )
self.Bind( wx.EVT_MENU, self.OnExportPythonSelect, id = self.m_menuItem4.GetId() )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def onButton00Click( self, event ):
event.Skip()
def onButton01Click( self, event ):
event.Skip()
def onButton02Click( self, event ):
event.Skip()
def onButton03Click( self, event ):
event.Skip()
def onButton04Click( self, event ):
event.Skip()
def onButton10Click( self, event ):
event.Skip()
def onButton11Click( self, event ):
event.Skip()
def onButton12Click( self, event ):
event.Skip()
def onButton13Click( self, event ):
event.Skip()
def onButton14Click( self, event ):
event.Skip()
def onButton20Click( self, event ):
event.Skip()
def onButton21Click( self, event ):
event.Skip()
def onButton22Click( self, event ):
event.Skip()
def onButton23Click( self, event ):
event.Skip()
def onButton24Click( self, event ):
event.Skip()
def onButton30Click( self, event ):
event.Skip()
def onButton31Click( self, event ):
event.Skip()
def onButton32Click( self, event ):
event.Skip()
def onButton33Click( self, event ):
event.Skip()
def onButton34Click( self, event ):
event.Skip()
def onButton40Click( self, event ):
event.Skip()
def onButton41Click( self, event ):
event.Skip()
def onButton42Click( self, event ):
event.Skip()
def onButton43Click( self, event ):
event.Skip()
def onButton44Click( self, event ):
event.Skip()
def OnMenuOpenSelect( self, event ):
event.Skip()
def OnMenuSaveSelect( self, event ):
event.Skip()
def OnMenuQuitSelect( self, event ):
event.Skip()
def OnExportPythonSelect( self, event ):
event.Skip()
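# A hedged sketch of how the 5x5 grid above could be generated in a loop if
# this file were hand-written; wxFormBuilder regenerates the unrolled form,
# so this is illustrative only and the names below are hypothetical.
def _build_led_grid(parent, sizer, rows=5, cols=5):
    buttons = {}
    for r in range(rows):
        for c in range(cols):
            # same construction calls as the generated buttons above
            btn = wx.Button(parent, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(50, 50), 0)
            btn.SetBackgroundColour(wx.Colour(255, 0, 0))
            sizer.Add(btn, wx.GBPosition(r, c), wx.GBSpan(1, 1), wx.ALL, 5)
            buttons[(r, c)] = btn
    return buttons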
| 1.28125 | 1 |
dotnet/private/actions/resx_core.bzl | purkhusid/rules_dotnet | 143 | 2567 | "Actions for compiling resx files"
load(
"@io_bazel_rules_dotnet//dotnet/private:providers.bzl",
"DotnetResourceInfo",
)
def _make_runner_arglist(dotnet, source, output, resgen):
args = dotnet.actions.args()
if type(source) == "Target":
args.add_all(source.files)
else:
args.add(source)
args.add(output)
return args
def emit_resx_core(
dotnet,
name = "",
src = None,
identifier = None,
out = None,
customresgen = None):
"""The function adds an action that compiles a single .resx file into .resources file.
Returns [DotnetResourceInfo](api.md#dotnetresourceinfo).
Args:
dotnet: [DotnetContextInfo](api.md#dotnetcontextinfo).
name: name of the file to generate.
src: The .resx source file that is transformed into .resources file. Only `.resx` files are permitted.
identifier: The logical name for the resource; the name that is used to load the resource. The default is the basename of the file name (no subfolder).
out: An alternative name of the output file (if name should not be used).
customresgen: custom resgen program to use.
Returns:
DotnetResourceInfo: [DotnetResourceInfo](api.md#dotnetresourceinfo).
"""
if name == "" and out == None:
fail("either name or out must be set")
if not out:
result = dotnet.actions.declare_file(name + ".resources")
else:
result = dotnet.actions.declare_file(out)
args = _make_runner_arglist(dotnet, src, result, customresgen.files_to_run.executable.path)
    # We use the command to extract the shell path and force runfiles creation
resolve = dotnet._ctx.resolve_tools(tools = [customresgen])
inputs = src.files.to_list() if type(src) == "Target" else [src]
dotnet.actions.run(
inputs = inputs + resolve[0].to_list(),
tools = customresgen.default_runfiles.files,
outputs = [result],
executable = customresgen.files_to_run,
arguments = [args],
env = {"RUNFILES_MANIFEST_FILE": customresgen.files_to_run.runfiles_manifest.path},
mnemonic = "CoreResxCompile",
input_manifests = resolve[1],
        progress_message = (
            "Compiling resources " + dotnet.label.package + ":" + dotnet.label.name
        ),
)
return DotnetResourceInfo(
name = name,
result = result,
identifier = identifier,
)
| 1.4375 | 1 |
selfdrive/car/toyota/carcontroller.py | aolin480/openpilot | 70 | 2575 | from cereal import car
from common.numpy_fast import clip, interp
from selfdrive.car import apply_toyota_steer_torque_limits, create_gas_interceptor_command, make_can_msg
from selfdrive.car.toyota.toyotacan import create_steer_command, create_ui_command, \
create_accel_command, create_acc_cancel_command, \
create_fcw_command, create_lta_steer_command
from selfdrive.car.toyota.values import CAR, STATIC_DSU_MSGS, NO_STOP_TIMER_CAR, TSS2_CAR, \
MIN_ACC_SPEED, PEDAL_TRANSITION, CarControllerParams
from opendbc.can.packer import CANPacker
from common.op_params import opParams
VisualAlert = car.CarControl.HUDControl.VisualAlert
class CarController():
def __init__(self, dbc_name, CP, VM):
self.last_steer = 0
self.alert_active = False
self.last_standstill = False
self.standstill_req = False
self.steer_rate_limited = False
self.standstill_hack = opParams().get('standstill_hack')
self.packer = CANPacker(dbc_name)
self.gas = 0
self.accel = 0
def update(self, enabled, active, CS, frame, actuators, pcm_cancel_cmd, hud_alert,
left_line, right_line, lead, left_lane_depart, right_lane_depart):
# gas and brake
if CS.CP.enableGasInterceptor and enabled:
MAX_INTERCEPTOR_GAS = 0.5
# RAV4 has very sensitive gas pedal
if CS.CP.carFingerprint in [CAR.RAV4, CAR.RAV4H, CAR.HIGHLANDER, CAR.HIGHLANDERH]:
PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.15, 0.3, 0.0])
elif CS.CP.carFingerprint in [CAR.COROLLA]:
PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.3, 0.4, 0.0])
else:
PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.4, 0.5, 0.0])
# offset for creep and windbrake
pedal_offset = interp(CS.out.vEgo, [0.0, 2.3, MIN_ACC_SPEED + PEDAL_TRANSITION], [-.4, 0.0, 0.2])
pedal_command = PEDAL_SCALE * (actuators.accel + pedal_offset)
interceptor_gas_cmd = clip(pedal_command, 0., MAX_INTERCEPTOR_GAS)
else:
interceptor_gas_cmd = 0.
pcm_accel_cmd = clip(actuators.accel, CarControllerParams.ACCEL_MIN, CarControllerParams.ACCEL_MAX)
# steer torque
new_steer = int(round(actuators.steer * CarControllerParams.STEER_MAX))
apply_steer = apply_toyota_steer_torque_limits(new_steer, self.last_steer, CS.out.steeringTorqueEps, CarControllerParams)
self.steer_rate_limited = new_steer != apply_steer
# Cut steering while we're in a known fault state (2s)
if not enabled or CS.steer_state in [9, 25] or abs(CS.out.steeringRateDeg) > 100:
apply_steer = 0
apply_steer_req = 0
else:
apply_steer_req = 1
# TODO: probably can delete this. CS.pcm_acc_status uses a different signal
# than CS.cruiseState.enabled. confirm they're not meaningfully different
if not enabled and CS.pcm_acc_status:
pcm_cancel_cmd = 1
# on entering standstill, send standstill request
if CS.out.standstill and not self.last_standstill and CS.CP.carFingerprint not in NO_STOP_TIMER_CAR and not self.standstill_hack:
self.standstill_req = True
if CS.pcm_acc_status != 8:
# pcm entered standstill or it's disabled
self.standstill_req = False
self.last_steer = apply_steer
self.last_standstill = CS.out.standstill
can_sends = []
#*** control msgs ***
#print("steer {0} {1} {2} {3}".format(apply_steer, min_lim, max_lim, CS.steer_torque_motor)
# toyota can trace shows this message at 42Hz, with counter adding alternatively 1 and 2;
# sending it at 100Hz seem to allow a higher rate limit, as the rate limit seems imposed
# on consecutive messages
can_sends.append(create_steer_command(self.packer, apply_steer, apply_steer_req, frame))
if frame % 2 == 0 and CS.CP.carFingerprint in TSS2_CAR:
can_sends.append(create_lta_steer_command(self.packer, 0, 0, frame // 2))
# LTA mode. Set ret.steerControlType = car.CarParams.SteerControlType.angle and whitelist 0x191 in the panda
# if frame % 2 == 0:
# can_sends.append(create_steer_command(self.packer, 0, 0, frame // 2))
# can_sends.append(create_lta_steer_command(self.packer, actuators.steeringAngleDeg, apply_steer_req, frame // 2))
# we can spam can to cancel the system even if we are using lat only control
if (frame % 3 == 0 and CS.CP.openpilotLongitudinalControl) or pcm_cancel_cmd:
lead = lead or CS.out.vEgo < 12. # at low speed we always assume the lead is present so ACC can be engaged
# Lexus IS uses a different cancellation message
if pcm_cancel_cmd and CS.CP.carFingerprint in [CAR.LEXUS_IS, CAR.LEXUS_RC]:
can_sends.append(create_acc_cancel_command(self.packer))
elif CS.CP.openpilotLongitudinalControl:
can_sends.append(create_accel_command(self.packer, pcm_accel_cmd, pcm_cancel_cmd, self.standstill_req, lead, CS.acc_type, CS.distance_btn))
self.accel = pcm_accel_cmd
else:
can_sends.append(create_accel_command(self.packer, 0, pcm_cancel_cmd, False, lead, CS.acc_type, CS.distance_btn))
if frame % 2 == 0 and CS.CP.enableGasInterceptor and CS.CP.openpilotLongitudinalControl:
# send exactly zero if gas cmd is zero. Interceptor will send the max between read value and gas cmd.
# This prevents unexpected pedal range rescaling
can_sends.append(create_gas_interceptor_command(self.packer, interceptor_gas_cmd, frame // 2))
self.gas = interceptor_gas_cmd
    # ui msg is at 100Hz but we send asap if:
# - there is something to display
# - there is something to stop displaying
fcw_alert = hud_alert == VisualAlert.fcw
steer_alert = hud_alert in [VisualAlert.steerRequired, VisualAlert.ldw]
send_ui = False
if ((fcw_alert or steer_alert) and not self.alert_active) or \
(not (fcw_alert or steer_alert) and self.alert_active):
send_ui = True
self.alert_active = not self.alert_active
elif pcm_cancel_cmd:
# forcing the pcm to disengage causes a bad fault sound so play a good sound instead
send_ui = True
if (frame % 100 == 0 or send_ui):
can_sends.append(create_ui_command(self.packer, steer_alert, pcm_cancel_cmd, left_line, right_line, left_lane_depart, right_lane_depart, enabled))
if frame % 100 == 0 and CS.CP.enableDsu:
can_sends.append(create_fcw_command(self.packer, fcw_alert))
# *** static msgs ***
for (addr, cars, bus, fr_step, vl) in STATIC_DSU_MSGS:
if frame % fr_step == 0 and CS.CP.enableDsu and CS.CP.carFingerprint in cars:
can_sends.append(make_can_msg(addr, vl, bus))
new_actuators = actuators.copy()
new_actuators.steer = apply_steer / CarControllerParams.STEER_MAX
new_actuators.accel = self.accel
new_actuators.gas = self.gas
return new_actuators, can_sends
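# A standalone sketch of the PEDAL_SCALE interpolation above; interp is
# re-implemented locally so the snippet runs without openpilot, and the
# breakpoint speeds below are placeholders for MIN_ACC_SPEED and
# MIN_ACC_SPEED + PEDAL_TRANSITION (the real values live in values.py).
def _pedal_scale_sketch(v_ego, min_acc_speed=8.5, transition=4.5):
    xs = [0.0, min_acc_speed, min_acc_speed + transition]
    ys = [0.4, 0.5, 0.0]  # default branch: ramp up, then fade out as the PCM takes over
    if v_ego <= xs[0]:
        return ys[0]
    for x0, x1, y0, y1 in zip(xs, xs[1:], ys, ys[1:]):
        if v_ego <= x1:
            return y0 + (y1 - y0) * (v_ego - x0) / (x1 - x0)
    return ys[-1]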
| 1.742188 | 2 |
easyric/tests/test_io_geotiff.py | HowcanoeWang/EasyRIC | 12 | 2607 | import pyproj
import pytest
import numpy as np
from easyric.io import geotiff, shp
from skimage.io import imread
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
def test_prase_header_string_width():
out_dict = geotiff._prase_header_string("* 256 image_width (1H) 13503")
assert out_dict['width'] == 13503
def test_prase_header_string_length():
out_dict = geotiff._prase_header_string("* 257 image_length (1H) 19866")
assert out_dict['length'] == 19866
def test_prase_header_string_scale():
in_str = "* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0"
out_dict = geotiff._prase_header_string(in_str)
assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004)
def test_prase_header_string_tie_point():
in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,"
out_dict = geotiff._prase_header_string(in_str)
assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)
in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0"
out_dict = geotiff._prase_header_string(in_str)
assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)
def test_prase_header_string_nodata():
out_dict = geotiff._prase_header_string("* 42113 gdal_nodata (7s) b'-10000'")
assert out_dict['nodata'] == -10000
def test_prase_header_string_proj_normal(capsys):
in_str = "* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 54N|WGS 84|'"
out_dict = geotiff._prase_header_string(in_str)
captured = capsys.readouterr()
assert f"[io][geotiff][GeoCorrd] Comprehense [{in_str}]" in captured.out
assert out_dict['proj'] == pyproj.CRS.from_epsg(32654)
def test_prase_header_string_proj_error(capsys):
    # should fail because the full "WGS 84 / UTM zone 54N" string is required
out_dict = geotiff._prase_header_string("* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'")
captured = capsys.readouterr()
assert '[io][geotiff][GeoCorrd] Generation failed, because [Input is not a CRS: UTM zone 54N]' in captured.out
assert out_dict['proj'] == None
def test_get_imarray_without_header(capsys):
pass
def test_get_imarray_with_header(capsys):
pass
def test_point_query_one_point():
point = (368023.004, 3955500.669)
out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point)
np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3)
def test_point_query_numpy_points():
points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]])
out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points)
expected = np.asarray([97.624344, 97.59617])
np.testing.assert_almost_equal(out, expected, decimal=3)
def test_point_query_list_numpy_points():
points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]])
point = np.asarray([[368023.004, 3955500.669]])
p_list = [point, points]
expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])]
out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list)
assert type(expected) == type(out)
np.testing.assert_almost_equal(expected[0], out[0], decimal=3)
np.testing.assert_almost_equal(expected[1], out[1], decimal=3)
def test_point_query_wrong_types():
# [TODO]
pass
def test_point_query_input_ndarray():
    # [TODO]
pass
def test_mean_values(capsys):
mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif')
captured = capsys.readouterr()
# When not convert to float, mean_values = 97.562584
# assert mean_ht == np.float32(97.562584)
np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3)
# another case that not working in previous version:
# Cannot convert np.nan to int, fixed by astype(float)
mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif')
captured = capsys.readouterr()
    # plain float() — np.float was removed in NumPy 1.24+
    np.testing.assert_almost_equal(mean_ht, float(72.31657466298653), decimal=3)
def test_gis2pixel2gis():
geo_head_txt = """
TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, bigtiff
Series 0: 31255x19436x4, uint8, YXS, 1 pages, not mem-mappable
Page 0: 31255x19436x4, uint8, 8 bit, rgb, lzw
* 256 image_width (1H) 19436
* 257 image_length (1H) 31255
* 258 bits_per_sample (4H) (8, 8, 8, 8)
* 259 compression (1H) 5
* 262 photometric (1H) 2
* 273 strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506, 502970, 5
* 277 samples_per_pixel (1H) 4
* 278 rows_per_strip (1H) 1
* 279 strip_byte_counts (31255Q) (464, 464, 464, 464, 464, 464, 464, 464, 464,
* 284 planar_configuration (1H) 1
* 305 software (12s) b'pix4dmapper'
* 317 predictor (1H) 2
* 338 extra_samples (1H) 2
* 339 sample_format (4H) (1, 1, 1, 1)
* 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0)
* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003,
* 34735 geo_key_directory (32H) (1, 1, 0, 7, 1024, 0, 1, 1, 1025, 0, 1, 1, 1026
* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 53N|WGS 84|'
"""
gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431],
[ 484593.41064743, 3862259.92582402],
[ 484593.64841806, 3862260.06515117],
[ 484593.93077419, 3862259.55455913],
[ 484593.67474654, 3862259.42413431]])
header = geotiff._prase_header_string(geo_head_txt)
expected_pixel = np.asarray([[16972, 26086],
[16708, 25585],
[16946, 25445],
[17228, 25956],
[16972, 26086]])
pixel_coord = geotiff.geo2pixel(gis_coord, header)
np.testing.assert_almost_equal(pixel_coord, expected_pixel)
gis_revert = geotiff.pixel2geo(pixel_coord, header)
np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3)
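# A hedged sketch of the tie-point/scale affine implied by the header above:
# model_tie_point anchors pixel (0, 0), eastings grow with columns and
# northings shrink with rows. geotiff.geo2pixel's exact rounding may differ.
def _geo2pixel_sketch(easting, northing, tie_e, tie_n, scale_x, scale_y):
    col = (easting - tie_e) / scale_x
    row = (tie_n - northing) / scale_y
    return int(col), int(row)

# e.g. the first point of test_gis2pixel2gis:
#   _geo2pixel_sketch(484593.67474654, 3862259.42413431,
#                     484576.70205, 3862285.5109300003, 0.001, 0.001)
# returns (16972, 26086), matching expected_pixel[0].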
def test_is_roi_type():
roi1 = np.asarray([[123, 456], [456, 789]])
roi2 = [roi1, roi1]
roi_wrong_1 = (123, 345)
roi_wrong_2 = [123, 456]
roi_wrong_3 = [[123, 345], [456, 789]]
roi1_out = geotiff._is_roi_type(roi1)
assert roi1_out == [roi1]
roi2_out = geotiff._is_roi_type(roi2)
assert roi2_out == roi2
with pytest.raises(TypeError) as errinfo:
roi_w1_out = geotiff._is_roi_type(roi_wrong_1)
assert 'Only numpy.ndarray points and list contains numpy.ndarray points are supported' in str(errinfo.value)
with pytest.raises(TypeError) as errinfo:
roi_w2_out = geotiff._is_roi_type(roi_wrong_2)
assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value)
with pytest.raises(TypeError) as errinfo:
roi_w3_out = geotiff._is_roi_type(roi_wrong_3)
assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value)
def test_imarray_clip_2d_rgb_rgba():
photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG'
roi = np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]])
fig, ax = plt.subplots(1,3, figsize=(12,4))
# -----------------------------------------------
imarray_rgb = imread(photo_path)
assert imarray_rgb.shape == (3456, 4608, 3)
im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi)
ax[1].imshow(im_out_rgb / 255)
ax[1].set_title('rgb')
# -----------------------------------------------
imarray_2d = rgb2gray(imarray_rgb)
assert imarray_2d.shape == (3456, 4608)
im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi)
ax[0].imshow(im_out_2d, cmap='gray')
ax[0].set_title('gray')
# -----------------------------------------------
imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255))
assert imarray_rgba.shape == (3456, 4608, 4)
im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi)
ax[2].imshow(im_out_rgba/255)
ax[2].set_title('rgba')
plt.show()
def test_clip_roi_pixel():
poly = shp.read_shp2d('file/shp_test/test.shp')
poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif'))
imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False)
assert len(imarray) == 1
def test_clip_roi_geo():
poly = shp.read_shp2d('file/shp_test/test.shp')
imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True)
assert len(imarray) == 1 | 1.5625 | 2 |
tests/web/config.py | zcqian/biothings.api | 0 | 2615 | """
Web settings to override for testing.
"""
import os
from biothings.web.settings.default import QUERY_KWARGS
# *****************************************************************************
# Elasticsearch Variables
# *****************************************************************************
ES_INDEX = 'bts_test'
ES_DOC_TYPE = 'gene'
ES_SCROLL_SIZE = 60
# *****************************************************************************
# User Input Control
# *****************************************************************************
# use a smaller size for testing
QUERY_KWARGS['GET']['facet_size']['default'] = 3
QUERY_KWARGS['GET']['facet_size']['max'] = 5
QUERY_KWARGS['POST']['q']['jsoninput'] = True
# *****************************************************************************
# Elasticsearch Query Builder
# *****************************************************************************
ALLOW_RANDOM_QUERY = True
ALLOW_NESTED_AGGS = True
USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery')
# *****************************************************************************
# Endpoints Specifics
# *****************************************************************************
STATUS_CHECK = {
'id': '1017',
'index': 'bts_test',
'doc_type': '_all'
}
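# Effect sketch (hedged): with the caps above, a GET requesting facet_size=10
# should be clamped to 5 and an omitted facet_size should default to 3,
# assuming the web handler applies QUERY_KWARGS the same way as the
# production defaults this file overrides.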
| 0.988281 | 1 |
etherbank_cli/oracles.py | ideal-money/etherbank-cli | 1 | 2639 | import click
from . import utils
@click.group()
def main():
"Simple CLI for oracles to work with Ether dollar"
pass
@main.command()
@click.option('--ether-price', type=float, help="The ether price in ether dollar")
@click.option('--collateral-ratio', type=float, help="The collateral ratio")
@click.option(
'--liquidation-duration',
type=int,
help="The liquidation duration in minutes")
@click.option(
'--private-key',
callback=utils.check_account,
    help='The private key to sign the transaction')
def vote(ether_price, collateral_ratio, liquidation_duration, private_key):
"Vote on the variable for setting up Ether Bank"
assert [ether_price, collateral_ratio, liquidation_duration
].count(None) == 2, "You should set one variable per vote"
if ether_price:
var_code = 0
value = int(ether_price * 100)
elif collateral_ratio:
var_code = 1
value = int(collateral_ratio * 1000)
elif liquidation_duration:
var_code = 2
value = liquidation_duration * 60
func = utils.contracts['oracles'].functions.vote(var_code, value)
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
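# A hypothetical invocation sketch (command name and key are placeholders):
#   $ oracles vote --ether-price 123.45 --private-key 0xabc...
# encodes var_code=0 with value=12345 (price * 100); --collateral-ratio 1.5
# would send var_code=1 with value=1500, and --liquidation-duration 90 sends
# var_code=2 with value=5400 seconds.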
@main.command()
@click.option('--oracle', required=True, help="The oracle's address")
@click.option('--score', type=int, required=True, help="The oracle's score")
@click.option(
'--private-key',
callback=utils.check_account,
    help='The private key to sign the transaction')
def set_score(oracle, score, private_key):
"Edit oracle's score"
oracle = utils.w3.toChecksumAddress(oracle)
func = utils.contracts['oracles'].functions.setScore(oracle, score)
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
@main.command()
@click.option(
'--private-key',
callback=utils.check_account,
    help='The private key to sign the transaction')
def finish_recruiting(private_key):
"Set recruiting as finished"
func = utils.contracts['oracles'].functions.finishRecruiting()
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
if __name__ == '__main__':
main()
| 1.898438 | 2 |
client/canyons-of-mars/maze.py | GamesCreatorsClub/GCC-Rover | 3 | 2679 |
#
# Copyright 2016-2019 Games Creators Club
#
# MIT License
#
import math
import pyroslib
import pyroslib.logging
import time
from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG
from rover import WheelOdos, WHEEL_NAMES
from rover import normaiseAngle, angleDiference
from challenge_utils import Action, PID
SQRT2 = math.sqrt(2)
PIhalf = math.pi / 2
class MazeAttitude:
UNKNOWN = 0
LEFT_WALL = 1
RIGHT_WALL = 2
FRONT_WALL = 4
BACK_WALL = 8
NO_GAP = 0
FORWARD_GAP = 1
SIDE_GAP = 2
POINTS = [0, 45, 90, 135, 180, 225, 270, 315]
WALLS = [90, 270, 0, 180]
L0_45 = 0
L45_90 = 45
L90_135 = 90
L135_180 = 135
L180_225 = 180
L225_270 = 225
L270_315 = 270
L315_0 = 315
LINES = [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0]
ANGLE_TOLLERANCE = 1.075
@staticmethod
def normAngle(a):
if a > PIhalf:
a = a - math.pi
elif a <= -PIhalf:
a = a + math.pi
return a
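    # e.g. normAngle(3 * math.pi / 4) == -math.pi / 4 and
    # normAngle(-3 * math.pi / 4) == math.pi / 4: angles fold into the
    # (-pi/2, pi/2] range so opposite-facing wall readings compare as parallel.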
class Line:
def __init__(self, line_index, long_point_index, short_point_index, factor, adjust):
self.line_index = line_index
self.short_point_index = short_point_index
self.long_point_index = long_point_index
self.factor = factor
self.adjust = adjust
self.angle = None
def calcAngle(self, distances):
long_distance = distances[self.long_point_index]
short_distance = distances[self.short_point_index]
if long_distance is not None and short_distance is not None:
lsqrt2 = long_distance / SQRT2
self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor + self.adjust)
else:
self.angle = None
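        # Geometry sketch: the long and short rays are 45 degrees apart, so
        # their endpoints span a right triangle with legs l/sqrt(2) and
        # l/sqrt(2) - s; atan2 of those legs gives the wall's slope, which
        # factor/adjust then rotate into this sensor pair's frame.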
class Wall:
def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index):
self.ds_angle = distance_sensor_angle
self.ds_index = distance_sensor_index
self.wall_point_kind = wall_point_kind
self.left_mid_point_index = left_mid_point_index
self.left_point_index = left_point_index
self.mid_point_index = mid_point_index
self.right_point_index = right_point_index
self.is_front_or_back = self.ds_angle == 0 or self.ds_angle == 180
self.selected_line = None
self.angle = None
self.distance = None
def setAngle(self, angle, distances):
self.angle = angle
distance = distances[self.mid_point_index]
if distance < 1:
self.distance = 0
else:
if self.is_front_or_back:
self.distance = abs(int(math.sin(angle) * distance))
else:
self.distance = abs(int(math.cos(angle) * distance))
def setAngleAndDistance(self, angle, distance):
self.angle = angle
self.distance = distance
def tryFindingWall(self, distances, lines, points):
lmline = lines[self.left_mid_point_index]
lline = lines[self.left_point_index]
mline = lines[self.mid_point_index]
rline = lines[self.right_point_index]
dlong1 = distances[lline.long_point_index]
dmid = distances[mline.short_point_index]
dlong2 = distances[mline.long_point_index]
plong1 = points[self.left_point_index]
pmid = points[self.mid_point_index]
plong2 = points[self.right_point_index]
if dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE:
points[self.mid_point_index] = points[lline.long_point_index]
angle = MazeAttitude.normAngle(mline.angle - PIhalf)
distance = distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2)
self.setAngleAndDistance(angle, distance)
elif dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE:
points[self.mid_point_index] = points[rline.long_point_index]
angle = MazeAttitude.normAngle(mline.angle + PIhalf)
distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2)
self.setAngleAndDistance(angle, distance)
elif lline.angle is not None and mline.angle is not None:
if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE:
if plong1 == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
if pmid == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
if plong2 == MazeAttitude.UNKNOWN:
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
else:
if dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
elif plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
elif lline.angle is not None and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif mline.angle is not None and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
def __init__(self):
self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi),
self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf),
self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi),
self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)}
self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135)
self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315)
self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45)
self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225)
self.left_gap = self.NO_GAP
self.right_gap = self.NO_GAP
self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall}
self.points = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
self.distances = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
def calculate(self, state):
def getPointDistance(state, angle):
distance = state.radar.radar[angle]
status = state.radar.status[angle]
if status == 0:
return distance
last_distance = state.radar.last_radar[angle]
if abs(distance - last_distance) < 100:
return distance
return None
def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall):
if wall.angle is None and self.distances[wall.ds_angle] is not None:
if preferable_wall.angle is not None:
wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index])
else:
wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index])
self.points[wall.ds_angle] = wall.wall_point_kind
self.distances = {p: getPointDistance(state, p) for p in self.POINTS}
for line in self.lines:
self.lines[line].calcAngle(self.distances)
wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not None]
wall_processing_order = sorted(wls,
key=lambda wall: self.distances[wall.ds_angle])
for wall in wall_processing_order:
wall.tryFindingWall(self.distances, self.lines, self.points)
updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall)
updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall)
updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall)
updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall)
# TODO calc gaps
class MoveForwardOnOdo(Action):
def __init__(self, agent, stop_action=None):
super(MoveForwardOnOdo, self).__init__(agent)
self.stop_action = stop_action
self.required_odo = {'fl': 0, 'fr': 0, 'bl': 0, 'br': 0}
def setRequiredOdo(self, distance):
for wheel_name in WHEEL_NAMES:
self.required_odo[wheel_name] = distance
def start(self):
super(MoveForwardOnOdo, self).start()
state = self.rover.getRoverState()
for wheel in self.required_odo:
self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel])
log(LOG_LEVEL_DEBUG, "Reset odo to " + str(self.required_odo) + "; starting...")
self.rover.command(pyroslib.publish, 300, 120)
# pyroslib.publish("move/steer", "300 120")
def end(self):
super(MoveForwardOnOdo, self).end()
def next(self):
state = self.rover.getRoverState()
do_stop = False
log(LOG_LEVEL_DEBUG, "Driving to " + str(self.required_odo))
for wheel_name in WHEEL_NAMES:
if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]:
do_stop = True
if state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0:
do_stop = True
if do_stop:
return self.stop_action
else:
return self
def execute(self):
pass
def getActionName(self):
return "Forward ODO"
class MazeAction(Action):
LEFT = -1
RIGHT = 1
def __init__(self, agent):
super(MazeAction, self).__init__(agent)
def check_next_action_conditions(self):
return self
class ChicaneAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(ChicaneAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance
self.speed = speed
self.next_action = next_action
if self.left_or_right == MazeAction.RIGHT:
self.a1 = 45
self.a2 = 90
self.a3 = 135
else:
self.a1 = 315
self.a2 = 270
self.a3 = 225
self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self)
self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None))
def start(self):
super(ChicaneAction, self).start()
def end(self):
super(ChicaneAction, self).end()
    def next(self):
        state = self.rover.getRoverState()
        if self.left_or_right == self.LEFT:
            diagonal_distance = state.radar.radar[45]
        else:
            diagonal_distance = state.radar.radar[315]
if self.left_or_right == self.LEFT and diagonal_distance > 800:
log(LOG_LEVEL_INFO, "Found second part of chicane, rfd={: 4d}".format(int(diagonal_distance)))
self.left_or_right = self.RIGHT
elif self.left_or_right == self.RIGHT and diagonal_distance > 800:
log(LOG_LEVEL_INFO, "Found end ofchicane - leaging, rfd={: 4d}".format(int(diagonal_distance)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
front_distance = state.radar.radar[0]
gain = 60
offset = 150
# Values that worked speed=150, steer=5-7, dist=4
# self.speed = 150 # 150
speed = 50 # mm/second - TODO use odo to update to correct value!
speed_steer_fudge_factor = 5 # 5-7
speed_distance_fudge_factor = 4 # 4
min_angle = 1 * math.pi / 180
steer_speed = speed * speed_steer_fudge_factor
distance_speed = speed * speed_distance_fudge_factor
if self.left_or_right == self.RIGHT:
distance = -1000000000
distance_from_wall = state.radar.radar[90]
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = math.pi / 4
if front_distance < 450:
                    angle += math.pi * (450 - front_distance) / 1800  # up to pi/4 (45 deg) extra as front_distance approaches 0
elif distance_error < 0 and distance_error < -distance_speed:
angle = -math.pi / 4
if front_distance < 450:
                    angle -= math.pi * (450 - front_distance) / 1800  # up to pi/4 (45 deg) extra as front_distance approaches 0
else:
try:
angle = math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
else:
distance = 1000000000
distance_from_wall = state.radar.radar[270]
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = -math.pi / 4
if front_distance < 450:
                    angle -= math.pi * (450 - front_distance) / 1800  # up to pi/4 (45 deg) extra as front_distance approaches 0
elif distance_error < 0 and distance_error < -distance_speed:
angle = math.pi / 4
if front_distance < 450:
                    angle += math.pi * (450 - front_distance) / 1800  # up to pi/4 (45 deg) extra as front_distance approaches 0
else:
try:
angle = -math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
distance = int(distance)
angle = int(angle * 180 / math.pi)
self.rover.command(pyroslib.publish, self.speed, angle, distance)
# pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle))
wheel_orientations = state.wheel_odos.odos
log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format(
float(time.time()),
int(front_distance),
int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error),
int(0 * 180 / math.pi), int(0), int(0 * 180 / math.pi), int(0),
int(steer_speed), int(distance_speed),
int(distance), int(angle), int(state.heading.heading),
float(state.wheel_orientations.orientations['fl'])
))
def getActionName(self):
return "Chicane " + ("L" if self.left_or_right == self.LEFT else "R")
class MazeCorridorAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(MazeCorridorAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance
self.speed = speed
self.next_action = next_action
if self.left_or_right == MazeAction.RIGHT:
self.a1 = 45
self.a2 = 90
self.a3 = 135
else:
self.a1 = 315
self.a2 = 270
self.a3 = 225
self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self)
self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self)
        # self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(10, self.speed, None))
self.been_in_chicane = False
def start(self):
super(MazeCorridorAction, self).start()
self.been_in_chicane = False
def end(self):
super(MazeCorridorAction, self).end()
    def next(self):
        state = self.rover.getRoverState()
        left_diagonal_distance = state.radar.radar[315]
        front_distance = state.radar.radar[0]
if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100:
log(LOG_LEVEL_INFO, "Front distance not correct: d={:4d} s={:2d} delta={:4d}".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0]))
else:
if state.left_front_distance_of_wall > 100 and front_distance < 550:
expected_diagonal_distance = 0
if state.left_wall_angle < 0:
expected_diagonal_distance = front_distance * 2 * math.cos(math.pi / 4 + state.left_wall_angle)
else:
expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2
if False and not self.been_in_chicane and front_distance > 300 and left_diagonal_distance > expected_diagonal_distance * 1.2:
log(LOG_LEVEL_INFO, "Found chicane... lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance)))
self.been_in_chicane = True
return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self)
else:
log(LOG_LEVEL_INFO, "Found corner - turning, lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance)))
return self.left_corner_action
if front_distance < 550 and state.radar.radar_deltas[0] < 0:
left_distances = state.radar.radar[270] + state.radar.radar[315]
right_distances = state.radar.radar[90] + state.radar.radar[45]
if left_distances > right_distances:
log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances)))
return self.left_corner_action
else:
log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances)))
return self.right_corner_action
if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall > 100 and front_distance < 700:
log(LOG_LEVEL_INFO, "Found final corner - turning to finish, rfd={: 4d} fd={: 4d} ".format(int(state.right_front_distance_of_wall), int(front_distance)))
return self.right_corner_action
return self
def execute(self):
state = self.rover.getRoverState()
left_diagonal_distance = state.radar.radar[315]
front_distance = state.radar.radar[0]
gain = 60
offset = 150
# Values that worked speed=150, steer=5-7, dist=4
# self.speed = 150 # 150
speed = 50 # mm/second - TODO use odo to update to correct value!
speed_steer_fudge_factor = 5 # 5-7
speed_distance_fudge_factor = 4 # 4
min_angle = 1 * math.pi / 180
steer_speed = speed * speed_steer_fudge_factor
distance_speed = speed * speed_distance_fudge_factor
if self.left_or_right == self.RIGHT:
wall_angle = state.right_wall_angle
if -min_angle < state.right_wall_angle < min_angle:
distance = 1000000000
else:
distance = steer_speed / state.right_wall_angle
if 0 <= distance < 150:
distance = 150
elif -150 < distance < 0:
distance = -150
distance = -distance
distance_from_wall = state.right_wall_distance
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = math.pi / 4
elif distance_error < 0 and distance_error < -distance_speed:
angle = -math.pi / 4
else:
try:
angle = math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
else:
wall_angle = state.left_wall_angle
if -min_angle < state.left_wall_angle < min_angle:
distance = 1000000000
else:
distance = steer_speed / state.left_wall_angle
if 0 <= distance < 150:
distance = 150
elif -150 < distance < 0:
distance = -150
distance_from_wall = state.left_wall_distance
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = -math.pi / 4
elif distance_error < 0 and distance_error < -distance_speed:
angle = math.pi / 4
else:
try:
angle = -math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
distance = int(distance)
angle = int(angle * 180 / math.pi)
self.rover.command(pyroslib.publish, self.speed, angle, distance)
# pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle))
wheel_orientations = state.wheel_odos.odos
#
log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format(
float(time.time()),
int(front_distance),
int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error),
int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall),
int(steer_speed), int(distance_speed),
int(distance), int(angle), int(state.heading.heading),
float(state.wheel_orientations.orientations['fl'])
))
def getActionName(self):
return "Corridor"
class MazeTurnAroundCornerAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(MazeTurnAroundCornerAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance * (1 if left_or_right == self.RIGHT else -1)
self.speed = speed
self.start_heading = 0
self.last_heading = 0
self.requested_heading = 0
self.pid = None
self.next_action = next_action
self.error = 0
def start(self):
super(MazeTurnAroundCornerAction, self).start()
state = self.rover.getRoverState()
self.start_heading = state.heading.heading
self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right == self.RIGHT else -1))
self.pid = PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference)
self.pid.process(self.requested_heading, self.start_heading)
log(LOG_LEVEL_INFO, "Starting to turn around corner at distance {:04d} at speed {:04d}, start heading {:07.3f}, requested heading {:07.3f}".format(self.distance, self.speed, self.start_heading, self.requested_heading))
self.rover.command(pyroslib.publish, self.speed, 0, self.distance)
# pyroslib.publish("move/steer", str(self.distance) + " " + str(self.speed))
def end(self):
super(MazeTurnAroundCornerAction, self).end()
    def next(self):
        state = self.rover.getRoverState()
        heading = state.heading.heading
self.error = self.pid.process(self.requested_heading, heading)
if self.left_or_right == self.LEFT and self.error > 0:
return self
elif self.left_or_right == self.RIGHT and self.error < 0:
return self
else:
if self.next_action is not None:
log(LOG_LEVEL_INFO, "Finished turning around the corner - invoking next action " + self.next_action.getActionName())
else:
log(LOG_LEVEL_INFO, "Finishing turning - no next action spectified.")
return self.next_action
def execute(self):
state = self.rover.getRoverState()
heading = state.heading.heading
last_heading = self.last_heading
self.last_heading = heading
log(LOG_LEVEL_INFO, "Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}"
.format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error))
def getActionName(self):
return "Turn-Around-Corner"
class DriverForwardForTimeAction(Action):
def __init__(self, agent, time, speed, next_action):
super(DriverForwardForTimeAction, self).__init__(agent)
self.time = time
self.speed = speed
self.next_action = next_action
def start(self):
self.rover.command(pyroslib.publish, self.speed, 0)
# pyroslib.publish("move/drive", "0 " + str(self.speed))
log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.")
def end(self):
pass
def next(self):
if self.time > 0:
self.time -= 1
log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.")
return self
return self.next_action
if __name__ == "__main__":
from rover import Radar, RoverState
radar_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
radar_last_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
radar_status = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
attitude = MazeAttitude()
radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status))
state = RoverState(None, None, None, radar, None, None)
def printWallLines(a):
if attitude.lines[a].angle is None:
print("{:3d} -> point too far - not calculated".format(a))
else:
angle = int(attitude.lines[a].angle * 180 / math.pi)
point = attitude.points[a]
if point is None:
print("{:3d} -> line at {:3d} angle".format(a, angle))
else:
if point == MazeAttitude.LEFT_WALL:
wall = "left wall"
elif point == MazeAttitude.RIGHT_WALL:
wall = "right wall"
elif point == MazeAttitude.FRONT_WALL:
wall = "front wall"
elif point == MazeAttitude.BACK_WALL:
wall = "back wall"
else:
wall = "no wall"
print("{:3d} -> line at {:3d} angle belogs to {:s}".format(a, angle, wall))
def printWall(w):
if w.angle is None:
print("Wall {:3d} -> is too far - not calculated".format(w.ds_angle))
else:
if w.distance is None:
print("Wall {:3d} -> has angle {:3d} but is too far - distance not calculated".format(w.ds_angle, int(w.angle * 180 / math.pi)))
else:
print("Wall {:3d} -> has angle {:3d} and is at {:3d}".format(w.ds_angle, int(w.angle * 180 / math.pi), w.distance))
def printWalls():
for p in attitude.points:
printWallLines(p)
for w in attitude.walls:
printWall(w)
print("----------------------------------------------------------")
# attitude.calculate(state)
# printWalls()
#
# state.radar.radar[0] = 5
# state.radar.radar[45] = SQRT2 * 5 * 0.9
# state.radar.radar[315] = SQRT2 * 17
# state.radar.radar[270] = SQRT2 * 13
# state.radar.radar[225] = SQRT2 * 12
# attitude.calculate(state)
# printWalls()
state.radar.radar[180] = 50
state.radar.radar[315] = 30
attitude.calculate(state)
printWalls()
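    # --- Hedged illustration (added here; not part of the original rover code) ---
    # The corridor action's execute() above picks a crab angle whose lateral
    # component closes distance_error within one distance_speed horizon:
    #     angle = asin(distance_error / distance_speed)
    # With illustrative numbers (assumed, not measured): distance_error = 100 mm
    # and distance_speed = 4 * 50 = 200 mm give asin(0.5) = 30 degrees.
    example_angle = math.asin(100.0 / 200.0) * 180 / math.pi
    print("example steering angle: {:.0f} degrees".format(example_angle))  # -> 30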
| 2.1875 | 2 |
python/old_password_test.py | XelaRellum/old_password | 0 | 2703 | import unittest
import pytest
from old_password import old_password
import csv
import re
@pytest.mark.parametrize("password,expected_hash", [
(None, None),
("", ""),
("a", "<PASSWORD>"),
("abc", "<PASSWORD>"),
("ä", "<PASSWORD>"),
])
def test_old_password(password, expected_hash):
assert old_password(password) == expected_hash
def test_password_with_space():
"""
spaces in password are skipped
"""
assert old_password("<PASSWORD>") == old_password("password")
def test_password_with_tab():
"""
tabs in password are skipped
"""
assert old_password("<PASSWORD>") == old_password("password")
def test_password_from_testdata():
with open("../testdata.csv", "r") as file:
for line in file:
line = line.strip()
password, expected_hash = line.split(";")
hash = old_password(password)
assert hash == expected_hash, "password: %s" % password
| 1.875 | 2 |
test_molecule.py | zee93/molecule_parser | 0 | 2711 | import unittest
from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule
class MoleculeParserTestCases(unittest.TestCase):
def test_onizing_formulas(self):
self.assertEqual(onize_formula('H'), 'H1')
self.assertEqual(onize_formula('H2O'), 'H2O1')
self.assertEqual(onize_formula('Mg(OH)2'), 'Mg1(O1H1)2')
self.assertEqual(onize_formula('K4[ON(SO3)2]2'), 'K4[O1N1(S1O3)2]2')
    def test_updating_formula_with_multiplier(self):
self.assertEqual(update_equation_with_multiplier('H1', '2'), 'H2')
self.assertEqual(update_equation_with_multiplier('K4[O1N1(SO3)2]2', '2'), 'K8[O2N2(SO6)4]4')
def test_flatting_formula(self):
self.assertEqual(flaten_formula('H2O'), 'H2O')
self.assertEqual(flaten_formula('[H1]2O'), 'H2O')
self.assertEqual(flaten_formula('M1g1(O1H1)2'), 'M1g1O2H2')
self.assertEqual(flaten_formula('K4[O1N1(S1O3)2]2'), 'K4O2N2S4O12')
def test_full_parsing(self):
parsed_mole = parse_molecule('H2O')
self.assertEqual(len(parsed_mole.keys()), 2)
self.assertEqual(parsed_mole['H'], 2)
self.assertEqual(parsed_mole['O'], 1)
parsed_mole = parse_molecule('Mg(OH)2')
self.assertEqual(len(parsed_mole.keys()), 3)
self.assertEqual(parsed_mole['H'], 2)
self.assertEqual(parsed_mole['O'], 2)
self.assertEqual(parsed_mole['Mg'], 1)
parsed_mole = parse_molecule('K4[ON(SO3)2]2')
self.assertEqual(len(parsed_mole.keys()), 4)
self.assertEqual(parsed_mole['K'], 4)
self.assertEqual(parsed_mole['O'], 14)
self.assertEqual(parsed_mole['N'], 2)
self.assertEqual(parsed_mole['S'], 4)
if __name__ == '__main__':
unittest.main()
| 1.734375 | 2 |
src/quanguru/classes/exceptions.py | Qfabiolous/QuanGuru | 0 | 2719 | # TODO: turn prints into actual raised errors; they are prints for testing
def qSystemInitErrors(init):
def newFunction(obj, **kwargs):
init(obj, **kwargs)
if obj._genericQSys__dimension is None:
className = obj.__class__.__name__
print(className + ' requires a dimension')
elif obj.frequency is None:
className = obj.__class__.__name__
print(className + ' requires a frequency')
return newFunction
def qCouplingInitErrors(init):
def newFunction(obj, *args, **kwargs):
init(obj, *args, **kwargs)
if obj.couplingOperators is None: # pylint: disable=protected-access
className = obj.__class__.__name__
            print(className + ' requires coupling functions')
elif obj.coupledSystems is None: # pylint: disable=protected-access
className = obj.__class__.__name__
            print(className + ' requires coupled systems')
#for ind in range(len(obj._qCoupling__qSys)):
# if len(obj._qCoupling__cFncs) != len(obj._qCoupling__qSys):
# className = obj.__class__.__name__
# print(className + ' requires same number of systems as coupling functions')
return newFunction
def sweepInitError(init):
def newFunction(obj, **kwargs):
init(obj, **kwargs)
if obj.sweepList is None:
className = obj.__class__.__name__
            print(className + ' requires either a list or relevant info; the given values are:'
+ '\n' + # noqa: W503, W504
'sweepList: ', obj.sweepList, '\n' + # noqa: W504
'sweepMax: ', obj.sweepMax, '\n' + # noqa: W504
'sweepMin: ', obj.sweepMin, '\n' + # noqa: W504
'sweepPert: ', obj.sweepPert, '\n' + # noqa: W504
'logSweep: ', obj.logSweep)
return newFunction
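
# Hedged usage sketch (added; the class and attribute names below are
# illustrative, not taken from QuanGuru itself):
#
#     class demoSystem:
#         @qSystemInitErrors
#         def __init__(self, **kwargs):
#             self._genericQSys__dimension = kwargs.get('dimension')
#             self.frequency = kwargs.get('frequency')
#
#     demoSystem()             # prints "demoSystem requires a dimension"
#     demoSystem(dimension=2)  # prints "demoSystem requires a frequency"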
| 2.0625 | 2 |
utility/extractor_batch.py | BA-HanseML/NF_Prj_MIMII_Dataset | 10 | 2743 | print('load extractor_batch')
# Utility to run multiple feature extraction
# diagrams over many files with multiple threats
import pandas as pd
import numpy as np
import os
import sys
import glob
from tqdm.auto import tqdm
from queue import Queue
from threading import Thread
from datetime import datetime
import time
import logging
# thread class
class ExtractorDiagramThread(Thread):
    def __init__(self, queue, extdia, wn):
Thread.__init__(self)
self.queue = queue
self.wn = wn
self.extdia = extdia
self.stop = False
def run(self):
while not self.stop:
# Get the work from the queue and expand the tuple
file_path, target_class = self.queue.get()
# execute diagaram
self.extdia.execute_diagram(file_path,target_class)
self.queue.task_done()
def IfStrReturnList(s):
if type(s) == str:
return [s]
else:
return s
def time_stamp_str():
now = datetime.now()
return (now.strftime("%Y-%m-%d %H:%M:%S"))
class LoggerWrap():
def __init__(self):
self.logger = logging.getLogger('feature_extraction_batch')
if (self.logger.hasHandlers()):
self.logger.handlers.clear()
self.logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
self.fh = logging.FileHandler('feature_extraction_batch.log')
self.fh.setLevel(logging.DEBUG)
self.logger.addHandler(self.fh)
def close(self):
print('close log file')
#print(self.fh)
self.fh.close()
logging.shutdown()
def log(self,s):
        m = time_stamp_str() + ': ' + s
self.logger.info(m)
print(m)
def get_file_list(machine, snr, id, target_class_map,
FileCountLimit,
datset_folder_from_base,
base_folder):
flist = []
tlsit = []
tn = {}
fn = {}
for tc in target_class_map:
fn[tc] = sorted( \
glob.glob( \
os.path.abspath( "{base}/{SNR}/{machine}/id_{ID}/{n}/*.{ext}".format(
base=base_folder+datset_folder_from_base,
SNR=snr,
machine=machine,ID=id,
n=tc,
ext='wav' ))))
if FileCountLimit:
if FileCountLimit < len(fn[tc]):
fn[tc] = fn[tc][:FileCountLimit]
tn[tc] = np.ones(len(fn[tc]), dtype='int')*target_class_map[tc]
for tc in target_class_map:
flist+= fn[tc]
tlsit+=(list((tn[tc])))
return flist, tlsit
def multithreadpolltracker(queue, total):
last = total
done_l = 0
pbar = tqdm(total=total)
while not queue.empty():
time.sleep(0.05)
if last > queue.qsize():
done = total-int(queue.qsize())
#print(done, end ="--")
pbar.update(done-done_l)
done_l = done
last = queue.qsize()
queue.join()
done = total
pbar.update(done)
# Main Function
def extractor_batch(base_folder, target_folder, extdia,
FileFindDict = {'SNR': '6dB',
'machine': 'pump',
'ID': ['00']},
n_jobs = 1,
target_class_map = {'abnormal':1, 'normal': 0},
FileCountLimit = None,
datset_folder_from_base = 'dataset',
augment = False, # create one augmentation for a given target class i.e. 'normal'
DeviceType = 0, # 0 continuses or 1 sporatic
fHP = None, # simple FIR HP to cut of very low freq to not overload MEL
main_channel = 0): # assuming a DOA was able to get mein direction (pseudo DOA ...)
lw = LoggerWrap()
base_folder_full = os.path.abspath(base_folder)
target_folder_full = os.path.abspath(base_folder+target_folder)
os.makedirs(target_folder_full, exist_ok=True)
lw.log('Target folder will be: ' + target_folder_full)
lw.log('Extractor diagram is fof type: ' + str(extdia))
for m in IfStrReturnList(FileFindDict['machine']):
for snr in IfStrReturnList(FileFindDict['SNR']):
for id in IfStrReturnList(FileFindDict['ID']):
lw.log('-'*44 )
                lw.log('Working on machine part:' + m + ' SNR:' + snr + ' ID:' + id)
ts = time.time()
# create file list for ID batch
filelist, targetlist = get_file_list(m, snr, id,
target_class_map,
FileCountLimit,
datset_folder_from_base,
base_folder)
lw.log('Files to process: ' + str(len(filelist)) )
# start processing
if n_jobs == 1: # in the notebook
ed = extdia(base_folder,0,main_channel,augment,DeviceType,fHP)
pbar= tqdm(total = len(filelist))
for f,tc in (zip(filelist, targetlist)):
ed.execute_diagram(f,tc)
pbar.update()
outport_akkulist_tofile(base_folder,target_folder,ed,m,snr,id)
lw.log('list for the id pickled' )
else: # to threads
# create the threads and akku diagram
edl = []
wl = []
queue = Queue()
for w in range(n_jobs):
edl.append(extdia(base_folder,w,main_channel,augment,DeviceType,fHP))
worker = ExtractorDiagramThread(queue,edl[w],w)
worker.daemon = True
worker.start()
wl.append(worker)
# fill the Queue
lw.log('multithread mode filling the queue' )
for f,tc in (zip(filelist, targetlist)):
queue.put((f, tc))
multithreadpolltracker(queue, len(filelist))
for w in wl:
w.stop = True
lw.log('multithread mode all threads done' )
joinlist = outport_akkulist_join(exdia_list=edl)
outport_akkulist_tofile(base_folder, target_folder, joinlist, m, snr, id)
lw.log('multithread mode list joined and pickled for the id' )
                del edl  # trying to fight the memory leak
del joinlist
tneeded_sec = np.round(time.time()- ts,2)
tneeded_min = np.round(tneeded_sec/60,2)
lw.log('total time needed for the ID: ' + str(tneeded_sec) + 'sec' + ' = ' + str(tneeded_min) + 'min')
lw.close() | 2.015625 | 2 |
tortoise/query_utils.py | DDevine/tortoise-orm | 1 | 2751 | from copy import copy
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast
from pypika import Table
from pypika.terms import Criterion
from tortoise.exceptions import FieldError, OperationalError
from tortoise.fields.relational import BackwardFKRelation, ManyToManyFieldInstance, RelationalField
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
from tortoise.queryset import QuerySet
def _process_filter_kwarg(
model: "Type[Model]", key: str, value: Any, table: Table
) -> Tuple[Criterion, Optional[Tuple[Table, Criterion]]]:
join = None
if value is None and f"{key}__isnull" in model._meta.filters:
param = model._meta.get_filter(f"{key}__isnull")
value = True
else:
param = model._meta.get_filter(key)
pk_db_field = model._meta.db_pk_column
if param.get("table"):
join = (
param["table"],
table[pk_db_field] == param["table"][param["backward_key"]],
)
if param.get("value_encoder"):
value = param["value_encoder"](value, model)
criterion = param["operator"](param["table"][param["field"]], value)
else:
field_object = model._meta.fields_map[param["field"]]
encoded_value = (
param["value_encoder"](value, model, field_object)
if param.get("value_encoder")
else model._meta.db.executor_class._field_to_db(field_object, value, model)
)
criterion = param["operator"](table[param["source_field"]], encoded_value)
return criterion, join
def _get_joins_for_related_field(
table: Table, related_field: RelationalField, related_field_name: str
) -> List[Tuple[Table, Criterion]]:
required_joins = []
related_table: Table = related_field.related_model._meta.basetable
if isinstance(related_field, ManyToManyFieldInstance):
through_table = Table(related_field.through)
required_joins.append(
(
through_table,
table[related_field.model._meta.db_pk_column]
== through_table[related_field.backward_key],
)
)
required_joins.append(
(
related_table,
through_table[related_field.forward_key]
== related_table[related_field.related_model._meta.db_pk_column],
)
)
elif isinstance(related_field, BackwardFKRelation):
to_field_source_field = (
related_field.to_field_instance.source_field
or related_field.to_field_instance.model_field_name
)
if table == related_table:
related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
required_joins.append(
(
related_table,
table[to_field_source_field] == related_table[related_field.relation_source_field],
)
)
else:
to_field_source_field = (
related_field.to_field_instance.source_field
or related_field.to_field_instance.model_field_name
)
from_field = related_field.model._meta.fields_map[related_field.source_field] # type: ignore
from_field_source_field = from_field.source_field or from_field.model_field_name
related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
required_joins.append(
(related_table, related_table[to_field_source_field] == table[from_field_source_field],)
)
return required_joins
class EmptyCriterion(Criterion): # type: ignore
def __or__(self, other: Criterion) -> Criterion:
return other
def __and__(self, other: Criterion) -> Criterion:
return other
def __bool__(self) -> bool:
return False
def _and(left: Criterion, right: Criterion) -> Criterion:
if left and not right:
return left
return left & right
def _or(left: Criterion, right: Criterion) -> Criterion:
if left and not right:
return left
return left | right
class QueryModifier:
"""
Internal structure used to generate SQL Queries.
"""
def __init__(
self,
where_criterion: Optional[Criterion] = None,
joins: Optional[List[Tuple[Table, Criterion]]] = None,
having_criterion: Optional[Criterion] = None,
) -> None:
self.where_criterion: Criterion = where_criterion or EmptyCriterion()
self.joins = joins if joins else []
self.having_criterion: Criterion = having_criterion or EmptyCriterion()
def __and__(self, other: "QueryModifier") -> "QueryModifier":
return QueryModifier(
where_criterion=_and(self.where_criterion, other.where_criterion),
joins=self.joins + other.joins,
having_criterion=_and(self.having_criterion, other.having_criterion),
)
def __or__(self, other: "QueryModifier") -> "QueryModifier":
if self.having_criterion or other.having_criterion:
# TODO: This could be optimized?
result_having_criterion = _or(
_and(self.where_criterion, self.having_criterion),
_and(other.where_criterion, other.having_criterion),
)
return QueryModifier(
joins=self.joins + other.joins, having_criterion=result_having_criterion
)
if self.where_criterion and other.where_criterion:
return QueryModifier(
where_criterion=self.where_criterion | other.where_criterion,
joins=self.joins + other.joins,
)
return QueryModifier(
where_criterion=self.where_criterion or other.where_criterion,
joins=self.joins + other.joins,
)
def __invert__(self) -> "QueryModifier":
if not self.where_criterion and not self.having_criterion:
return QueryModifier(joins=self.joins)
if self.having_criterion:
# TODO: This could be optimized?
return QueryModifier(
joins=self.joins,
having_criterion=_and(self.where_criterion, self.having_criterion).negate(),
)
return QueryModifier(where_criterion=self.where_criterion.negate(), joins=self.joins)
def get_query_modifiers(self) -> Tuple[Criterion, List[Tuple[Table, Criterion]], Criterion]:
"""
Returns a tuple of the query criterion.
"""
return self.where_criterion, self.joins, self.having_criterion
class Q:
"""
Q Expression container.
Q Expressions are a useful tool to compose a query from many small parts.
:param join_type: Is the join an AND or OR join type?
:param args: Inner ``Q`` expressions that you want to wrap.
:param kwargs: Filter statements that this Q object should encapsulate.
"""
__slots__ = (
"children",
"filters",
"join_type",
"_is_negated",
"_annotations",
"_custom_filters",
)
AND = "AND"
OR = "OR"
def __init__(self, *args: "Q", join_type: str = AND, **kwargs: Any) -> None:
if args and kwargs:
newarg = Q(join_type=join_type, **kwargs)
args = (newarg,) + args
kwargs = {}
if not all(isinstance(node, Q) for node in args):
raise OperationalError("All ordered arguments must be Q nodes")
#: Contains the sub-Q's that this Q is made up of
self.children: Tuple[Q, ...] = args
#: Contains the filters applied to this Q
self.filters: Dict[str, Any] = kwargs
if join_type not in {self.AND, self.OR}:
raise OperationalError("join_type must be AND or OR")
#: Specifies if this Q does an AND or OR on its children
self.join_type = join_type
self._is_negated = False
self._annotations: Dict[str, Any] = {}
self._custom_filters: Dict[str, Dict[str, Any]] = {}
def __and__(self, other: "Q") -> "Q":
"""
Returns a binary AND of Q objects, use ``AND`` operator.
:raises OperationalError: AND operation requires a Q node
"""
if not isinstance(other, Q):
raise OperationalError("AND operation requires a Q node")
return Q(self, other, join_type=self.AND)
def __or__(self, other: "Q") -> "Q":
"""
Returns a binary OR of Q objects, use ``OR`` operator.
:raises OperationalError: OR operation requires a Q node
"""
if not isinstance(other, Q):
raise OperationalError("OR operation requires a Q node")
return Q(self, other, join_type=self.OR)
def __invert__(self) -> "Q":
"""
Returns a negated instance of the Q object, use ``~`` operator.
"""
q = Q(*self.children, join_type=self.join_type, **self.filters)
q.negate()
return q
def negate(self) -> None:
"""
        Negates the current Q object. (mutation)
"""
self._is_negated = not self._is_negated
def _resolve_nested_filter(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
related_field_name = key.split("__")[0]
related_field = cast(RelationalField, model._meta.fields_map[related_field_name])
required_joins = _get_joins_for_related_field(table, related_field, related_field_name)
modifier = Q(**{"__".join(key.split("__")[1:]): value}).resolve(
model=related_field.related_model,
annotations=self._annotations,
custom_filters=self._custom_filters,
table=required_joins[-1][0],
)
return QueryModifier(joins=required_joins) & modifier
def _resolve_custom_kwarg(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
having_info = self._custom_filters[key]
annotation = self._annotations[having_info["field"]]
annotation_info = annotation.resolve(model, table)
operator = having_info["operator"]
overridden_operator = model._meta.db.executor_class.get_overridden_filter_func(
filter_func=operator
)
if overridden_operator:
operator = overridden_operator
if annotation_info["field"].is_aggregate:
modifier = QueryModifier(having_criterion=operator(annotation_info["field"], value))
else:
modifier = QueryModifier(where_criterion=operator(annotation_info["field"], value))
return modifier
def _resolve_regular_kwarg(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
if key not in model._meta.filters and key.split("__")[0] in model._meta.fetch_fields:
modifier = self._resolve_nested_filter(model, key, value, table)
else:
criterion, join = _process_filter_kwarg(model, key, value, table)
joins = [join] if join else []
modifier = QueryModifier(where_criterion=criterion, joins=joins)
return modifier
def _get_actual_filter_params(
self, model: "Type[Model]", key: str, value: Table
) -> Tuple[str, Any]:
filter_key = key
if key in model._meta.fk_fields or key in model._meta.o2o_fields:
field_object = model._meta.fields_map[key]
if hasattr(value, "pk"):
filter_value = value.pk
else:
filter_value = value
filter_key = cast(str, field_object.source_field)
elif key in model._meta.m2m_fields:
if hasattr(value, "pk"):
filter_value = value.pk
else:
filter_value = value
elif (
key.split("__")[0] in model._meta.fetch_fields
or key in self._custom_filters
or key in model._meta.filters
):
filter_value = value
else:
allowed = sorted(
model._meta.fields | model._meta.fetch_fields | set(self._custom_filters)
)
raise FieldError(f"Unknown filter param '{key}'. Allowed base values are {allowed}")
return filter_key, filter_value
def _resolve_kwargs(self, model: "Type[Model]", table: Table) -> QueryModifier:
modifier = QueryModifier()
for raw_key, raw_value in self.filters.items():
key, value = self._get_actual_filter_params(model, raw_key, raw_value)
if key in self._custom_filters:
filter_modifier = self._resolve_custom_kwarg(model, key, value, table)
else:
filter_modifier = self._resolve_regular_kwarg(model, key, value, table)
if self.join_type == self.AND:
modifier &= filter_modifier
else:
modifier |= filter_modifier
if self._is_negated:
modifier = ~modifier
return modifier
def _resolve_children(self, model: "Type[Model]", table: Table) -> QueryModifier:
modifier = QueryModifier()
for node in self.children:
node_modifier = node.resolve(model, self._annotations, self._custom_filters, table)
if self.join_type == self.AND:
modifier &= node_modifier
else:
modifier |= node_modifier
if self._is_negated:
modifier = ~modifier
return modifier
def resolve(
self,
model: "Type[Model]",
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
table: Table,
) -> QueryModifier:
"""
Resolves the logical Q chain into the parts of a SQL statement.
:param model: The Model this Q Expression should be resolved on.
:param annotations: Extra annotations one wants to inject into the resultset.
:param custom_filters: Pre-resolved filters to be passed though.
:param table: ``pypika.Table`` to keep track of the virtual SQL table
(to allow self referential joins)
"""
self._annotations = annotations
self._custom_filters = custom_filters
if self.filters:
return self._resolve_kwargs(model, table)
return self._resolve_children(model, table)
class Prefetch:
"""
Prefetcher container. One would directly use this when wanting to attach a custom QuerySet
for specialised prefetching.
:param relation: Related field name.
:param queryset: Custom QuerySet to use for prefetching.
"""
__slots__ = ("relation", "queryset")
def __init__(self, relation: str, queryset: "QuerySet") -> None:
self.relation = relation
self.queryset = queryset
self.queryset.query = copy(self.queryset.model._meta.basequery)
def resolve_for_queryset(self, queryset: "QuerySet") -> None:
"""
Called internally to generate prefetching query.
:param queryset: Custom QuerySet to use for prefetching.
:raises OperationalError: If field does not exist in model.
"""
relation_split = self.relation.split("__")
first_level_field = relation_split[0]
if first_level_field not in queryset.model._meta.fetch_fields:
raise OperationalError(
f"relation {first_level_field} for {queryset.model._meta.db_table} not found"
)
forwarded_prefetch = "__".join(relation_split[1:])
if forwarded_prefetch:
if first_level_field not in queryset._prefetch_map.keys():
queryset._prefetch_map[first_level_field] = set()
queryset._prefetch_map[first_level_field].add(
Prefetch(forwarded_prefetch, self.queryset)
)
else:
queryset._prefetch_queries[first_level_field] = self.queryset
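
# Hedged usage sketch (added; the model and field names are illustrative):
#
#     from tortoise.query_utils import Q
#
#     q = Q(name="foo") | Q(rating__gte=4)   # OR two filter groups
#     q &= ~Q(archived=True)                 # AND in a negated node
#     await Event.filter(q)                  # resolved lazily against a model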
| 1.46875 | 1 |
2_b_builtins_dynamic_recall.py | traff/python_completion_benchmark | 1 | 2759 | import builtins
builtins.foo = 'bar'
foo # foo | -0.155273 | 0 |
tpv/modals/sugerencias.py | vallemrv/tpvB3 | 3 | 2767 | # @Author: <NAME> <valle>
# @Date: 10-May-2017
# @Email: <EMAIL>
# @Last modified by: valle
# @Last modified time: 23-Feb-2018
# @License: Apache license vesion 2.0
from kivy.uix.modalview import ModalView
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty, ListProperty
from kivy.lang import Builder
Builder.load_file("view/sugerencias.kv")
class Sugerencias(ModalView):
onExit = ObjectProperty(None, allownone=True)
content = ObjectProperty(None, allownone=True)
texto = StringProperty("")
des = StringProperty("")
sug = ListProperty([])
key = StringProperty("")
tag = ObjectProperty(None, allownone=True)
def __init__(self, **kargs):
super(Sugerencias, self).__init__(**kargs)
self.auto_dismiss=False
def on_sug(self, key, value):
self.lista.rm_all_widgets()
for item in self.sug:
btn = Button(text=item)
btn.tag = item
btn.bind(on_press=self.onPress)
self.lista.add_linea(btn)
def onPress(self, b):
self.onExit(self.key, self.content, b.tag, self.tag)
def clear_text(self):
self.texto = ""
def exit(self):
self.texto = self.txtSug.text
if self.onExit:
if self.texto != "":
self.sug.append(self.texto)
self.onExit(self.key, self.content, self.texto, self.tag)
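
# Hedged usage sketch (added; the callback and values are illustrative):
#
#     def on_exit(key, content, texto, tag):
#         print(key, texto)
#
#     modal = Sugerencias(key='nombre', sug=['uno', 'dos'], onExit=on_exit)
#     modal.open()  # ModalView.open() displays the popup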
| 1.8125 | 2 |
library/kong_api.py | sebastienc/ansible-kong-module | 34 | 2831 | #!/usr/bin/python
DOCUMENTATION = '''
---
module: kong
short_description: Configure a Kong API Gateway
'''
EXAMPLES = '''
- name: Register a site
kong:
kong_admin_uri: http://127.0.0.1:8001/apis/
name: "Mockbin"
taget_url: "http://mockbin.com"
request_host: "mockbin.com"
state: present
- name: Delete a site
kong:
kong_admin_uri: http://127.0.0.1:8001/apis/
name: "Mockbin"
state: absent
'''
import json, requests, os
class KongAPI:
def __init__(self, base_url, auth_username=None, auth_password=<PASSWORD>):
self.base_url = base_url
if auth_username is not None and auth_password is not None:
self.auth = (auth_username, auth_password)
else:
self.auth = None
def __url(self, path):
return "{}{}" . format (self.base_url, path)
def _api_exists(self, name, api_list):
for api in api_list:
if name == api.get("name", None):
return True
return False
def add_or_update(self, name, upstream_url, request_host=None, request_path=None, strip_request_path=False, preserve_host=False):
method = "post"
url = self.__url("/apis/")
api_list = self.list().json().get("data", [])
api_exists = self._api_exists(name, api_list)
if api_exists:
method = "patch"
url = "{}{}" . format (url, name)
data = {
"name": name,
"upstream_url": upstream_url,
"strip_request_path": strip_request_path,
"preserve_host": preserve_host
}
if request_host is not None:
data['request_host'] = request_host
if request_path is not None:
data['request_path'] = request_path
return getattr(requests, method)(url, data, auth=self.auth)
def list(self):
url = self.__url("/apis")
return requests.get(url, auth=self.auth)
def info(self, id):
url = self.__url("/apis/{}" . format (id))
return requests.get(url, auth=self.auth)
def delete_by_name(self, name):
info = self.info(name)
id = info.json().get("id")
return self.delete(id)
def delete(self, id):
path = "/apis/{}" . format (id)
url = self.__url(path)
return requests.delete(url, auth=self.auth)
class ModuleHelper:
def __init__(self, fields):
self.fields = fields
def get_module(self):
args = dict(
kong_admin_uri = dict(required=False, type='str'),
kong_admin_username = dict(required=False, type='str'),
kong_admin_password = dict(required=False, type='str'),
name = dict(required=False, type='str'),
upstream_url = dict(required=False, type='str'),
request_host = dict(required=False, type='str'),
request_path = dict(required=False, type='str'),
strip_request_path = dict(required=False, default=False, type='bool'),
preserve_host = dict(required=False, default=False, type='bool'),
state = dict(required=False, default="present", choices=['present', 'absent', 'latest', 'list', 'info'], type='str'),
)
return AnsibleModule(argument_spec=args,supports_check_mode=False)
def prepare_inputs(self, module):
url = module.params['kong_admin_uri']
auth_user = module.params['kong_admin_username']
auth_password = <PASSWORD>.params['<PASSWORD>']
state = module.params['state']
data = {}
for field in self.fields:
value = module.params.get(field, None)
if value is not None:
data[field] = value
return (url, data, state, auth_user, auth_password)
def get_response(self, response, state):
if state == "present":
meta = response.json()
has_changed = response.status_code in [201, 200]
if state == "absent":
meta = {}
has_changed = response.status_code == 204
if state == "list":
meta = response.json()
has_changed = False
return (has_changed, meta)
def main():
fields = [
'name',
'upstream_url',
'request_host',
'request_path',
'strip_request_path',
'preserve_host'
]
helper = ModuleHelper(fields)
global module # might not need this
module = helper.get_module()
base_url, data, state, auth_user, auth_password = helper.prepare_inputs(module)
api = KongAPI(base_url, auth_user, auth_password)
if state == "present":
response = api.add_or_update(**data)
if state == "absent":
response = api.delete_by_name(data.get("name"))
if state == "list":
response = api.list()
if response.status_code == 401:
module.fail_json(msg="Please specify kong_admin_username and kong_admin_password", meta=response.json())
elif response.status_code == 403:
module.fail_json(msg="Please check kong_admin_username and kong_admin_password", meta=response.json())
else:
has_changed, meta = helper.get_response(response, state)
module.exit_json(changed=has_changed, meta=meta)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| 1.703125 | 2 |
fuzzybee/joboard/views.py | youtaya/knight | 0 | 2847 | # -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render_to_response, render
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from joboard.models import Factory
from joboard.forms import FactoryForm
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from urllib import urlopen, urlencode
import urllib2
from fuzzybee.conf import b_url, b_ak, geo_table, l_url, app_id, app_key
from utils.pack_json import toJSON, fromJSON
from django.contrib.auth.decorators import login_required
from people.models import People
import logging
logger = logging.getLogger(__name__)
@login_required
def index(request):
form = None
if request.method == 'POST':
form = FactoryForm(request.POST)
print form
if form.is_valid():
factory = form.cleaned_data
logger.debug("lat: " + str(factory['fact_lat']))
logger.debug("addr: " + factory['fact_addr'])
#save factory in model
factmodel = form.save(commit=False)
print request.user
factmodel.fact_maintainer = People.objects.get(user=request.user)
factmodel.save()
factid = factmodel.id
#save in public server: leancloud and baidu
save_factory_cloud(factory, factid)
return HttpResponseRedirect(reverse('board:detail', args=(factid,)))
else:
form = FactoryForm()
return render_to_response('board/new.html', {'form': form}, context_instance=RequestContext(request))
@login_required
def detail(request, fact_id):
print fact_id
info = get_object_or_404(Factory, pk=fact_id)
return render(request, 'board/detail.html', {'info':info})
@login_required
def manager(request):
print "manager..."
try:
people = People.objects.get(user=request.user)
factory = Factory.objects.get(fact_maintainer=people)
except ObjectDoesNotExist:
print 'no hire action...'
return redirect(reverse('joboard.views.index', args=[]))
return render(request, 'board/manager.html', {'info':factory})
def save_factory_cloud(fact_info, fact_id):
title = fact_info['fact_name']
address = fact_info['fact_addr']
lat = fact_info['fact_lat']
lng = fact_info['fact_lng']
num = fact_info['hire_num']
data = {
'title': title.encode("utf-8"),
'address': address.encode("utf-8"),
'latitude': lat,
'longitude': lng,
'job_num': num,
'factory_id': fact_id,
}
head = {
'X-AVOSCloud-Application-Id': app_id,
'X-AVOSCloud-Application-Key': app_key,
'Content-Type': 'application/json',
}
req = urllib2.Request(l_url, toJSON(data), head)
print str(req)
response = urllib2.urlopen(req)
    #print response.read()
lean_response = fromJSON(response.read())
print lean_response
lean_objectId = lean_response['objectId']
# save in Baidu Map
params = urlencode({
'title': title.encode("utf-8"),
'address': address.encode("utf-8"),
'latitude': lat,
'longitude': lng,
'coord_type': 3,
'geotable_id': geo_table,
'ak': b_ak,
'job_num': num,
'lean_id': lean_objectId,
})
req = urllib2.Request(b_url, params)
#print str(req)
response = urllib2.urlopen(req)
#print respone.read()
| 1.273438 | 1 |
tools/telemetry/telemetry/core/platform/android_device_unittest.py | kjthegod/chromium | 1 | 2879 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import benchmark
from telemetry.core import browser_options
from telemetry.core.platform import android_device
from telemetry.core.platform import android_platform_backend
from telemetry.unittest_util import system_stub
class AndroidDeviceTest(unittest.TestCase):
def setUp(self):
self._android_device_stub = system_stub.Override(
android_device, ['adb_commands'])
def testGetAllAttachedAndroidDevices(self):
self._android_device_stub.adb_commands.attached_devices = [
'01', '02']
self.assertEquals(
set(['01', '02']),
set(device.device_id for device in
android_device.AndroidDevice.GetAllConnectedDevices()
))
def tearDown(self):
self._android_device_stub.Restore()
class GetDeviceTest(unittest.TestCase):
def setUp(self):
self._android_device_stub = system_stub.Override(
android_device, ['adb_commands', 'os', 'subprocess', 'logging'])
self._apb_stub = system_stub.Override(
android_platform_backend, ['adb_commands'])
def tearDown(self):
self._android_device_stub.Restore()
self._apb_stub.Restore()
def testNoAdbReturnsNone(self):
finder_options = browser_options.BrowserFinderOptions()
def NoAdb(*_, **__):
raise OSError('not found')
self._android_device_stub.subprocess.Popen = NoAdb
self.assertEquals([], self._android_device_stub.logging.warnings)
self.assertIsNone(android_device.GetDevice(finder_options))
def testAdbNoDevicesReturnsNone(self):
finder_options = browser_options.BrowserFinderOptions()
self.assertEquals([], self._android_device_stub.logging.warnings)
self.assertIsNone(android_device.GetDevice(finder_options))
def testAdbPermissionsErrorReturnsNone(self):
finder_options = browser_options.BrowserFinderOptions()
self._android_device_stub.subprocess.Popen.communicate_result = (
'List of devices attached\n????????????\tno permissions\n',
'* daemon not running. starting it now on port 5037 *\n'
'* daemon started successfully *\n')
device = android_device.GetDevice(finder_options)
self.assertEquals([
'adb devices gave a permissions error. Consider running adb as root:',
' adb kill-server',
' sudo `which adb` devices\n\n'],
self._android_device_stub.logging.warnings)
self.assertIsNone(device)
def testAdbTwoDevicesReturnsNone(self):
finder_options = browser_options.BrowserFinderOptions()
self._android_device_stub.adb_commands.attached_devices = [
'015d14fec128220c', '015d14fec128220d']
device = android_device.GetDevice(finder_options)
self.assertEquals([
'Multiple devices attached. Please specify one of the following:\n'
' --device=015d14fec128220c\n'
' --device=015d14fec128220d'],
self._android_device_stub.logging.warnings)
self.assertIsNone(device)
def testAdbPickOneDeviceReturnsDeviceInstance(self):
finder_options = browser_options.BrowserFinderOptions()
finder_options.android_device = '555d14fecddddddd' # pick one
self._android_device_stub.adb_commands.attached_devices = [
'015d14fec128220c', '555d14fecddddddd']
device = android_device.GetDevice(finder_options)
self.assertEquals([], self._android_device_stub.logging.warnings)
self.assertEquals('555d14fecddddddd', device.device_id)
def testAdbOneDeviceReturnsDeviceInstance(self):
finder_options = browser_options.BrowserFinderOptions()
self._android_device_stub.adb_commands.attached_devices = (
['015d14fec128220c'])
device = android_device.GetDevice(finder_options)
self.assertEquals([], self._android_device_stub.logging.warnings)
self.assertEquals('015d14fec128220c', device.device_id)
| 1.382813 | 1 |
projects/api/UsersApi.py | chamathshashika/projects-python-wrappers | 0 | 2895 | #$Id$
from projects.util.ZohoHttpClient import ZohoHttpClient
from projects.api.Api import Api
from projects.parser.UsersParser import UsersParser
base_url = Api().base_url
zoho_http_client = ZohoHttpClient()
parser = UsersParser()
class UsersApi:
"""Users Api class is used to
1.Get all the users in the given project.
"""
def __init__(self, authtoken, portal_id):
"""Initialize Users api using user's authtoken and portal id.
Args:
authtoken(str): User's authtoken.
portal_id(str): User's portal id.
"""
self.details = {
'authtoken': authtoken
}
self.portal_id = portal_id
def get_users(self, project_id):
"""Get all the users in the given project.
Args:
project_id(long): Project id.
Returns:
list of instance: List of users object.
"""
url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/users/'
response = zoho_http_client.get(url, self.details)
return parser.get_users(response)
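
# Hedged usage sketch (added; the token and ids are placeholders):
#
#     users_api = UsersApi('{auth_token}', '{portal_id}')
#     users = users_api.get_users(170876000001800000)  # illustrative project id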
| 1.546875 | 2 |
recumpiler/__init__.py | Toasterstein/recumpiler | 0 | 2903 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""recumpiler
Recompile text to be semi-readable memey garbage.
"""
__version__ = (0, 0, 0)
| 0.466797 | 0 |
char_map.py | rakib313/Bangla-End2End-Speech-Recognition | 0 | 2911 | """
Defines two dictionaries for converting
between text and integer sequences.
"""
char_map_str = """
' 0
<SPACE> 1
ব 2
া 3
ং 4
ল 5
দ 6
ে 7
শ 8
য 9
় 10
ি 11
ত 12
্ 13
ন 14
এ 15
ধ 16
র 17
ণ 18
ক 19
ড 20
হ 21
উ 22
প 23
জ 24
অ 25
থ 26
স 27
ষ 28
ই 29
আ 30
ছ 31
গ 32
ু 33
ো 34
ও 35
ভ 36
ী 37
ট 38
ূ 39
ম 40
ৈ 41
ৃ 42
ঙ 43
খ 44
ঃ 45
১ 46
৯ 47
৬ 48
০ 49
২ 50
চ 51
ঘ 52
ৎ 53
৫ 54
৪ 55
ফ 56
ৌ 57
৮ 58
ঁ 59
য় 60
৩ 61
ঢ 62
ঠ 63
৭ 64
ড় 65
ঝ 66
ঞ 67
ঔ 68
ঈ 69
v 70
b 71
s 72
ঐ 73
2 74
0 75
1 76
4 77
f 78
o 79
t 80
a 81
l 82
w 83
r 84
d 85
c 86
u 87
p 88
n 89
g 90
ঋ 91
i 92
z 93
m 94
e 95
ঊ 96
h 97
x 98
3 99
5 100
y 101
9 102
ৗ 103
j 104
œ 105
8 106
ঢ় 107
k 108
ৰ 109
"""
# the "blank" character is mapped to 28
char_map = {}
index_map = {}
for line in char_map_str.strip().split('\n'):
ch, index = line.split()
char_map[ch] = int(index)
index_map[int(index)+1] = ch
index_map[2] = ' ' | 1.992188 | 2 |
4day/Book04_1.py | jsjang93/joony | 0 | 2935 | # Book04_1.py
class Book:
    category = '소설'  # class attribute (shared by all instances)
b1 = Book(); print(b1.category)
b2 = b1; print(b2.category)
print(Book.category)
Book.category = '수필'
print(b2.category); print(b1.category) ; print(Book.category)
b2.category = 'IT'
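# assigning on the instance creates an attribute on b2 that shadows the
# class attribute, so the final line prints IT, 수필, 수필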
print(b2.category); print(b1.category) ; print(Book.category) | 1.164063 | 1 |
torch_geometric/nn/unpool/__init__.py | mwussow/pytorch_geometric | 13 | 2967 | from .knn_interpolate import knn_interpolate
__all__ = [
'knn_interpolate',
]
| 0.131836 | 0 |
cli.py | abel-bernabeu/facecompressor | 2 | 2975 | import argparse
import autoencoder
def addTrainablesArg(parser):
parser.add_argument('--model', dest='model', help='Trained model', default='model.pt')
def addExchangeArg(parser):
parser.add_argument('--exchange', dest='exchange', help='File with exchanged data', required=True)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="action")
encode_parser = subparsers.add_parser('encode')
addTrainablesArg(encode_parser)
encode_parser.add_argument('--input', dest='input', help='Input image file name', required=True)
addExchangeArg(encode_parser)
decode_parser = subparsers.add_parser('decode')
addTrainablesArg(decode_parser)
addExchangeArg(decode_parser)
decode_parser.add_argument('--output', dest='output', help='Output image file name', required=True)
opts = parser.parse_args()
if opts.action == 'encode':
autoencoder.encode(opts.model, opts.input, opts.exchange)
elif opts.action == 'decode':
autoencoder.decode(opts.model, opts.exchange, opts.output)
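
# Hedged usage examples (added; the file names are illustrative):
#
#     python cli.py encode --model model.pt --input face.png --exchange face.bin
#     python cli.py decode --model model.pt --exchange face.bin --output face_out.png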
| 1.78125 | 2 |
demos/iaf_pop_demo.py | bionet/ted.python | 4 | 2983 | #!/usr/bin/env python
"""
Demos of encoding and decoding algorithms using populations of
IAF neurons.
"""
# Copyright (c) 2009-2015, <NAME>
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import sys
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.iaf as iaf
# For determining output plot file names:
output_name = 'iaf_pop_demo_'
output_count = 0
output_ext = '.png'
# Define algorithm parameters and input signal:
dur = 0.1
dt = 1e-6
f = 32
bw = 2*np.pi*f
t = np.arange(0, dur, dt)
np.random.seed(0)
noise_power = None
if noise_power is None:
fig_title = 'IAF Input Signal with No Noise'
else:
fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power
print fig_title
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power)
pl.plot_signal(t, u, fig_title,
output_name + str(output_count) + output_ext)
# Test leaky IAF algorithms:
b1 = 3.5 # bias
d1 = 0.7 # threshold
R1 = 10.0 # resistance
C1 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b1, d1, R1, C1)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b2 = 3.4 # bias
d2 = 0.8 # threshold
R2 = 9.0 # resistance
C2 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b2, d2, R2, C2)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b_list = np.array([b1, b2])
d_list = np.array([d1, d2])
R_list = np.array([R1, R2])
C_list = np.array([C1, C2])
output_count += 1
fig_title = 'Signal Encoded Using Leaky IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_pop)([u, u], dt, b_list, d_list, R_list, C_list)
pl.plot_encoded(t, u, s_list[0], fig_title + ' #1',
output_name + str(output_count) + output_ext)
output_count += 1
pl.plot_encoded(t, u, s_list[1], fig_title + ' #2',
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Leaky IAF Population Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode_pop)(s_list, dur, dt, bw,
b_list, d_list, R_list,
C_list)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
# Test ideal IAF algorithms:
b1 = 3.5 # bias
d1 = 0.7 # threshold
R1 = np.inf # resistance
C1 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b1, d1, R1, C1)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b2 = 3.4 # bias
d2 = 0.8 # threshold
R2 = np.inf # resistance
C2 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b2, d2, R2, C2)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b_list = [b1, b2]
d_list = [d1, d2]
R_list = [R1, R2]
C_list = [C1, C2]
output_count += 1
fig_title = 'Signal Encoded Using Ideal IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_pop)([u, u], dt, b_list, d_list, R_list, C_list)
pl.plot_encoded(t, u, s_list[0], fig_title + ' #1',
output_name + str(output_count) + output_ext)
output_count += 1
pl.plot_encoded(t, u, s_list[1], fig_title + ' #2',
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Ideal IAF Population Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode_pop)(s_list, dur, dt, bw,
b_list, d_list, R_list,
C_list)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
| 2.28125 | 2 |
appdaemon/apps/toggle_switch/toggle_switch.py | Mithras/ha | 3 | 3007 | import globals
class ToggleSwitch(globals.Hass):
async def initialize(self):
config = self.args["config"]
self._input = config["input"]
self._toggle_service = config["toggle_service"]
self._toggle_payload = config["toggle_payload"]
self._power = config["power"]
self._power_on_threshold = float(config["power_on_threshold"])
self._check_interval = float(config["check_interval"])
self.ensure_state_task = await self.create_task(
self._ensure_state_async(False))
await self.listen_state(self._input_callback_async,
entity=self._input)
async def terminate(self):
# self.log("Terminate")
self.ensure_state_task.cancel()
async def _input_callback_async(self, entity, attribute, old, new, kwargs):
if old == new:
return
# self.log(f"InputChange: old = {old}, new = {new}")
self.ensure_state_task.cancel()
self.ensure_state_task = await self.create_task(self._ensure_state_async())
async def _ensure_state_async(self, immediate=True):
# self.log(f"EnsureState: immediate = {immediate}")
if immediate:
await self._toggle_async()
while True:
await self.sleep(self._check_interval)
power = float(await self.get_state(self._power))
input = await self.get_state(self._input)
# self.log(
# f"EnsureState: input = {input}, power: {power}")
if input == "on" and power < self._power_on_threshold or input == "off" and power > self._power_on_threshold:
await self._toggle_async()
async def _toggle_async(self):
# self.log("Toggle")
await self.call_service(self._toggle_service,
**self._toggle_payload)
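
# Hedged apps.yaml sketch for this AppDaemon app (added; the entity ids and the
# toggle service/payload are illustrative):
#
#     toggle_switch:
#       module: toggle_switch
#       class: ToggleSwitch
#       config:
#         input: input_boolean.heater
#         toggle_service: broadlink/send
#         toggle_payload:
#           packet: "JgBYAAA..."
#         power: sensor.heater_power
#         power_on_threshold: 10
#         check_interval: 5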
| 1.867188 | 2 |
hvm/chains/base.py | hyperevo/py-helios-node | 0 | 3023 | from __future__ import absolute_import
import operator
from collections import deque
import functools
from abc import (
ABCMeta,
abstractmethod
)
import rlp_cython as rlp
import time
import math
from uuid import UUID
from typing import ( # noqa: F401
Any,
Optional,
Callable,
cast,
Dict,
Generator,
Iterator,
Tuple,
Type,
TYPE_CHECKING,
Union,
List,
Iterable,
)
import logging
from itertools import groupby
from hvm.rlp.receipts import Receipt
from hvm.types import Timestamp
from eth_typing import (
Address,
BlockNumber,
Hash32,
)
from eth_utils import (
to_tuple,
to_set,
)
from hvm.db.backends.base import BaseDB
from hvm.db.backends.memory import MemoryDB
from hvm.db.chain import (
BaseChainDB,
ChainDB,
)
from hvm.db.journal import (
JournalDB,
)
from hvm.db.read_only import ReadOnlyDB
from hvm.constants import (
BLOCK_GAS_LIMIT,
BLANK_ROOT_HASH,
NUMBER_OF_HEAD_HASH_TO_SAVE,
TIME_BETWEEN_HEAD_HASH_SAVE,
GENESIS_PARENT_HASH,
)
from hvm.db.trie import make_trie_root_and_nodes
from hvm import constants
from hvm.estimators import (
get_gas_estimator,
)
from hvm.exceptions import (
HeaderNotFound,
TransactionNotFound,
ValidationError,
VMNotFound,
BlockOnWrongChain,
CanonicalHeadNotFound,
CannotCalculateStake,
NotEnoughTimeBetweenBlocks,
ReceivableTransactionNotFound,
TriedImportingGenesisBlock,
JournalDbNotActivated,
ReplacingBlocksNotAllowed,
UnprocessedBlockNotAllowed,
AppendHistoricalRootHashTooOld,
HistoricalNetworkTPCMissing,
HistoricalMinGasPriceError,
UnprocessedBlockChildIsProcessed,
ParentNotFound,
NoChronologicalBlocks,
RewardProofSenderBlockMissing,
InvalidHeadRootTimestamp,
    RewardAmountRoundsToZero,
    TriedDeletingGenesisBlock,
    NoGenesisBlockPresent,
)
from eth_keys.exceptions import (
BadSignature,
)
from hvm.utils.blocks import reorganize_chronological_block_list_for_correct_chronological_order_at_index
from hvm.validation import (
validate_block_number,
validate_uint256,
validate_word,
validate_vm_configuration,
validate_canonical_address,
validate_is_queue_block,
validate_centisecond_timestamp,
)
from hvm.rlp.blocks import (
BaseBlock,
BaseQueueBlock,
)
from hvm.rlp.headers import (
BlockHeader,
HeaderParams,
)
from hvm.rlp.transactions import (
BaseTransaction,
BaseReceiveTransaction
)
from hvm.utils.db import (
apply_state_dict,
)
from hvm.utils.datatypes import (
Configurable,
)
from hvm.utils.headers import (
compute_gas_limit_bounds,
)
from hvm.utils.hexadecimal import (
encode_hex,
decode_hex
)
from hvm.utils.rlp import (
ensure_imported_block_unchanged,
)
from hvm.db.chain_head import ChainHeadDB
from hvm.db.consensus import ConsensusDB
from eth_keys import keys
from eth_keys.datatypes import (
BaseKey,
PublicKey,
PrivateKey
)
from hvm.utils.numeric import (
effecient_diff,
are_items_in_list_equal,
)
from sortedcontainers import (
SortedList,
SortedDict,
)
from hvm.rlp.consensus import NodeStakingScore, PeerNodeHealth
from hvm.rlp.accounts import TransactionKey
if TYPE_CHECKING:
from hvm.vm.base import BaseVM # noqa: F401
from functools import partial
import asyncio
# Mapping from address to account state.
# 'balance', 'nonce' -> int
# 'code' -> bytes
# 'storage' -> Dict[int, int]
AccountState = Dict[Address, Dict[str, Union[int, bytes, Dict[int, int]]]]
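# Hedged example of that shape (added; the address and values are illustrative):
# {b'\x00' * 20: {'balance': 10**18, 'nonce': 0, 'code': b'', 'storage': {0: 1}}}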
class BaseChain(Configurable, metaclass=ABCMeta):
"""
The base class for all Chain objects
"""
chain_head_db: ChainHeadDB = None
chaindb: ChainDB = None
chaindb_class = None # type: Type[BaseChainDB]
vm_configuration = None # type: Tuple[Tuple[int, Type[BaseVM]], ...]
genesis_wallet_address: Address = None
genesis_block_timestamp: Timestamp = None
min_time_between_blocks: int = None
#
# Helpers
#
@classmethod
@abstractmethod
def get_chaindb_class(cls) -> Type[BaseChainDB]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None) -> ConsensusDB:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def enable_read_only_db(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Chain API
#
@classmethod
@abstractmethod
def from_genesis(cls,
base_db: BaseDB,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
@classmethod
@abstractmethod
def from_genesis_header(cls,
base_db: BaseDB,
genesis_header: BlockHeader) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_chain_at_block_parent(self, block: BaseBlock) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
#
# VM API
#
@classmethod
def get_vm_configuration(cls) -> Tuple[Tuple[int, Type['BaseVM']], ...]:
return cls.vm_configuration
@classmethod
def get_vm_class(cls, header: BlockHeader) -> Type['BaseVM']:
"""
Returns the VM instance for the given block number.
"""
return cls.get_vm_class_for_block_timestamp(header.timestamp)
@abstractmethod
def get_vm(self, header: BlockHeader=None, timestamp: Timestamp = None) -> 'BaseVM':
raise NotImplementedError("Chain classes must implement this method")
@classmethod
def get_vm_class_for_block_timestamp(cls, timestamp: int = None) -> Type['BaseVM']:
"""
Returns the VM class for the given block number.
"""
if timestamp is None:
timestamp = int(time.time())
if cls.vm_configuration is None:
raise AttributeError("Chain classes must define the VMs in vm_configuration")
validate_uint256(timestamp)
for start_timestamp, vm_class in reversed(cls.vm_configuration):
if timestamp >= start_timestamp:
return vm_class
else:
raise VMNotFound("No vm available for timestamp #{0}".format(timestamp))
#
# Header API
#
@abstractmethod
def create_header_from_parent(self,
parent_header: BlockHeader,
**header_params: HeaderParams) -> BlockHeader:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_canonical_head(self):
raise NotImplementedError("Chain classes must implement this method")
#
# Block API
#
@abstractmethod
def get_ancestors(self, limit: int, header: BlockHeader=None) -> Iterator[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_number(self, block_number: BlockNumber, wallet_address: Address = None) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_blocks_on_chain(self, start: int, end: int, wallet_address: Address = None) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_blocks_on_chain(self, wallet_address: Address = None) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_blocks_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_blocks_on_chain_up_to_block_hash(self, chain_head_hash: Hash32, start_block_number: int = 0, limit: int = float('inf')) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block(self) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
# @abstractmethod
# def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
# raise NotImplementedError("Chain classes must implement this method")
# @abstractmethod
# def get_canonical_block_hash(self, block_number):
# raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_chronological_blocks_for_window(self, window_timestamp: Timestamp) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_current_queue_block(self) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_current_queue_block_with_reward(self, node_staking_score_list: List[NodeStakingScore]) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(self, block_hash_to_delete: Hash32) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def purge_block_and_all_children_and_set_parent_as_chain_head(self, existing_block_header: BlockHeader):
raise NotImplementedError("Chain classes must implement this method")
#
# Chronologically consistent blockchain db API
#
@abstractmethod
def check_block_chronological_consistency(self, block: BaseBlock) -> List[Hash32]:
raise NotImplementedError("Chain classes must implement this method")
#
# Transaction API
#
@abstractmethod
def get_transaction_by_block_hash_and_index(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def populate_queue_block_with_receive_tx(self) -> List[BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_receive_transactions_by_hash(
self,
block_hash: Hash32) -> List['BaseReceiveTransaction']:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_receive_tx_from_send_tx(self, tx_hash: Hash32) -> Optional['BaseReceiveTransaction']:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_receivable_transactions(self) -> List[BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_receivable_transactions(self, address: Address) -> Tuple[List[BaseReceiveTransaction], List[TransactionKey]]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_current_queue_block_nonce(self) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_and_sign_transaction_for_queue_block(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_and_sign_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
#
# Chronological Chain API
#
@abstractmethod
def try_to_rebuild_chronological_chain_from_historical_root_hashes(self, historical_root_hash_timestamp: Timestamp) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(self, historical_root_hash_timestamp: Timestamp) -> List[Tuple[Timestamp, Hash32]]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Execution API
#
# @abstractmethod
# def apply_transaction(self, transaction):
# raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def estimate_gas(self, transaction: BaseTransaction, at_header: BlockHeader=None) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_block(self, block: BaseBlock, perform_validation: bool=True) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_chain(self, block_list: List[BaseBlock], perform_validation: bool=True, save_block_head_hash_timestamp: bool = True, allow_replacement: bool = True) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_chronological_block_window(self, block_list: List[BaseBlock], window_start_timestamp: Timestamp,
save_block_head_hash_timestamp: bool = True,
allow_unprocessed: bool = False) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Validation API
#
@abstractmethod
def get_allowed_time_of_next_block(self, chain_address: Address = None) -> Timestamp:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_block(self, block: BaseBlock) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_gaslimit(self, header: BlockHeader) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_block_specification(self, block) -> bool:
raise NotImplementedError("Chain classes must implement this method")
#
# Stake API
#
@abstractmethod
def get_mature_stake(self, wallet_address: Address = None, raise_canonical_head_not_found_error:bool = False) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_mature_stake_for_chronological_block_window(self, chronological_block_window_timestamp, timestamp_for_stake):
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_new_block_hash_to_test_peer_node_health(self) -> Hash32:
raise NotImplementedError("Chain classes must implement this method")
#
# Min Block Gas API used for throttling the network
#
@abstractmethod
def re_initialize_historical_minimum_gas_price_at_genesis(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def update_current_network_tpc_capability(self, current_network_tpc_cap: int,
update_min_gas_price: bool = True) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_local_tpc_cap(self) -> int:
raise NotImplementedError("Chain classes must implement this method")
#
# Consensus db passthrough with correct db corresponding to timestamp
#
@abstractmethod
def get_signed_peer_score(self, private_key: PrivateKey,
network_id: int,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_signed_peer_score_string_private_key(self,
private_key_string: bytes,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_node_staking_score(self,
node_staking_score: NodeStakingScore,
since_block_number: BlockNumber) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def save_health_request(self, peer_wallet_address: Address, response_time_in_micros: int = float('inf')) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_current_peer_node_health(self,peer_wallet_address: Address) -> PeerNodeHealth:
raise NotImplementedError("Chain classes must implement this method")
class Chain(BaseChain):
"""
A Chain is a combination of one or more VM classes. Each VM is associated
with a range of block timestamps. The Chain class acts as a wrapper around
these VM classes, delegating operations to the appropriate VM depending on
the block timestamp.
"""
raise_errors = False
logger = logging.getLogger("hvm.chain.chain.Chain")
header = None # type: BlockHeader
network_id = None # type: int
gas_estimator = None # type: Callable
_journaldb = None
num_journal_records_for_block_import = 0
chaindb_class = ChainDB # type: Type[BaseChainDB]
chain_head_db_class = ChainHeadDB
_queue_block: BaseQueueBlock = None
def __init__(self, base_db: BaseDB, wallet_address: Address, private_key: BaseKey=None) -> None:
if not self.vm_configuration:
raise ValueError(
"The Chain class cannot be instantiated with an empty `vm_configuration`"
)
else:
validate_vm_configuration(self.vm_configuration)
validate_canonical_address(wallet_address, "Wallet Address")
self.db = base_db
self.private_key = private_key
self.wallet_address = wallet_address
self.chaindb = self.get_chaindb_class()(self.db)
self.chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(self.db)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
#this chain has no canonical head yet, so let's make a genesis block header
# self.logger.debug("Creating new genesis block on chain {}".format(self.wallet_address))
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
if self.gas_estimator is None:
self.gas_estimator = get_gas_estimator() # type: ignore
def reinitialize(self):
self.__init__(self.db, self.wallet_address, self.private_key)
def set_new_wallet_address(self, wallet_address: Address, private_key: BaseKey=None):
self.logger.debug('setting new wallet address')
self.wallet_address = wallet_address
self.private_key = private_key
self.reinitialize()
@property
def queue_block(self):
if self._queue_block is None:
self._queue_block = self.get_queue_block()
return self._queue_block
@queue_block.setter
def queue_block(self,val:BaseQueueBlock):
self._queue_block = val
@property
def min_time_between_blocks(self):
vm = self.get_vm(timestamp=Timestamp(int(time.time())))
min_allowed_time_between_blocks = vm.min_time_between_blocks
return min_allowed_time_between_blocks
# @property
# def consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None):
# # gets the consensus db corresponding to the block timestamp
#
# return self.get_vm(header, timestamp).consensus_db
def get_consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None) -> ConsensusDB:
# gets the consensus db corresponding to the block timestamp
return self.get_vm(header, timestamp).consensus_db
#
# Global Record and discard API
#
def enable_read_only_db(self) -> None:
if not isinstance(self.db, ReadOnlyDB):
self.base_db = self.db
self.db = ReadOnlyDB(self.base_db)
self.reinitialize()
def enable_journal_db(self):
if self._journaldb is None:
self.base_db = self.db
self._journaldb = JournalDB(self.base_db)
#we keep the name self.db so that all of the functions still work, but at this point it is a journaldb.
self.db = self._journaldb
#reinitialize to ensure chain and chain_head_db have the new journaldb
self.reinitialize()
def disable_journal_db(self):
if self._journaldb is not None:
self.db = self.base_db
self._journaldb = None
#reinitialize to ensure chain and chain_head_db have the new journaldb
self.reinitialize()
def record_journal(self) -> UUID:
if self._journaldb is not None:
return (self._journaldb.record())
else:
raise JournalDbNotActivated()
def discard_journal(self, changeset: UUID) -> None:
if self._journaldb is not None:
db_changeset = changeset
self._journaldb.discard(db_changeset)
else:
raise JournalDbNotActivated()
def commit_journal(self, changeset: UUID) -> None:
if self._journaldb is not None:
db_changeset = changeset
self._journaldb.commit(db_changeset)
else:
raise JournalDbNotActivated()
def persist_journal(self) -> None:
if self._journaldb is not None:
self._journaldb.persist()
else:
raise JournalDbNotActivated()
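# Usage sketch (illustrative only; `some_block` is a placeholder): the journal
# db lets callers attempt an import and roll it back atomically on failure.
#
# chain.enable_journal_db()
# changeset = chain.record_journal()
# try:
#     chain.import_block(some_block)
#     chain.commit_journal(changeset)
#     chain.persist_journal()
# except Exception:
#     chain.discard_journal(changeset)
# finally:
#     chain.disable_journal_db()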
#
# Helpers
#
@classmethod
def get_chaindb_class(cls) -> Type[BaseChainDB]:
if cls.chaindb_class is None:
raise AttributeError("`chaindb_class` not set")
return cls.chaindb_class
@classmethod
def get_chain_head_db_class(cls) -> Type[ChainHeadDB]:
if cls.chain_head_db_class is None:
raise AttributeError("`chain_head_db class` not set")
return cls.chain_head_db_class
@classmethod
def get_genesis_wallet_address(cls) -> Address:
if cls.genesis_wallet_address is None:
raise AttributeError("`genesis_wallet_address` not set")
return cls.genesis_wallet_address
#
# Chain API
#
@classmethod
def create_genesis_header(cls,
base_db: BaseDB,
wallet_address: Address,
private_key: BaseKey,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None,
) -> 'BaseChain':
genesis_vm_class = cls.get_vm_class_for_block_timestamp()
account_db = genesis_vm_class.get_state_class().get_account_db_class()(base_db)
if genesis_state is None:
genesis_state = {}
# mutation
account_db = apply_state_dict(account_db, genesis_state)
account_db.persist(save_account_hash = True, wallet_address = wallet_address)
genesis_params['account_hash'] = account_db.get_account_hash(wallet_address)
genesis_header = BlockHeader(**genesis_params)
signed_genesis_header = genesis_header.get_signed(private_key, cls.network_id)
chaindb = cls.get_chaindb_class()(base_db)
chaindb.persist_header(signed_genesis_header)
return signed_genesis_header
@classmethod
def from_genesis(cls,
base_db: BaseDB,
wallet_address: Address,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState,
private_key: BaseKey = None
) -> 'BaseChain':
"""
Initializes the Chain from a genesis state.
"""
genesis_vm_class = cls.get_vm_class_for_block_timestamp()
account_db = genesis_vm_class.get_state_class().get_account_db_class()(
base_db
)
if genesis_state is None:
genesis_state = {}
# mutation
account_db = apply_state_dict(account_db, genesis_state)
account_db.persist(save_account_hash = True, wallet_address = cls.genesis_wallet_address)
genesis_header = BlockHeader(**genesis_params)
return cls.from_genesis_header(base_db, wallet_address = wallet_address, private_key = private_key, genesis_header = genesis_header)
@classmethod
def from_genesis_header(cls,
base_db: BaseDB,
wallet_address: Address,
genesis_header: BlockHeader,
private_key: BaseKey,
) -> 'BaseChain':
"""
Initializes the chain from the genesis header.
"""
chaindb = cls.get_chaindb_class()(base_db)
chaindb.persist_header(genesis_header)
chain_head_db = cls.get_chain_head_db_class()(base_db)
#window_for_this_block = math.ceil((genesis_header.timestamp+1)/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
window_for_this_block = int(genesis_header.timestamp / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
chain_head_db.set_chain_head_hash(cls.genesis_wallet_address, genesis_header.hash)
chain_head_db.initialize_historical_root_hashes(chain_head_db.root_hash, window_for_this_block)
chain_head_db.persist(save_current_root_hash = True)
#chain_head_db.add_block_hash_to_chronological_window(genesis_header.hash, genesis_header.timestamp)
return cls(base_db, wallet_address = wallet_address, private_key=private_key)
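# Bootstrapping sketch (illustrative; `db`, `wallet`, `key`, `genesis_params`
# and `genesis_state` are placeholders, and the params must match the
# BlockHeader fields expected by the genesis VM):
#
# signed_header = MyChain.create_genesis_header(db, wallet, key, genesis_params, genesis_state)
# chain = MyChain.from_genesis_header(db, wallet_address=wallet, genesis_header=signed_header, private_key=key)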
def get_chain_at_block_parent(self, block: BaseBlock) -> BaseChain:
"""
Returns a `Chain` instance with the given block's parent at the chain head.
"""
try:
parent_header = self.get_block_header_by_hash(block.header.parent_hash)
except HeaderNotFound:
raise ValidationError("Parent ({0}) of block {1} not found".format(
block.header.parent_hash,
block.header.hash
))
init_header = self.create_header_from_parent(parent_header)
return type(self)(self.chaindb.db, self.wallet_address, self.private_key, init_header)
#
# VM API
#
def get_vm(self, header: BlockHeader=None, timestamp: Timestamp = None) -> 'BaseVM':
"""
Returns the VM instance for the given header, or, if a timestamp is given,
the VM for that timestamp. Only one of header and timestamp may be provided.
"""
if header is not None and timestamp is not None:
raise ValueError("Cannot specify header and timestamp for get_vm(). Only one is allowed.")
if header is None or header == self.header:
header = self.header
if timestamp is not None:
header = header.copy(timestamp=timestamp)
vm_class = self.get_vm_class_for_block_timestamp(header.timestamp)
return vm_class(header=header,
chaindb=self.chaindb,
network_id=self.network_id)
#
# Header API
#
def create_header_from_parent(self, parent_header, **header_params):
"""
Passthrough helper to the VM class of the block descending from the
given header.
"""
return self.get_vm_class_for_block_timestamp().create_header_from_parent(parent_header, **header_params)
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
"""
Returns the requested block header as specified by block hash.
Raises BlockNotFound if there's no block header with the given hash in the db.
"""
validate_word(block_hash, title="Block Hash")
return self.chaindb.get_block_header_by_hash(block_hash)
def get_canonical_head(self, chain_address = None):
"""
Returns the block header at the canonical chain head.
Raises CanonicalHeadNotFound if there's no head defined for the canonical chain.
"""
if chain_address is not None:
return self.chaindb.get_canonical_head(chain_address)
else:
return self.chaindb.get_canonical_head(self.wallet_address)
#
# Block API
#
def get_genesis_block_hash(self) -> Hash32:
return self.chaindb.get_canonical_block_hash(block_number = BlockNumber(0),
chain_address = self.genesis_wallet_address)
@to_tuple
def get_ancestors(self, limit: int, header: BlockHeader=None) -> Iterator[BaseBlock]:
"""
Return `limit` number of ancestor blocks from the current canonical head.
"""
if header is None:
header = self.header
lower_limit = max(header.block_number - limit, 0)
for n in reversed(range(lower_limit, header.block_number)):
yield self.get_block_by_number(BlockNumber(n), header.chain_address)
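# Illustrative example: with the canonical head at block number 10,
# get_ancestors(3) yields blocks 9, 8 and 7 (newest first); the lower limit is
# clamped at 0 for chains shorter than `limit`.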
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
block_header = self.get_block_header_by_hash(block_hash)
return self.get_block_by_header(block_header)
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
"""
Returns the requested block as specified by the block header.
"""
block_class = self.get_vm_class_for_block_timestamp(block_header.timestamp).get_block_class()
send_transactions = self.chaindb.get_block_transactions(block_header, block_class.transaction_class)
receive_transactions = self.chaindb.get_block_receive_transactions(block_header,block_class.receive_transaction_class)
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block_class.reward_bundle_class)
output_block = block_class(block_header, send_transactions, receive_transactions, reward_bundle)
return output_block
def get_block_by_number(self, block_number: BlockNumber, chain_address: Address = None) -> BaseBlock:
if chain_address is None:
chain_address = self.wallet_address
block_hash = self.chaindb.get_canonical_block_hash(block_number, chain_address)
return self.get_block_by_hash(block_hash)
def get_blocks_on_chain(self, start: int, end: int, chain_address: Address = None) -> List[BaseBlock]:
if chain_address is None:
chain_address = self.wallet_address
if end == 0:
canonical_head_header = self.get_canonical_head(chain_address=chain_address)
head_block_number = canonical_head_header.block_number
end = head_block_number + 1
blocks = []
for block_number in range(start, end):
try:
new_block = self.get_block_by_number(BlockNumber(block_number), chain_address)
blocks.append(new_block)
except HeaderNotFound:
break
return blocks
def get_all_blocks_on_chain(self, chain_address: Address = None) -> List[BaseBlock]:
if chain_address is None:
chain_address = self.wallet_address
canonical_head_header = self.get_canonical_head(chain_address=chain_address)
head_block_number = canonical_head_header.block_number
return self.get_blocks_on_chain(0, head_block_number + 1, chain_address=chain_address)
def get_all_blocks_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[BaseBlock]:
chain_head_header = self.get_block_header_by_hash(chain_head_hash)
chain_address = chain_head_header.chain_address
return self.get_all_blocks_on_chain(chain_address)
def get_blocks_on_chain_up_to_block_hash(self, chain_head_hash: Hash32, start_block_number: int = 0, limit: int = float('inf')) -> List[BaseBlock]:
chain_head_header = self.get_block_header_by_hash(chain_head_hash)
to_block_number = chain_head_header.block_number
if to_block_number > (start_block_number + limit):
to_block_number = (start_block_number + limit)
chain_address = chain_head_header.chain_address
return self.get_blocks_on_chain(start_block_number, to_block_number + 1, chain_address)
def get_block(self) -> BaseBlock:
"""
Returns the current TIP block.
"""
return self.get_vm().block
def get_queue_block(self) -> BaseBlock:
"""
Returns the current queue block.
"""
return self.get_vm().queue_block
# def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
# """
# Returns the requested block as specified by block hash.
# """
# validate_word(block_hash, title="Block Hash")
# block_header = self.get_block_header_by_hash(block_hash)
# return self.get_block_by_header(block_header)
# def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
# """
# Returns the block with the given number in the canonical chain.
#
# Raises BlockNotFound if there's no block with the given number in the
# canonical chain.
# """
# validate_uint256(block_number, title="Block Number")
# return self.get_block_by_hash(self.chaindb.get_canonical_block_hash(block_number))
#
# def get_canonical_block_hash(self, block_number: BlockNumber) -> Hash32:
# """
# Returns the block hash with the given number in the canonical chain.
#
# Raises BlockNotFound if there's no block with the given number in the
# canonical chain.
# """
# return self.chaindb.get_canonical_block_hash(block_number)
#
# Blockchain Database API
#
def save_chain_head_hash_to_trie_for_time_period(self,block_header):
timestamp = block_header.timestamp
currently_saving_window = int(time.time()/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
if timestamp <= currently_saving_window:
#we have to go back and put it into the correct window, and update all windows after that
#lets only keep the past NUMBER_OF_HEAD_HASH_TO_SAVE block_head_root_hash
window_for_this_block = int(timestamp / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
#window_for_this_block = math.ceil((timestamp + 1)/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
# if propogate_to_present:
self.chain_head_db.add_block_hash_to_timestamp(block_header.chain_address, block_header.hash, window_for_this_block)
# else:
# self.chain_head_db.add_block_hash_to_timestamp_without_propogating_to_present(self.wallet_address, block_header.hash, window_for_this_block)
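# Worked example of the window arithmetic (assuming, for illustration only,
# TIME_BETWEEN_HEAD_HASH_SAVE = 1000): a block with timestamp 123456 maps to
# int(123456 / 1000) * 1000 + 1000 = 124000, i.e. the first root-hash window
# boundary strictly after the block's timestamp.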
#
# Queueblock API
#
def add_transaction_to_queue_block(self, transaction) -> None:
validate_is_queue_block(self.queue_block, title='self.queue_block')
if isinstance(transaction, BaseTransaction):
if not self.queue_block.contains_transaction(transaction):
self.queue_block = self.queue_block.add_transaction(transaction)
else:
self.logger.debug("found transaction in queueblock already, not adding again")
else:
if not self.queue_block.contains_receive_transaction(transaction):
self.queue_block = self.queue_block.add_receive_transaction(transaction)
else:
self.logger.debug("found receive transaction in queueblock already, not adding again")
def add_transactions_to_queue_block(self, transactions) -> None:
if not isinstance(transactions, list):
self.add_transaction_to_queue_block(transactions)
#self.logger.debug("tx_nonce after adding transaction = {}".format(self.queue_block.current_tx_nonce))
else:
for tx in transactions:
self.add_transaction_to_queue_block(tx)
def sign_queue_block(self, *args: Any, **kwargs: Any) -> BaseQueueBlock:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().sign_queue_block(*args, **kwargs)
def sign_header(self, *args: Any, **kwargs: Any) -> BlockHeader:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().sign_header(*args, **kwargs)
#
# Transaction API
#
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
"""
Returns the requested transaction as specified by the transaction hash
from the canonical chain.
Raises TransactionNotFound if no transaction with the specified hash is
found in the main chain.
"""
(block_hash, index, is_receive) = self.chaindb.get_transaction_index(transaction_hash)
block_header = self.get_block_header_by_hash(block_hash)
VM = self.get_vm_class_for_block_timestamp(block_header.timestamp)
if not is_receive:
transaction = self.chaindb.get_transaction_by_index_and_block_hash(
block_hash,
index,
VM.get_transaction_class(),
)
else:
transaction = self.chaindb.get_receive_transaction_by_index_and_block_hash(
block_hash,
index,
VM.get_receive_transaction_class(),
)
if transaction.hash == transaction_hash:
return transaction
else:
raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format(
encode_hex(transaction.hash),
encode_hex(transaction_hash),
block_hash,
index,
))
@functools.lru_cache(maxsize=32)
def get_transaction_by_block_hash_and_index(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
num_send_transactions = self.chaindb.get_number_of_send_tx_in_block(block_hash)
header = self.chaindb.get_block_header_by_hash(block_hash)
vm = self.get_vm(header=header)
if transaction_index >= num_send_transactions:
# receive transaction
transaction_index = transaction_index - num_send_transactions
tx = self.chaindb.get_receive_transaction_by_index_and_block_hash(block_hash=block_hash,
transaction_index=transaction_index,
transaction_class=vm.get_receive_transaction_class())
else:
# send transaction
tx = self.chaindb.get_transaction_by_index_and_block_hash(block_hash=block_hash,
transaction_index=transaction_index,
transaction_class=vm.get_transaction_class())
return tx
def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().create_transaction(*args, **kwargs)
def create_and_sign_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
if self.private_key is None:
raise ValueError("Cannot sign transaction because private key not provided for chain instantiation")
transaction = self.create_transaction(*args, **kwargs)
signed_transaction = transaction.get_signed(self.private_key, self.network_id)
return signed_transaction
def create_and_sign_transaction_for_queue_block(self, *args: Any, **kwargs: Any) -> BaseTransaction:
if 'nonce' not in kwargs or kwargs['nonce'] is None:
kwargs['nonce'] = self.get_current_queue_block_nonce()
transaction = self.create_and_sign_transaction(*args, **kwargs)
self.add_transactions_to_queue_block(transaction)
return transaction
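# Usage sketch (illustrative; the keyword arguments depend on the VM's
# transaction class and are placeholders here). The nonce is filled in
# automatically from the queue block or the account db:
#
# tx = chain.create_and_sign_transaction_for_queue_block(
#     gas_price=1,
#     gas=21000,
#     to=recipient_address,
#     value=10,
#     data=b'',
# )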
def get_current_queue_block_nonce(self) -> int:
if self.queue_block is None or self.queue_block.current_tx_nonce is None:
tx_nonce = self.get_vm().state.account_db.get_nonce(self.wallet_address)
else:
tx_nonce = self.queue_block.current_tx_nonce
return tx_nonce
def create_receive_transaction(self, *args: Any, **kwargs: Any) -> BaseReceiveTransaction:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().create_receive_transaction(*args, **kwargs)
def get_receivable_transactions(self, address: Address) -> Tuple[List[BaseReceiveTransaction], List[TransactionKey]]:
#from hvm.rlp_templates.accounts import TransactionKey
tx_keys = self.get_vm().state.account_db.get_receivable_transactions(address)
if len(tx_keys) == 0:
return [], []
transactions = []
for tx_key in tx_keys:
tx = self.get_canonical_transaction(tx_key.transaction_hash)
transactions.append(tx)
return transactions, tx_keys
def create_receivable_transactions(self) -> List[BaseReceiveTransaction]:
tx_keys = self.get_vm().state.account_db.get_receivable_transactions(self.wallet_address)
if len(tx_keys) == 0:
return []
receive_transactions = []
for tx_key in tx_keys:
#find out if it is a receive or a refund
block_hash, index, is_receive = self.chaindb.get_transaction_index(tx_key.transaction_hash)
re_tx = self.get_vm().create_receive_transaction(
sender_block_hash = tx_key.sender_block_hash,
send_transaction_hash=tx_key.transaction_hash,
is_refund=is_receive,
)
receive_transactions.append(re_tx)
return receive_transactions
def populate_queue_block_with_receive_tx(self) -> List[BaseReceiveTransaction]:
receive_tx = self.create_receivable_transactions()
self.add_transactions_to_queue_block(receive_tx)
return receive_tx
def get_block_receive_transactions_by_hash(
self,
block_hash: Hash32) -> List['BaseReceiveTransaction']:
block_header = self.get_block_header_by_hash(block_hash)
vm = self.get_vm(header = block_header)
receive_transaction_class = vm.get_block_class().receive_transaction_class
receive_transactions = self.chaindb.get_block_receive_transactions(header = block_header, transaction_class = receive_transaction_class)
return receive_transactions
def get_receive_tx_from_send_tx(self, tx_hash: Hash32) -> Optional['BaseReceiveTransaction']:
block_hash, index, is_receive = self.chaindb.get_transaction_index(tx_hash)
if is_receive:
raise ValidationError("The provided tx hash is not for a send transaction")
send_transaction = self.get_canonical_transaction(tx_hash)
block_children = self.chaindb.get_block_children(block_hash)
if block_children is not None:
block_children_on_correct_chain = [child_hash for child_hash in block_children
if self.chaindb.get_chain_wallet_address_for_block_hash(child_hash) == send_transaction.to]
for block_hash in block_children_on_correct_chain:
receive_transactions = self.get_block_receive_transactions_by_hash(block_hash)
for receive_tx in receive_transactions:
if receive_tx.send_transaction_hash == tx_hash:
return receive_tx
return None
def get_transaction_by_index_and_block_hash(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
header = self.chaindb.get_block_header_by_hash(block_hash)
vm = self.get_vm(header=header)
return self.chaindb.get_transaction_by_index_and_block_hash(
block_hash,
transaction_index,
vm.get_transaction_class(),
)
#
# Chronological Chain api
#
def try_to_rebuild_chronological_chain_from_historical_root_hashes(self, historical_root_hash_timestamp: Timestamp) -> None:
try:
correct_chronological_block_window = self.get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(historical_root_hash_timestamp)
self.chain_head_db.save_chronological_block_window(correct_chronological_block_window, historical_root_hash_timestamp-TIME_BETWEEN_HEAD_HASH_SAVE)
except InvalidHeadRootTimestamp:
pass
def get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(self, historical_root_hash_timestamp: Timestamp) -> List[Tuple[Timestamp, Hash32]]:
'''
This is a time-consuming function that gets all of the block hashes that are new in this root hash, i.e. that didn't exist in the base (previous) root hash.
:param timestamp:
:return:
'''
block_window_start = historical_root_hash_timestamp - TIME_BETWEEN_HEAD_HASH_SAVE
base_root_hash = self.chain_head_db.get_historical_root_hash(block_window_start)
new_root_hash = self.chain_head_db.get_historical_root_hash(historical_root_hash_timestamp)
if base_root_hash == new_root_hash:
return None
if base_root_hash is None or new_root_hash is None:
raise InvalidHeadRootTimestamp(
"Could not load block hashes for this historical_root_hash_timestamp because we don't have a root hash for this window or the previous window.")
base_head_block_hashes = set(self.chain_head_db.get_head_block_hashes(base_root_hash))
new_head_block_hashes = set(self.chain_head_db.get_head_block_hashes(new_root_hash))
diff_head_block_hashes = new_head_block_hashes - base_head_block_hashes
chronological_block_hash_timestamps = []
# now we have to run down each chain until we get to a block that is older than block_window_start
for head_block_hash in diff_head_block_hashes:
header = self.chaindb.get_block_header_by_hash(head_block_hash)
chronological_block_hash_timestamps.append([header.timestamp, head_block_hash])
while True:
if header.parent_hash == GENESIS_PARENT_HASH:
break
try:
header = self.chaindb.get_block_header_by_hash(header.parent_hash)
except HeaderNotFound:
break
if header.timestamp < block_window_start:
break
chronological_block_hash_timestamps.append([header.timestamp, header.hash])
assert len(chronological_block_hash_timestamps) > 0
chronological_block_hash_timestamps.sort()
return chronological_block_hash_timestamps
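# Illustrative example of the diff above: if the base root hash has chain heads
# {A1, B1} and the new root hash has {A1, B2, C1}, then diff_head_block_hashes
# is {B2, C1}. Each of those chains is then walked backwards, collecting
# (timestamp, hash) pairs, until a block older than block_window_start (or the
# genesis parent) is reached.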
# def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
# '''
# This function rebuilds all historical root hashes, and chronological blocks, from the blockchain database. It starts with the saved root hash and works backwards.
# This function needs to be run from chain because it requires chain_head_db and chaindb.
# :return:
# '''
#
# self.chain_head_db.load_saved_root_hash()
# current_window = self.chain_head_db.current_window
# earliest_root_hash = self.chain_head_db.earliest_window
# #TIME_BETWEEN_HEAD_HASH_SAVE
#
# # 1) iterate down the root hash times
# # 2) create new chain_head_db with memorydb
# # 3) go through each chain and any blocks newer than the timestamp, save to chronological window.
# # 4) when you reach a block less than the timestamp, set it as chain head in the new memory based chain_head_db
# # 5) get the root hash
# # 6) set this root hash in the real chain_head_db at the correct timestamp.
#
# # A chronological block window holds all of the blocks starting at its timestamp, going to timestamp + TIME_BETWEEN_HEAD_HASH_SAVE
# # A historical root hash is the root hash at the given timestamp, so it includes all blocks earlier than that timestamp.
#
# # us a journaldb so that it doesnt write changes to the database.
# temp_chain_head_db = self.get_chain_head_db_class()(MemoryDB())
# #temp_chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(JournalDB(self.db))
# for current_timestamp in range(current_window, earliest_root_hash-TIME_BETWEEN_HEAD_HASH_SAVE, -TIME_BETWEEN_HEAD_HASH_SAVE):
# self.logger.debug("Rebuilding chronological block window {}".format(current_timestamp))
# if current_timestamp < self.genesis_block_timestamp:
# break
#
# if current_timestamp == current_window:
# head_block_hashes = self.chain_head_db.get_head_block_hashes_list()
# else:
# head_block_hashes = temp_chain_head_db.get_head_block_hashes_list()
#
# # iterate over all chains
# for head_block_hash in head_block_hashes:
# current_block_hash = head_block_hash
# # now iterate over blocks in chain
# while True:
# current_header = self.chaindb.get_block_header_by_hash(current_block_hash)
# if current_header.timestamp >= current_timestamp:
# # add it to chronological block window in the real chain head db
# self.chain_head_db.add_block_hash_to_chronological_window(current_header.hash, current_header.timestamp)
# else:
# # The block is older than the timestamp. Set it as the chain head block hash in our temp chain head db
# temp_chain_head_db.set_chain_head_hash(current_header.chain_address, current_header.hash)
# break
# if current_header.parent_hash == GENESIS_PARENT_HASH:
# # we reached the end of the chain
# temp_chain_head_db.delete_chain_head_hash(current_header.chain_address)
# break
# # set the current block to the parent so we move down the chain
# current_block_hash = current_header.parent_hash
#
# # Now that we have gone through all chains, and removed any blocks newer than this timestamp, the root hash in the
# # temp chain head db is the correct one for this historical root hash timestamp.
# self.chain_head_db.save_single_historical_root_hash(temp_chain_head_db.root_hash, Timestamp(current_timestamp))
def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
'''
This function rebuilds all historical root hashes, and chronological blocks, from the blockchain database. It starts with the saved root hash and works backwards.
This function needs to be run from chain because it requires chain_head_db and chaindb.
:return:
'''
self.chain_head_db.load_saved_root_hash()
current_window = self.chain_head_db.current_window
earliest_root_hash = self.chain_head_db.earliest_window
# 1) iterate down the root hash times
# 2) create new chain_head_db with memorydb
# 3) go through each chain and any blocks newer than the timestamp, save to chronological window.
# 4) when you reach a block less than the timestamp, set it as chain head in the new memory based chain_head_db
# 5) get the root hash
# 6) set this root hash in the real chain_head_db at the correct timestamp.
# A chronological block window holds all of the blocks starting at its timestamp, going to timestamp + TIME_BETWEEN_HEAD_HASH_SAVE
# A historical root hash is the root hash at the given timestamp, so it includes all blocks earlier than that timestamp.
self.logger.debug("Rebuilding chronological block windows")
# use a temporary in-memory chain head db so that it doesn't write changes to the database.
temp_chain_head_db = self.get_chain_head_db_class()(MemoryDB())
#temp_chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(JournalDB(self.db))
for current_timestamp in range(current_window, earliest_root_hash-TIME_BETWEEN_HEAD_HASH_SAVE, -TIME_BETWEEN_HEAD_HASH_SAVE):
if current_timestamp < self.genesis_block_timestamp:
break
head_block_hashes = self.chain_head_db.get_head_block_hashes_list()
# iterate over all chains
for head_block_hash in head_block_hashes:
current_block_hash = head_block_hash
# now iterate over blocks in chain
while True:
current_header = self.chaindb.get_block_header_by_hash(current_block_hash)
if current_header.timestamp >= current_timestamp:
# add it to chronological block window in the real chain head db
self.chain_head_db.add_block_hash_to_chronological_window(current_header.hash, current_header.timestamp)
else:
# The block is older than the timestamp. Set it as the chain head block hash in our temp chain head db
self.chain_head_db.set_chain_head_hash(current_header.chain_address, current_header.hash)
break
if current_header.parent_hash == GENESIS_PARENT_HASH:
# we reached the end of the chain
self.chain_head_db.delete_chain_head_hash(current_header.chain_address)
break
# set the current block to the parent so we move down the chain
current_block_hash = current_header.parent_hash
# Now that we have gone through all chains, and removed any blocks newer than this timestamp, the root hash in the
# temp chain head db is the correct one for this historical root hash timestamp.
self.chain_head_db.save_single_historical_root_hash(self.chain_head_db.root_hash, Timestamp(current_timestamp))
self.chain_head_db.persist()
# finally, lets load the saved root hash again so we are up to date.
self.chain_head_db.load_saved_root_hash()
#
# Execution API
#
def estimate_gas(self, transaction: BaseTransaction, at_header: BlockHeader=None) -> int:
"""
Returns an estimation of the amount of gas the given transaction will
use if executed on top of the block specified by the given header.
"""
if at_header is None:
at_header = self.get_canonical_head()
with self.get_vm(at_header).state_in_temp_block() as state:
return self.gas_estimator(state, transaction)
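# Usage sketch (illustrative; the transaction arguments are placeholders and
# depend on the VM's transaction class):
#
# tx = chain.create_transaction(gas_price=1, gas=21000, to=recipient_address, value=10, data=b'')
# estimated_gas = chain.estimate_gas(tx)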
def validate_time_from_genesis_block(self, block):
if not block.is_genesis:
#first make sure enough time has passed since genesis. We need at least TIME_BETWEEN_HEAD_HASH_SAVE since genesis so that the
# genesis historical root hash only contains the genesis chain.
if block.header.timestamp < (self.genesis_block_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE):
raise NotEnoughTimeBetweenBlocks("Not enough time has passed since the genesis block. Must wait at least {} seconds after genesis block. "
"This block timestamp is {}, genesis block timestamp is {}.".format(TIME_BETWEEN_HEAD_HASH_SAVE, block.header.timestamp, self.genesis_block_timestamp))
return
#
# Reverting block functions
#
def delete_canonical_chain(self, wallet_address: Address, vm: 'BaseVM', save_block_head_hash_timestamp:bool = True) -> None:
self.logger.debug("delete_canonical_chain. Chain address {}".format(encode_hex(wallet_address)))
self.chain_head_db.delete_chain(wallet_address, save_block_head_hash_timestamp)
self.chaindb.delete_canonical_chain(wallet_address)
vm.state.clear_account_keep_receivable_transactions_and_persist(wallet_address)
def set_parent_as_canonical_head(self, existing_block_header: BlockHeader, vm: 'BaseVM', save_block_head_hash_timestamp:bool = True) -> None:
block_parent_header = self.chaindb.get_block_header_by_hash(existing_block_header.parent_hash)
self.logger.debug("Setting new block as canonical head after reverting blocks. Chain address {}, header hash {}".format(encode_hex(existing_block_header.chain_address), encode_hex(block_parent_header.hash)))
if save_block_head_hash_timestamp:
self.save_chain_head_hash_to_trie_for_time_period(block_parent_header)
self.chain_head_db.set_chain_head_hash(block_parent_header.chain_address, block_parent_header.hash)
self.chaindb._set_as_canonical_chain_head(block_parent_header)
vm.state.revert_account_to_hash_keep_receivable_transactions_and_persist(block_parent_header.account_hash, block_parent_header.chain_address)
def revert_block(self, descendant_block_hash: Hash32) -> None:
self.logger.debug('Reverting block with hash {}'.format(encode_hex(descendant_block_hash)))
descendant_block_header = self.chaindb.get_block_header_by_hash(descendant_block_hash)
vm = self.get_vm(descendant_block_header)
self.chain_head_db.delete_block_hash_from_chronological_window(descendant_block_hash, descendant_block_header.timestamp)
self.chaindb.remove_block_from_all_parent_child_lookups(descendant_block_header, vm.get_block_class().receive_transaction_class)
self.chaindb.delete_all_block_children_lookups(descendant_block_hash)
self.revert_block_chronological_consistency_lookups(descendant_block_hash)
#for every one, re-add pending receive transaction for all receive transactions only if sending block still exists
#make all blocks unprocessed so that receivable transactions are not saved that came from one of the non-canonical blocks.
vm.reverse_pending_transactions(descendant_block_header)
# remove the block from the canonical chain. This must be done last because reversing the pending transactions requires that it
# is still in the canonical chain to look up transactions
self.chaindb.delete_block_from_canonical_chain(descendant_block_hash)
#self.chaindb.save_unprocessed_block_lookup(descendant_block_hash)
vm.state.account_db.persist()
def revert_block_chronological_consistency_lookups(self, block_hash: Hash32) -> None:
# check to see if there are any reward type 2 proofs. Then loop through each one to revert inconsistency lookups
block_header = self.chaindb.get_block_header_by_hash(block_hash)
block_class = self.get_vm_class_for_block_timestamp(block_header.timestamp).get_block_class()
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block_class.reward_bundle_class)
chronological_consistency_key = [block_header.timestamp, block_header.hash]
for proof in reward_bundle.reward_type_2.proof:
# timestamp, block hash of block responsible
sender_chain_header = self.chaindb.get_block_header_by_hash(proof.head_hash_of_sender_chain)
# The chronological consistency restrictions are placed on the block on top of the one giving the proof.
block_number_with_restrictions = sender_chain_header.block_number + 1
self.chaindb.delete_block_consistency_key(sender_chain_header.chain_address, block_number_with_restrictions, chronological_consistency_key)
def purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(self, block_hash_to_delete: Hash32, save_block_head_hash_timestamp: bool = True) -> None:
genesis_block_hash = self.chaindb.get_canonical_block_hash(BlockNumber(0), self.genesis_wallet_address)
if block_hash_to_delete == genesis_block_hash:
raise TriedDeletingGenesisBlock("Attempted to delete genesis block. This is not allowed.")
block_header_to_delete = self.chaindb.get_block_header_by_hash(block_hash_to_delete)
self.purge_block_and_all_children_and_set_parent_as_chain_head(block_header_to_delete, save_block_head_hash_timestamp)
def purge_block_and_all_children_and_set_parent_as_chain_head(self, existing_block_header: BlockHeader, save_block_head_hash_timestamp: bool = True) -> None:
# First make sure it is actually in the canonical chain. If not, then we don't have anything to do.
if self.chaindb.is_in_canonical_chain(existing_block_header.hash):
vm = self.get_vm()
if existing_block_header.block_number == 0:
self.delete_canonical_chain(existing_block_header.chain_address, vm, save_block_head_hash_timestamp)
else:
#set the parent block as the new canonical head, and handle all the data for that
self.set_parent_as_canonical_head(existing_block_header, vm, save_block_head_hash_timestamp)
#1) delete chronological transactions, delete everything from chronological root hashes, delete children lookups
all_descendant_block_hashes = self.chaindb.get_all_descendant_block_hashes(existing_block_header.hash)
#first set all of the new chain heads and all the data that goes along with them
if all_descendant_block_hashes is not None:
for descendant_block_hash in all_descendant_block_hashes:
if not self.chaindb.is_block_unprocessed(descendant_block_hash):
descendant_block_header = self.chaindb.get_block_header_by_hash(descendant_block_hash)
if descendant_block_header.parent_hash not in all_descendant_block_hashes:
#this is the new head of a chain. set it as the new head for chronological root hashes
#except for children in this chain, because it will be off by 1 block. we already set this earlier
if descendant_block_header.chain_address != existing_block_header.chain_address:
if descendant_block_header.block_number == 0:
self.delete_canonical_chain(descendant_block_header.chain_address, vm, save_block_head_hash_timestamp)
else:
self.set_parent_as_canonical_head(descendant_block_header, vm, save_block_head_hash_timestamp)
# Must persist now because revert_block creates new vm's for each block and could overwrite changes if we wait.
vm.state.account_db.persist()
#now we know what the new heads are, so we can deal with the rest of the descendants
for descendant_block_hash in all_descendant_block_hashes:
#here, since we are already going through all children, we don't need this function to purge children as well
if self.chaindb.is_block_unprocessed(descendant_block_hash):
self.purge_unprocessed_block(descendant_block_hash, purge_children_too = False)
else:
self.revert_block(descendant_block_hash)
self.revert_block(existing_block_header.hash)
#persist changes
self.chain_head_db.persist(True)
self.reinitialize()
def purge_unprocessed_block(self, block_hash, purge_children_too = True):
'''
Deletes all unprocessed block lookups, and unprocessed children lookups for this block and all children blocks.
Todo: delete saved block header, and saved transaction tries for each block as well
'''
self.logger.debug("purging unprocessed block")
if purge_children_too:
self.logger.debug("purging unprocessed children")
if self.chaindb.has_unprocessed_children(block_hash):
self.logger.debug("HAS UNPROCESSED CHILDREN BLOCKS")
children_block_hashes = self.chaindb.get_block_children(block_hash)
if children_block_hashes is not None:
for child_block_hash in children_block_hashes:
#this includes the child in this actual chain as well as children from send transactions.
if not self.chaindb.is_block_unprocessed(child_block_hash):
raise UnprocessedBlockChildIsProcessed("In process of deleting children of unprocessed block, and found one that is processed. This should never happen")
else:
self.purge_unprocessed_block(child_block_hash)
try:
block = self.get_block_by_hash(block_hash)
chain = encode_hex(block.header.chain_address)
self.logger.debug("deleting unprocessed child block number {} on chain {}".format(block.number, chain))
self.chaindb.remove_block_from_unprocessed(block)
except HeaderNotFound:
pass
def import_chronological_block_window(self, block_list: List[BaseBlock], window_start_timestamp: Timestamp, save_block_head_hash_timestamp:bool = True, allow_unprocessed:bool =False) -> None:
validate_uint256(window_start_timestamp, title='timestamp')
if block_list is None or len(block_list) == 0:
return
#if we are given a block that is not one of the two allowed classes, try converting it.
if len(block_list) > 0 and not isinstance(block_list[0], self.get_vm(timestamp = block_list[0].header.timestamp).get_block_class()):
self.logger.debug("converting chain to correct class")
corrected_block_list = []
for block in block_list:
corrected_block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
corrected_block_list.append(corrected_block)
block_list = corrected_block_list
#first we delete any blocks we have in the same window that are not in the new block list
local_chronological_timestamp_block_window = self.chain_head_db.load_chronological_block_window(window_start_timestamp)
if local_chronological_timestamp_block_window is not None:
local_block_hash_list = [x[1] for x in local_chronological_timestamp_block_window]
new_block_hash_list = [block.hash for block in block_list]
block_hashes_to_delete = effecient_diff(new_block_hash_list, local_block_hash_list)
if len(block_hashes_to_delete) > 0:
self.logger.debug("deleting existing blocks in chronological window {}".format(block_hashes_to_delete))
for block_hash_to_delete in block_hashes_to_delete:
self.purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(block_hash_to_delete)
if len(block_list) > 0:
self.logger.debug("starting block import for chronological block window")
#if block list is empty, load the local historical root hashes and delete them all
for i in range(len(block_list)):
# Reset this after each block imports
blocks_that_have_been_reorganized = set()
wallet_address = block_list[i].header.chain_address
while True:
try:
self.import_block(block_list[i], wallet_address = wallet_address, save_block_head_hash_timestamp = save_block_head_hash_timestamp, allow_unprocessed=allow_unprocessed)
break
except (UnprocessedBlockNotAllowed, ParentNotFound) as e:
# Because of the timestamps being in seconds, there may be multiple blocks that depend on each other
# with the same timestamp, and they could be out of order. So we attempt to reorganize the blocks
# and import again. If it fails again we will raise the exception.
if block_list[i].header.hash in blocks_that_have_been_reorganized:
self.logger.debug("Already tried reorganizing this block.")
raise e
self.logger.debug("Attempting to reorganize chronological window for import")
blocks_that_have_been_reorganized.add(block_list[i].header.hash)
block_list = reorganize_chronological_block_list_for_correct_chronological_order_at_index(block_list, i, self.logger)
else:
self.logger.debug("importing an empty chronological window. going to make sure we have a saved historical root hash")
historical_root_hashes = self.chain_head_db.get_historical_root_hashes()
if historical_root_hashes is not None:
#historical_root_hashes_dict = dict(historical_root_hashes)
#if it does exist, make sure it is the same as the last one. if not, then delete all newer
try:
self.chain_head_db.propogate_previous_historical_root_hash_to_timestamp(window_start_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE)
except AppendHistoricalRootHashTooOld:
self.logger.debug("Tried to propogate the previous historical root hash but there was none. This shouldn't happen")
#self.logger.debug("historical root hashes after chronological block import {}".format(self.chain_head_db.get_historical_root_hashes()))
def import_chain(self, block_list: List[BaseBlock], perform_validation: bool=True, save_block_head_hash_timestamp: bool = True, allow_replacement: bool = True) -> None:
if len(block_list) > 0:
self.logger.debug("importing chain")
#if we are given a block that is not one of the two allowed classes, try converting it.
if not isinstance(block_list[0], self.get_vm(timestamp = block_list[0].header.timestamp).get_block_class()):
self.logger.debug("converting chain to correct class")
corrected_block_list = []
for block in block_list:
corrected_block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
corrected_block_list.append(corrected_block)
block_list = corrected_block_list
wallet_address = block_list[0].header.chain_address
for block in block_list:
self.import_block(block,
perform_validation = perform_validation,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
wallet_address = wallet_address,
allow_replacement = allow_replacement)
# If we started with a longer chain, and all the imported blocks match ours, our chain will remain longer even after importing the new one.
# To fix this, we need to delete any blocks of ours that extend beyond the length of the chain that we are importing.
# First make sure the whole chain imported correctly. If not, then we don't need to do anything.
try:
local_canonical_head = self.chaindb.get_canonical_head(wallet_address)
imported_canonical_head = block_list[-1].header
#self.logger.debug("imported chain head hash {}. actual chain head hash {}".format(encode_hex(imported_canonical_head.hash), encode_hex(local_canonical_head.hash)))
if imported_canonical_head.block_number < local_canonical_head.block_number:
if self.chaindb.is_in_canonical_chain(imported_canonical_head.hash):
# Our chain is the same as the imported one, but we have some extra blocks on top. In this case, we would like to prune our chain
# to match the imported one.
# We only need to purge the next block after the imported chain. The vm will automatically purge all children
self.logger.debug("After importing a chain, our local chain is identical except with additional blocks on top. We will prune the top blocks to bring"
" our chain in line with the imported one.")
block_number_to_purge = imported_canonical_head.block_number + 1
hash_to_purge = self.chaindb.get_canonical_block_hash(BlockNumber(block_number_to_purge), wallet_address)
self.purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(hash_to_purge, save_block_head_hash_timestamp)
except CanonicalHeadNotFound:
pass
from hvm.utils.profile import profile
@profile(sortby='cumulative')
def import_block_with_profiler(self, *args, **kwargs):
self.import_block(*args, **kwargs)
def import_block(self, block: BaseBlock,
perform_validation: bool=True,
save_block_head_hash_timestamp = True,
wallet_address = None,
allow_unprocessed = True,
allow_replacement = True,
ensure_block_unchanged:bool = True,
microblock_origin: bool = False) -> BaseBlock:
#we handle replacing blocks here
#this includes deleting any blocks that it might be replacing
#then we start the journal db
#then within _import_block, it can commit the journal
#but we wont persist until it gets out here again.
wallet_address = block.header.chain_address
# we need to re-initialize the chain for the new wallet address.
if wallet_address != self.wallet_address:
self.logger.debug("Changing to chain with wallet address {}".format(encode_hex(wallet_address)))
self.set_new_wallet_address(wallet_address=wallet_address)
journal_enabled = False
#if we are given a block that is not one of the two allowed classes, try converting it.
#There is no reason why this should be a queueblock, because a queueblock would never come over the network;
#it is always generated locally, and should have the correct class.
if not isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_block_class()):
self.logger.debug("converting block to correct class")
block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
if isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_queue_block_class()):
# Set the queue block timestamp to now, when it is being imported.
block = block.copy(header=block.header.copy(timestamp=int(time.time())))
else:
if block.header.chain_address == self.genesis_wallet_address and block.header.block_number == 0:
try:
our_genesis_hash = self.chaindb.get_canonical_block_header_by_number(BlockNumber(0), self.genesis_wallet_address).hash
except HeaderNotFound:
raise NoGenesisBlockPresent("Tried importing a block, but we have no genesis block loaded. Need to load a genesis block first.")
if block.header.hash == our_genesis_hash:
return block
else:
raise ValidationError("Tried to import a new genesis block on the genesis chain. This is not allowed.")
if len(block.transactions) == 0 and len(block.receive_transactions) == 0:
# if block.reward_bundle is None:
# raise ValidationError('The block must have at least 1 transaction, or a non-zero reward bundle. Reward bundle = None')
if (block.reward_bundle.reward_type_1.amount == 0 and block.reward_bundle.reward_type_2.amount == 0):
                raise RewardAmountRoundsToZero('The block has no send or receive transactions, and the reward bundle has amount = 0 for all types of rewards. This is not allowed. If this is just a reward block, this usually means more time needs to pass before creating the reward bundle.')
#if we are adding to the top of the chain, or beyond, we need to check for unprocessed blocks
#handle deleting any unprocessed blocks that will be replaced.
if block.number >= self.header.block_number:
existing_unprocessed_block_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number)
if (existing_unprocessed_block_hash != block.hash) and (existing_unprocessed_block_hash is not None):
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to replace an unprocessed block.")
#check to make sure the parent matches the one we have
if block.number != 0:
# if block.number == self.header.block_number:
# existing_parent_hash = self.chaindb.get_canonical_head_hash(self.wallet_address)
# else:
existing_unprocessed_parent_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number-1)
if existing_unprocessed_parent_hash is not None:
if block.header.parent_hash != existing_unprocessed_parent_hash:
raise ParentNotFound("Parent is unprocessed. Parent hash = {}, this hash = {}".format(
encode_hex(existing_unprocessed_parent_hash), encode_hex(block.header.parent_hash)))
else:
try:
                            existing_canonical_parent_hash = self.chaindb.get_canonical_block_header_by_number(block.header.block_number-1, block.header.chain_address).hash
if block.header.parent_hash != existing_canonical_parent_hash:
raise ParentNotFound("Parent is canonical. Parent hash = {}, this hash = {}".format(
encode_hex(existing_canonical_parent_hash), encode_hex(block.header.parent_hash)))
except HeaderNotFound:
pass
                #let's delete the unprocessed block and its children, then import
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
self.purge_unprocessed_block(existing_unprocessed_block_hash)
#check to see if this is the same hash that was already saved as unprocessed
if block.number > self.header.block_number:
#check that the parent hash matches what we have.
existing_parent_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number-1)
#we can allow this for unprocessed blocks as long as we have the parent in our database
if existing_parent_hash == block.header.parent_hash:
if block.hash == self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number):
#we already imported this one
return_block = block
else:
#save as unprocessed
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because parent on this chain is unprocessed")
return_block = self.save_block_as_unprocessed(block)
if journal_enabled:
                    self.logger.debug('committing journal')
self.commit_journal(journal_record)
self.persist_journal()
self.disable_journal_db()
return return_block
else:
raise ParentNotFound('Parent is unprocessed 2')
#now, if it is the head of the chain, lets make sure the parent hash is correct.
if block.number == self.header.block_number and block.number != 0:
if block.header.parent_hash != self.chaindb.get_canonical_head_hash(chain_address= self.wallet_address):
raise ParentNotFound("Block is at the head of the chain")
if block.number < self.header.block_number:
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to replace a canonical block")
self.logger.debug("went into block replacing mode")
self.logger.debug("block.number = {}, self.header.block_number = {}".format(block.number,self.header.block_number))
self.logger.debug("this chains wallet address = {}, this block's sender = {}".format(encode_hex(self.wallet_address), encode_hex(block.sender)))
#check to see if we can load the existing canonical block
existing_block_header = self.chaindb.get_canonical_block_header_by_number(block.number, self.wallet_address)
if existing_block_header.hash == block.header.hash:
self.logger.debug("tried to import a block that has a hash that matches the local block. no import required.")
return block
else:
if not journal_enabled:
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
self.purge_block_and_all_children_and_set_parent_as_chain_head(existing_block_header, save_block_head_hash_timestamp = save_block_head_hash_timestamp)
#check to see if this block is chronologically inconsistent - usually due to reward block that used proof from this chain
block_hashes_leading_to_inconsistency = self.check_block_chronological_consistency(block)
if len(block_hashes_leading_to_inconsistency) > 0:
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to import chronologically inconsistent block. Block hashes leading to inconsistency = {}.".format([encode_hex(x) for x in block_hashes_leading_to_inconsistency]))
else:
# revert all of the blocks leading to the inconsistency.
if not journal_enabled:
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
for block_hash in block_hashes_leading_to_inconsistency:
self.logger.debug("Purging block {} to preserve chronological consistency".format(encode_hex(block_hash)))
block_header = self.chaindb.get_block_header_by_hash(block_hash)
                    # This should be impossible, but let's double check that none of these blocks are on the same chain as this block
if block_header.chain_address == block.header.chain_address:
raise Exception("Tried to revert chronologically inconsistent block on this same chain. This should never happen...")
self.purge_block_and_all_children_and_set_parent_as_chain_head(block_header, save_block_head_hash_timestamp = save_block_head_hash_timestamp)
try:
return_block = self._import_block(block = block,
perform_validation = perform_validation,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
allow_unprocessed = allow_unprocessed,
ensure_block_unchanged= ensure_block_unchanged,
microblock_origin = microblock_origin)
# handle importing unprocessed blocks here because doing it recursively results in maximum recursion depth exceeded error
if not self.chaindb.is_block_unprocessed(return_block.hash):
self.logger.debug("Checking to see if block has unprocessed children")
self.import_all_unprocessed_descendants(return_block.hash,
perform_validation= True,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
allow_unprocessed = True)
except Exception as e:
if journal_enabled:
self.logger.debug('discarding journal')
self.discard_journal(journal_record)
self.disable_journal_db()
raise e
if journal_enabled:
            self.logger.debug('committing journal')
self.commit_journal(journal_record)
self.persist_journal()
self.disable_journal_db()
return return_block
def _import_block(self, block: BaseBlock,
perform_validation: bool=True,
save_block_head_hash_timestamp = True,
allow_unprocessed = True,
ensure_block_unchanged: bool = True,
microblock_origin: bool = False) -> BaseBlock:
"""
Imports a complete block.
"""
self.logger.debug("importing block {} with number {}".format(block.__repr__(), block.number))
self.validate_time_from_genesis_block(block)
if isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_queue_block_class()):
# If it was a queueblock, then the header will have changed after importing
perform_validation = False
ensure_block_unchanged = False
queue_block = True
else:
queue_block = False
if not self.chaindb.is_block_unprocessed(block.header.parent_hash):
#this part checks to make sure the parent exists
try:
vm = self.get_vm(timestamp = block.header.timestamp)
self.logger.debug("importing block with vm {}".format(vm.__repr__()))
if queue_block:
imported_block = vm.import_block(block, private_key = self.private_key)
else:
imported_block = vm.import_block(block)
# Validate the imported block.
if ensure_block_unchanged:
if microblock_origin:
# this started out as a microblock. So we only ensure the microblock fields are unchanged.
self.logger.debug('ensuring block unchanged. microblock correction')
corrected_micro_block = block.copy(header = block.header.copy(
receipt_root = imported_block.header.receipt_root,
bloom = imported_block.header.bloom,
gas_limit = imported_block.header.gas_limit,
gas_used = imported_block.header.gas_used,
account_hash = imported_block.header.account_hash,
account_balance = imported_block.header.account_balance,
))
ensure_imported_block_unchanged(imported_block, corrected_micro_block)
else:
self.logger.debug('ensuring block unchanged')
ensure_imported_block_unchanged(imported_block, block)
else:
self.logger.debug('Not checking block for changes.')
if perform_validation:
self.validate_block(imported_block)
#self.chain_head_db.set_chain_head_hash(self.wallet_address, imported_block.header.hash)
if save_block_head_hash_timestamp:
self.chain_head_db.add_block_hash_to_chronological_window(imported_block.header.hash, imported_block.header.timestamp)
self.save_chain_head_hash_to_trie_for_time_period(imported_block.header)
self.chain_head_db.set_chain_head_hash(imported_block.header.chain_address, imported_block.header.hash)
self.chain_head_db.persist(True)
self.chaindb.persist_block(imported_block)
vm.state.account_db.persist(save_account_hash = True, wallet_address = self.wallet_address)
#here we must delete the unprocessed lookup before importing children
#because the children cannot be imported if their chain parent is unprocessed.
#but we cannot delete the lookup for unprocessed children yet.
self.chaindb.remove_block_from_unprocessed(imported_block)
# Add chronological consistency lookups
self.save_block_chronological_consistency_lookups(imported_block)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
self.queue_block = None
self.logger.debug(
'IMPORTED_BLOCK: number %s | hash %s',
imported_block.number,
encode_hex(imported_block.hash),
)
# Make sure our wallet address hasn't magically changed
if self.wallet_address != imported_block.header.chain_address:
raise ValidationError("Attempted to import a block onto the wrong chain.")
return_block = imported_block
except ReceivableTransactionNotFound as e:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because of ReceivableTransactionNotFound error: {}".format(e))
return_block = self.save_block_as_unprocessed(block)
if self.raise_errors:
raise e
except RewardProofSenderBlockMissing as e:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because of RewardProofSenderBlockMissing error: {}".format(e))
return_block = self.save_block_as_unprocessed(block)
else:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because parent on this chain is unprocessed")
return_block = self.save_block_as_unprocessed(block)
return return_block
def import_all_unprocessed_descendants(self, block_hash, *args, **kwargs):
# 1) get unprocessed children
# 2) loop through and import
# 3) if child imports, add their unprocessed children to list, and delete that block from unprocessed
# 4) if list of unprocessed children has 0 length, break
# need to step one level at a time. We use a queue to achieve this effect. It won't get to the next level
# until it finishes all of the blocks on this level. So it goes one level at a time.
if self.chaindb.has_unprocessed_children(block_hash):
self.logger.debug("HAS UNPROCESSED BLOCKS")
# try to import all children
children_block_hashes = self.chaindb.get_block_children(block_hash)
            if children_block_hashes is not None:
block_hashes_to_import = deque(children_block_hashes)
# iterate over children
while True:
# remove from right side
current_block_hash_to_import = block_hashes_to_import.pop()
if self.chaindb.is_block_unprocessed(current_block_hash_to_import):
self.logger.debug("importing child block")
try:
child_block = self.get_block_by_hash(current_block_hash_to_import)
if child_block.header.chain_address != self.wallet_address:
#self.logger.debug("Changing to chain with wallet address {}".format(encode_hex(child_block.header.chain_address)))
self.set_new_wallet_address(wallet_address=child_block.header.chain_address)
self._import_block(child_block, *args, **kwargs)
                            #if the block imported, add its children to the deque
if not self.chaindb.is_block_unprocessed(current_block_hash_to_import):
# it imported successfully
if self.chaindb.has_unprocessed_children(current_block_hash_to_import):
children_block_hashes = self.chaindb.get_block_children(current_block_hash_to_import)
                                    if children_block_hashes is not None:
block_hashes_to_import.extendleft(children_block_hashes)
# we have queued up its children to be imported. Assuming exceptions don't occur, we can remove this block from the unprocessed children lookup.
self.chaindb.delete_unprocessed_children_blocks_lookup(current_block_hash_to_import)
except Exception as e:
self.logger.error("Tried to import an unprocessed child block and got this error {}".format(e))
if len(block_hashes_to_import) == 0:
return
self.chaindb.delete_unprocessed_children_blocks_lookup(block_hash)
def save_block_chronological_consistency_lookups(self, block: BaseBlock) -> None:
'''
        We need to require that the proof sender chain doesn't add a block between their claimed chain_head_hash and the timestamp of the block being imported.
:param block:
:return:
'''
block_header = block.header
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block.reward_bundle_class)
chronological_consistency_key = [block_header.timestamp, block_header.hash]
for proof in reward_bundle.reward_type_2.proof:
# timestamp, block hash of block responsible
sender_chain_header = self.chaindb.get_block_header_by_hash(proof.head_hash_of_sender_chain)
# The chronological consistency restrictions are placed on the block on top of the one giving the proof.
block_number_with_restrictions = sender_chain_header.block_number + 1
self.logger.debug("saving chronological consistency lookup for chain {}, block {}, timestamp {}".format(encode_hex(sender_chain_header.chain_address), block_number_with_restrictions, block_header.timestamp))
self.chaindb.add_block_consistency_key(sender_chain_header.chain_address, block_number_with_restrictions, chronological_consistency_key)
def save_block_as_unprocessed(self, block):
        #if it is already saved as unprocessed, do nothing
if self.chaindb.is_block_unprocessed(block.hash):
return block
#before adding to unprocessed blocks, make sure the receive transactions are valid
# for receive_transaction in block.receive_transactions:
# #there must be at least 1 to get this far
# receive_transaction.validate()
#now we add it to unprocessed blocks
self.chaindb.save_block_as_unprocessed(block)
#save the transactions to db
vm = self.get_vm(timestamp = block.header.timestamp)
vm.save_items_to_db_as_trie(block.transactions, block.header.transaction_root)
vm.save_items_to_db_as_trie(block.receive_transactions, block.header.receive_transaction_root)
#we don't want to persist because that will add it to the canonical chain.
        #We just want to save it to the database so we can process it later if need be.
self.chaindb.persist_non_canonical_block(block)
#self.chaindb.persist_block(block)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
self.queue_block = None
self.logger.debug(
'SAVED_BLOCK_AS_UNPROCESSED: number %s | hash %s',
block.number,
encode_hex(block.hash),
)
return block
def import_current_queue_block(self) -> BaseBlock:
return self.import_block(self.queue_block)
def import_current_queue_block_with_reward(self, node_staking_score_list: List[NodeStakingScore]) -> BaseBlock:
reward_bundle = self.get_consensus_db().create_reward_bundle_for_block(self.wallet_address, node_staking_score_list, at_timestamp=Timestamp(int(time.time())))
# #testing
# reward_bundle = reward_bundle.copy(reward_type_2 = reward_bundle.reward_type_2.copy(amount=0))
self.queue_block = self.queue_block.copy(reward_bundle = reward_bundle)
return self.import_current_queue_block()
def get_all_chronological_blocks_for_window(self, window_timestamp:Timestamp) -> List[BaseBlock]:
validate_uint256(window_timestamp, title='timestamp')
chronological_blocks = self.chain_head_db.load_chronological_block_window(window_timestamp)
if chronological_blocks is None:
return None
else:
list_of_blocks = []
for chronological_block in chronological_blocks:
block_hash = chronological_block[1]
new_block = self.get_block_by_hash(block_hash)
list_of_blocks.append(new_block)
return list_of_blocks
#
# Chronologically consistent blockchain db API
#
def check_block_chronological_consistency(self, block: BaseBlock) -> List[Hash32]:
'''
Checks to see if the block breaks any chronological consistency. If it does, it will return a list of blocks that need to be reverted for this block to be imported
returns list of block hashes that have to be reverted
:param block:
:return:
'''
consistency_keys = self.chaindb.get_block_chronological_consistency_keys(block.header.chain_address, block.header.block_number)
block_hashes_to_revert = list()
for consistency_key in consistency_keys:
if consistency_key[0] > block.header.timestamp:
block_hashes_to_revert.append(consistency_key[1])
return block_hashes_to_revert
#
# Validation API
#
def get_allowed_time_of_next_block(self, chain_address: Address = None) -> Timestamp:
if chain_address is None:
chain_address = self.wallet_address
try:
canonical_head = self.chaindb.get_canonical_head(chain_address=chain_address)
except CanonicalHeadNotFound:
return Timestamp(0)
vm = self.get_vm(timestamp=Timestamp(int(time.time())))
min_allowed_time_between_blocks = vm.min_time_between_blocks
return Timestamp(canonical_head.timestamp + min_allowed_time_between_blocks)
def validate_block(self, block: BaseBlock) -> None:
"""
Performs validation on a block that is either being mined or imported.
        Since block validation (specifically the uncle validation) must have
access to the ancestor blocks, this validation must occur at the Chain
level.
"""
self.validate_gaslimit(block.header)
def validate_gaslimit(self, header: BlockHeader) -> None:
"""
Validate the gas limit on the given header.
"""
#parent_header = self.get_block_header_by_hash(header.parent_hash)
#low_bound, high_bound = compute_gas_limit_bounds(parent_header)
#if header.gas_limit < low_bound:
# raise ValidationError(
# "The gas limit on block {0} is too low: {1}. It must be at least {2}".format(
# encode_hex(header.hash), header.gas_limit, low_bound))
if header.gas_limit > BLOCK_GAS_LIMIT:
raise ValidationError(
"The gas limit on block {0} is too high: {1}. It must be at most {2}".format(
encode_hex(header.hash), header.gas_limit, BLOCK_GAS_LIMIT))
def validate_block_specification(self, block) -> bool:
'''
        This validates everything we can without looking at the blockchain database. It doesn't need to assume
that we have the block that sent the transactions.
        Things that this can check:
block signature
send transaction signatures
        receive transaction signatures - no need to check these; they don't add any security
signatures of send transaction within receive transactions
send transaction root matches transactions
receive transaction root matches transactions
'''
if not isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_block_class()):
self.logger.debug("converting block to correct class")
block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
block.header.check_signature_validity()
for transaction in block.transactions:
transaction.validate()
for transaction in block.receive_transactions:
transaction.validate()
send_tx_root_hash, _ = make_trie_root_and_nodes(block.transactions)
if block.header.transaction_root != send_tx_root_hash:
raise ValidationError("Block has invalid transaction root")
receive_tx_root_hash, _ = make_trie_root_and_nodes(block.receive_transactions)
if block.header.receive_transaction_root != receive_tx_root_hash:
raise ValidationError("Block has invalid receive transaction root")
return True
#
# Stake API
#
def get_mature_stake(self, wallet_address: Address = None, raise_canonical_head_not_found_error:bool = False) -> int:
if wallet_address is None:
wallet_address = self.wallet_address
coin_mature_time_for_staking = self.get_vm(timestamp = Timestamp(int(time.time()))).consensus_db.coin_mature_time_for_staking
return self.chaindb.get_mature_stake(wallet_address, coin_mature_time_for_staking, raise_canonical_head_not_found_error = raise_canonical_head_not_found_error)
    # gets the stake for the timestamp corresponding to the chronological block window, so it is all blocks for the next 1000 seconds.
def get_mature_stake_for_chronological_block_window(self, chronological_block_window_timestamp: Timestamp, timestamp_for_stake: Timestamp = None):
if timestamp_for_stake is not None and timestamp_for_stake < chronological_block_window_timestamp:
raise ValidationError("Cannot get chronological block window stake for a timestamp before the window")
if timestamp_for_stake is None:
timestamp_for_stake = int(time.time())
chronological_block_hash_timestamps = self.chain_head_db.load_chronological_block_window(chronological_block_window_timestamp)
chronological_block_hashes = [x[1] for x in chronological_block_hash_timestamps]
coin_mature_time_for_staking = self.get_vm(timestamp=timestamp_for_stake).consensus_db.coin_mature_time_for_staking
return self.chaindb.get_total_block_stake_of_block_hashes(chronological_block_hashes, coin_mature_time_for_staking, timestamp_for_stake)
def get_new_block_hash_to_test_peer_node_health(self) -> Hash32:
'''
returns one of the newest blocks we have seen.
:return:
'''
        before_this_timestamp = int(time.time()) - 60  # ask the peer for a block that was received more than 1 minute ago
current_historical_window = int(time.time() / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
for timestamp in range(current_historical_window,
current_historical_window-NUMBER_OF_HEAD_HASH_TO_SAVE*TIME_BETWEEN_HEAD_HASH_SAVE,
-1* TIME_BETWEEN_HEAD_HASH_SAVE):
chronological_window = self.chain_head_db.load_chronological_block_window(timestamp)
if chronological_window is not None:
chronological_window.sort(key=lambda x: -1*x[0])
for timestamp_hash in chronological_window:
if timestamp_hash[0] < before_this_timestamp:
return timestamp_hash[1]
#if we get to here then we don't have any blocks within all chronological block windows...
raise NoChronologicalBlocks()
#
# Min Block Gas API used for throttling the network
#
def re_initialize_historical_minimum_gas_price_at_genesis(self) -> None:
'''
re-initializes system with last set min gas price and net tpc cap
'''
hist_min_gas_price = self.chaindb.load_historical_minimum_gas_price()
hist_tpc_cap = self.chaindb.load_historical_network_tpc_capability()
hist_tx_per_centisecond = self.chaindb.load_historical_tx_per_centisecond()
if hist_min_gas_price is not None:
init_min_gas_price = hist_min_gas_price[-1][1]
else:
init_min_gas_price = 1
if hist_tpc_cap is not None:
init_tpc_cap = hist_tpc_cap[-1][1]
else:
init_tpc_cap = self.get_local_tpc_cap()
if hist_tx_per_centisecond is not None:
init_tpc = hist_tx_per_centisecond[-1][1]
else:
init_tpc = None
self.chaindb.initialize_historical_minimum_gas_price_at_genesis(init_min_gas_price, init_tpc_cap, init_tpc)
def update_current_network_tpc_capability(self, current_network_tpc_cap: int, update_min_gas_price:bool = True) -> None:
validate_uint256(current_network_tpc_cap, title="current_network_tpc_cap")
self.chaindb.save_current_historical_network_tpc_capability(current_network_tpc_cap)
if update_min_gas_price:
current_centisecond = int(time.time()/100) * 100
timestamp_min_gas_price_updated = self.update_tpc_from_chronological(update_min_gas_price = True)
if timestamp_min_gas_price_updated > current_centisecond:
self.chaindb._recalculate_historical_mimimum_gas_price(current_centisecond)
def update_tpc_from_chronological(self, update_min_gas_price: bool = True):
        #start at the newest window; if the tpc is the same as the database, stop, but if it is different keep going back
self.logger.debug("Updating tpc from chronological")
current_historical_window = int(time.time()/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
current_centisecond = int(time.time()/100) * 100
        #load this once to find out if it's None. If it is None, then the node just started, let's only go back 50 steps
#hist_tpc = self.chaindb.load_historical_tx_per_centisecond()
end_outer = current_historical_window-20*TIME_BETWEEN_HEAD_HASH_SAVE
for historical_window_timestamp in range(current_historical_window,
end_outer,
-TIME_BETWEEN_HEAD_HASH_SAVE):
tpc_sum_dict = {}
chronological_block_window = self.chain_head_db.load_chronological_block_window(historical_window_timestamp)
self.logger.debug('loading chronological block window for timestamp {}'.format(historical_window_timestamp))
#zero the dictionary
if historical_window_timestamp+TIME_BETWEEN_HEAD_HASH_SAVE < current_centisecond:
end = historical_window_timestamp +TIME_BETWEEN_HEAD_HASH_SAVE
else:
end = current_centisecond+100
for timestamp in range(historical_window_timestamp, end, 100):
tpc_sum_dict[timestamp] = 0
if chronological_block_window is not None:
for timestamp_block_hash in chronological_block_window:
#first count up the tx in the block
                #if it is 0, then set it to 1, in case the block is all receive transactions
num_tx_in_block = self.chaindb.get_number_of_total_tx_in_block(timestamp_block_hash[1])
if num_tx_in_block == 0:
num_tx_in_block = 1
#then add them to the dict
centisecond_window_for_block = int(timestamp_block_hash[0]/100) * 100
if centisecond_window_for_block <= end:
tpc_sum_dict[centisecond_window_for_block] += num_tx_in_block
same_as_database = self._update_tpc_from_chronological(tpc_sum_dict)
            if same_as_database:
break
if update_min_gas_price:
self.chaindb._recalculate_historical_mimimum_gas_price(historical_window_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE)
return historical_window_timestamp+TIME_BETWEEN_HEAD_HASH_SAVE
def _update_tpc_from_chronological(self, new_hist_tpc_dict):
'''
returns True if they are all the same as what we already had in the database, otherwise it returns False
'''
if not isinstance(new_hist_tpc_dict, dict):
raise ValidationError("Expected a dict. Didn't get a dict.")
hist_tpc = self.chaindb.load_historical_tx_per_centisecond()
difference_found = False
if hist_tpc is None:
hist_tpc = list(new_hist_tpc_dict.items())
else:
hist_tpc_dict = dict(hist_tpc)
for timestamp, tpc in new_hist_tpc_dict.items():
if timestamp not in hist_tpc_dict or hist_tpc_dict[timestamp] != tpc:
#if tpc != 0:
difference_found = True
hist_tpc_dict[timestamp] = tpc
hist_tpc = list(hist_tpc_dict.items())
#print(hist_tpc)
#save it to db
self.chaindb.save_historical_tx_per_centisecond(hist_tpc, de_sparse = False)
return not difference_found
def get_local_tpc_cap(self) -> int:
#base it on the time it takes to import a block
from hvm.utils.profile import profile
from hvm.db.backends.memory import MemoryDB
from hvm import MainnetChain
from hvm.chains.mainnet import (
MAINNET_TPC_CAP_TEST_GENESIS_PARAMS,
MAINNET_TPC_CAP_TEST_GENESIS_STATE,
TPC_CAP_TEST_GENESIS_PRIVATE_KEY,
MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT,
)
from hvm.constants import random_private_keys
db = MemoryDB()
chain = MainnetChain.from_genesis(db,
TPC_CAP_TEST_GENESIS_PRIVATE_KEY.public_key.to_canonical_address(),
MAINNET_TPC_CAP_TEST_GENESIS_PARAMS,
MAINNET_TPC_CAP_TEST_GENESIS_STATE,
private_key = TPC_CAP_TEST_GENESIS_PRIVATE_KEY)
block_to_import = chain.get_vm(timestamp = MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT['header']['timestamp']).get_block_class().from_dict(MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT)
chain.genesis_wallet_address = MAINNET_TPC_CAP_TEST_GENESIS_PARAMS['chain_address']
chain.genesis_block_timestamp = MAINNET_TPC_CAP_TEST_GENESIS_PARAMS['timestamp']
#@profile(sortby='cumulative')
def temp():
chain.import_block(block_to_import)
start_time = time.time()
temp()
duration = time.time()-start_time
#self.logger.debug('duration = {} seconds'.format(duration))
tx_per_centisecond = int(100/duration)
return tx_per_centisecond
#
# Consensus DB passthrough's that depend on block timestamp
#
def get_signed_peer_score(self, private_key: PrivateKey,
network_id: int,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
        # This function should always use the vm for the current timestamp, so we don't need to ask for a timestamp
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_signed_peer_score(private_key,
network_id,
peer_wallet_address,
after_block_number)
def get_signed_peer_score_string_private_key(self,
private_key_string: bytes,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
network_id = self.network_id
# This always occurs at this time. So we take the current consensus db
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_signed_peer_score_string_private_key(private_key_string,
network_id,
peer_wallet_address,
after_block_number)
def validate_node_staking_score(self,
node_staking_score: NodeStakingScore,
since_block_number: BlockNumber) -> None:
# This depends on when the staking score was created. So get the consensus db given by that timestamp
return self.get_consensus_db(timestamp = node_staking_score.timestamp).validate_node_staking_score(node_staking_score, since_block_number)
def save_health_request(self, peer_wallet_address: Address, response_time_in_micros: int = float('inf')) -> None:
# This always occurs at this time. So we take the current consensus db
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).save_health_request(peer_wallet_address,
response_time_in_micros)
def get_current_peer_node_health(self,peer_wallet_address: Address) -> PeerNodeHealth:
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_current_peer_node_health(peer_wallet_address)
| 1.257813 | 1 |
dependencies/svgwrite/tests/test_drawing.py | charlesmchen/typefacet | 21 | 3039 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman --<<EMAIL>>
# Purpose: test drawing module
# Created: 11.09.2010
# Copyright (C) 2010, <NAME>
# License: GPLv3
from __future__ import unicode_literals
import os
import unittest
from io import StringIO
from svgwrite.drawing import Drawing
from svgwrite.container import Group
class TestDrawingFullProfile(unittest.TestCase):
def test_empty_drawing(self):
dwg = Drawing()
result = dwg.tostring()
self.assertEqual(result, '<svg baseProfile="full" height="100%" version="1.1" '\
'width="100%" xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
def test_stylesheet(self):
dwg = Drawing()
dwg.add_stylesheet('test.css', 'Test')
f = StringIO()
dwg.write(f)
result = f.getvalue()
f.close()
self.assertEqual(result, '<?xml version="1.0" encoding="utf-8" ?>\n' \
'<?xml-stylesheet href="test.css" type="text/css" title="Test" alternate="no" media="screen"?>\n'
'<svg baseProfile="full" height="100%" version="1.1" width="100%" '\
'xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
def test_save(self):
fn = 'test_drawing.svg'
if os.path.exists(fn):
os.remove(fn)
dwg = Drawing(fn)
dwg.save()
self.assertTrue(os.path.exists(fn))
os.remove(fn)
def test_save_as(self):
fn = 'test_drawing.svg'
if os.path.exists(fn):
os.remove(fn)
dwg = Drawing()
dwg.saveas(fn)
self.assertTrue(os.path.exists(fn))
os.remove(fn)
def test_non_us_ascii_chars(self):
dwg = Drawing()
dwg.set_desc('öäü')
f = StringIO()
dwg.write(f)
result = f.getvalue()
f.close()
self.assertEqual(result,
'<?xml version="1.0" encoding="utf-8" ?>\n' \
'<svg baseProfile="full" height="100%" version="1.1" width="100%" '\
'xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink">'
'<title>öäü</title><defs /></svg>')
class TestDrawingTinyProfile(unittest.TestCase):
def test_empty_drawing(self):
dwg = Drawing(profile="tiny")
result = dwg.tostring()
self.assertEqual(result, '<svg baseProfile="tiny" height="100%" version="1.2" '\
'width="100%" xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
def test_stylesheet(self):
dwg = Drawing(profile="tiny")
dwg.add_stylesheet('test.css', 'Test')
f = StringIO()
dwg.write(f)
result = f.getvalue()
f.close()
self.assertEqual(result, '<?xml version="1.0" encoding="utf-8" ?>\n' \
'<?xml-stylesheet href="test.css" type="text/css" title="Test" alternate="no" media="screen"?>\n'
'<svg baseProfile="tiny" height="100%" version="1.2" width="100%" '\
'xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
class TestDefs(unittest.TestCase):
def test_simple_defs(self):
dwg = Drawing()
g = dwg.defs.add(Group(id='test'))
inner_g = g.add(Group(id='innerTest'))
result = dwg.tostring()
self.assertEqual(result, '<svg baseProfile="full" height="100%" version="1.1" '\
'width="100%" xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink">' \
'<defs><g id="test"><g id="innerTest" /></g></defs></svg>')
if __name__ == '__main__':
unittest.main()
| 1.554688 | 2 |
tools/resource_prefetch_predictor/generate_database.py | xzhan96/chromium.src | 1 | 3047 | #!/usr/bin/python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Loads a set of web pages several times on a device, and extracts the
predictor database.
"""
import argparse
import logging
import os
import sys
_SRC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
sys.path.append(os.path.join(_SRC_PATH, 'build', 'android'))
import devil_chromium
sys.path.append(os.path.join(_SRC_PATH, 'tools', 'android', 'loading'))
import controller
from options import OPTIONS
import page_track
_PAGE_LOAD_TIMEOUT = 20
def _CreateArgumentParser():
"""Creates and returns the argument parser."""
parser = argparse.ArgumentParser(
description=('Loads a set of web pages several times on a device, and '
'extracts the predictor database.'),
parents=[OPTIONS.GetParentParser()])
parser.add_argument('--device', help='Device ID')
parser.add_argument('--urls_filename', help='File containing a list of URLs '
'(one per line). URLs can be repeated.')
parser.add_argument('--output_filename',
help='File to store the database in.')
parser.add_argument('--url_repeat',
help=('Number of times each URL in the input '
'file is loaded.'),
default=3)
return parser
def _FindDevice(device_id):
"""Returns a device matching |device_id| or the first one if None, or None."""
devices = device_utils.DeviceUtils.HealthyDevices()
if device_id is None:
return devices[0]
matching_devices = [d for d in devices if str(d) == device_id]
if not matching_devices:
return None
return matching_devices[0]
def _Setup(device):
"""Sets up a device and returns an instance of RemoteChromeController."""
chrome_controller = controller.RemoteChromeController(device)
device.ForceStop(OPTIONS.ChromePackage().package)
chrome_controller.AddChromeArguments(
['--speculative-resource-prefetching=learning'])
chrome_controller.ResetBrowserState()
return chrome_controller
def _Go(chrome_controller, urls_filename, output_filename, repeats):
urls = []
with open(urls_filename) as f:
urls = [line.strip() for line in f.readlines()]
with chrome_controller.Open() as connection:
for repeat in range(repeats):
logging.info('Repeat #%d', repeat)
for url in urls:
logging.info('\tLoading %s', url)
page_track.PageTrack(connection) # Registers the listeners.
connection.MonitorUrl(url, timeout_seconds=_PAGE_LOAD_TIMEOUT,
stop_delay_multiplier=1.5)
device = chrome_controller.GetDevice()
device.ForceStop(OPTIONS.ChromePackage().package)
database_filename = (
'/data/user/0/%s/app_chrome/Default/Network Action Predictor' %
OPTIONS.ChromePackage().package)
device.PullFile(database_filename, output_filename)
def main():
logging.basicConfig(level=logging.INFO)
parser = _CreateArgumentParser()
args = parser.parse_args()
OPTIONS.SetParsedArgs(args)
devil_chromium.Initialize()
device = _FindDevice(args.device)
if device is None:
logging.error('Could not find device: %s.', args.device)
sys.exit(1)
chrome_controller = _Setup(device)
_Go(chrome_controller, args.urls_filename, args.output_filename,
int(args.url_repeat))
if __name__ == '__main__':
main()
| 1.8125 | 2 |
cgbind/esp.py | duartegroup/cgbind | 7 | 3055 | import numpy as np
from time import time
from cgbind.atoms import get_atomic_number
from cgbind.log import logger
from cgbind.constants import Constants
from cgbind.exceptions import CgbindCritical
def get_esp_cube_lines(charges, atoms):
"""
From a list of charges and a set of xyzs create the electrostatic potential
map grid-ed uniformly between the most negative x, y, z values -5 Å
and the largest x, y, z +5 Å
:param charges: (list(float))
:param atoms: (list(autode.atoms.Atom))
:return: (list(str)), (min ESP value, max ESP value)
"""
logger.info('Calculating the ESP and generating a .cube file')
start_time = time()
try:
from esp_gen import get_cube_lines
except ModuleNotFoundError:
raise CgbindCritical('esp_gen not available. cgbind must be '
'installed with the --esp_gen flag')
if charges is None:
logger.error('Could not generate an .cube file, charges were None')
return [], (None, None)
coords = np.array([atom.coord for atom in atoms])
charges = np.array(charges)
# Get the max and min points from the coordinates
max_cart_values = np.max(coords, axis=0)
    min_cart_values = np.min(coords, axis=0)
# The grid needs to be slightly larger than the smallest/largest Cartesian
# coordinate
# NOTE: All distances from here are in Bohr (a0) i.e. atomic units
    min_carts = Constants.ang2a0 * (min_cart_values - 5 * np.ones(3))
max_carts = Constants.ang2a0 * (max_cart_values + 5 * np.ones(3))
coords = np.array([Constants.ang2a0 * np.array(coord) for coord in coords])
# Number of voxels will be nx * ny * nz
nx, ny, nz = 50, 50, 50
vox_size = max_carts - min_carts
rx, ry, rz = vox_size[0] / nx, vox_size[1] / ny, vox_size[2] / nz
# Write the .cube file lines
cube_file_lines = ['Generated by cgbind\n', 'ESP\n']
n_atoms = len(coords)
min_x, min_y, min_z = min_carts
cube_file_lines.append(f'{n_atoms:>5d}{min_x:>12f}{min_y:>12f}{min_z:>12f}\n') # n_atoms origin(x y z)
cube_file_lines.append(f'{nx:>5d}{rx:>12f}{0.0:>12f}{0.0:>12f}\n') # Number of voxels and their size
cube_file_lines.append(f'{ny:>5d}{0.0:>12f}{ry:>12f}{0.0:>12f}\n')
cube_file_lines.append(f'{nz:>5d}{0.0:>12f}{0.0:>12f}{rz:>12f}\n')
for atom in atoms:
x, y, z = atom.coord
cube_file_lines.append(f'{get_atomic_number(atom):>5d}{0.0:>12f}'
f'{Constants.ang2a0*x:>12f}{Constants.ang2a0*y:>12f}{Constants.ang2a0*z:>12f}\n')
# Looping over x, y, z is slow in python so use Cython extension
cube_val_lines, min_val, max_val = get_cube_lines(nx, ny, nz, coords, min_carts, charges, vox_size)
cube_file_lines += cube_val_lines
logger.info(f'ESP generated in {time()-start_time:.3f} s')
return cube_file_lines, (min_val, max_val)
| 2.390625 | 2 |
package/tests/test_init_command.py | MrKriss/stonemason | 2 | 3063 |
from pathlib import Path
import pytest
import git
import json
from conftest import TEST_DIR
def test_init_with_project(tmpdir):
output_path = Path(tmpdir.strpath)
# Set arguments
args = f"init -o {output_path} {TEST_DIR}/example_templates/python_project"
from masonry import main
# Run from entry point
main.main(args=args)
# Check files were created
package_name = 'testpackage'
files = [
'.git/',
'.mason',
'MANIFEST.in',
'README',
'requirements.txt',
'setup.py',
'src/testpackage',
'src/testpackage/__init__.py',
'src/testpackage/main.py'
]
for f in files:
p = output_path / package_name / f
assert p.exists()
    # Check requirements were populated
target = "requests\nlogzero\n"
req_file = output_path / package_name / 'requirements.txt'
result = req_file.read_text()
assert result == target
# Check git repo was created and commits made
repo_dir = output_path / package_name
r = git.Repo(repo_dir.as_posix())
log = r.git.log(pretty='oneline').split('\n')
assert len(log) == 1
assert "Add 'package' template layer via stone mason." in log[0]
def test_init_with_project_and_template(tmpdir, no_prompts):
output_path = Path(tmpdir.strpath)
# Set arguments
args = f"init -o {output_path} {TEST_DIR}/example_templates/python_project/pytest"
from masonry import main
# Run from entry point
main.main(args=args)
# Check files were created
package_name = 'testpackage'
files = [
'.git/',
'.mason',
'MANIFEST.in',
'README',
'requirements.txt',
'setup.py',
'src/testpackage',
'src/testpackage/__init__.py',
'src/testpackage/main.py',
'tests/test_foo.py'
]
for f in files:
p = output_path / package_name / f
assert p.exists()
    # Check requirements were populated
target = "requests\nlogzero\npytest\npytest-cov\ncoverage\n"
req_file = output_path / package_name / 'requirements.txt'
result = req_file.read_text()
assert result == target
# Check MANIFEST was prefixed
target = "graft tests\ngraft src\n"
manifest_file = output_path / package_name / 'MANIFEST.in'
result = manifest_file.read_text()
assert result == target
# Check git repo was created and commits made
repo_dir = output_path / package_name
r = git.Repo(repo_dir.as_posix())
log = r.git.log(pretty='oneline').split('\n')
assert len(log) == 2
assert "Add 'pytest' template layer via stone mason." in log[0]
assert "Add 'package' template layer via stone mason." in log[1]
| 1.601563 | 2 |
modules/optimizations/dead_codes.py | OMGhozlan/deobshell | 0 | 3071 | # coding=utf-8
from ..logger import log_debug
from ..utils import parent_map, replace_node, is_prefixed_var, get_used_vars
def opt_unused_variable(ast):
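    """Remove assignment statements whose target variable is never read anywhere in the AST. Returns True if a change was made."""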
parents = parent_map(ast)
used_vars = get_used_vars(ast)
for node in ast.iter():
if node.tag in ["AssignmentStatementAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
if subnodes[0].attrib["VariablePath"].lower() not in used_vars:
if not is_prefixed_var(subnodes[0].attrib["VariablePath"]):
log_debug("Remove assignement of unused variable %s" % (subnodes[0].attrib["VariablePath"]))
parents[node].remove(node)
return True
return False
def opt_remove_uninitialised_variable_usage(ast):
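    """Replace binary expressions that use a never-assigned variable with their other operand. Returns True if a change was made."""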
assigned = set()
for node in ast.iter():
if node.tag in ["AssignmentStatementAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
assigned.add(subnodes[0].attrib["VariablePath"].lower())
if node.tag in ["BinaryExpressionAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
variable = subnodes[0]
other = subnodes[1]
elif subnodes[1].tag == "VariableExpressionAst":
variable = subnodes[1]
other = subnodes[0]
else:
variable, other = None, None
if variable is not None and other is not None:
if variable.attrib["VariablePath"].lower() not in assigned:
if not is_prefixed_var(variable.attrib["VariablePath"]):
log_debug("Remove unassigned variable use '%s'" % (variable.attrib["VariablePath"]))
replace_node(ast, node, other)
return True
return False
| 1.992188 | 2 |
autovirt/equipment/domain/equipment.py | xlam/autovirt | 0 | 3079 | from enum import Enum
from functools import reduce
from math import ceil
from typing import Optional, Tuple
from autovirt import utils
from autovirt.exception import AutovirtError
from autovirt.structs import UnitEquipment, RepairOffer
logger = utils.get_logger()
# maximum allowed equipment price
PRICE_MAX = 100000
# value to add and sub from offer quality when filtering
QUALITY_DELTA = 3
class QualityType(Enum):
INSTALLED = "quality"
REQUIRED = "quality_required"
def quantity_to_repair(units: list[UnitEquipment]) -> int:
"""Calculate total quantity of equipment to repair on given units"""
return sum([unit.wear_quantity for unit in units])
def quantity_total(units: list[UnitEquipment]) -> int:
"""Calculate total equipment count on given units"""
return sum([unit.quantity for unit in units])
def filter_offers(
offers: list[RepairOffer], quality: float, quantity: int
) -> list[RepairOffer]:
    # select offers with quality in [quality-DELTA ... quality+DELTA] that have enough repair parts
filtered = list(filter(lambda x: x.quality > quality - QUALITY_DELTA, offers))
filtered = list(filter(lambda x: x.quality < quality + QUALITY_DELTA, filtered))
filtered = list(filter(lambda x: x.quantity > quantity, filtered))
filtered = list(filter(lambda x: x.price < PRICE_MAX, filtered))
return filtered
def expected_quality(
qual_rep: float, qual_inst: float, items_total: int, items_wear: int
) -> float:
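    """Quality of the installed equipment after repairing the worn items: a
    quantity-weighted average of the installed and replacement qualities."""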
return (
qual_inst * (items_total - items_wear) + qual_rep * items_wear
) / items_total
def select_offer(
offers: list[RepairOffer], units: list[UnitEquipment], quality: float = None
) -> RepairOffer:
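    """Pick the offer minimizing the sum of normalized price and normalized
    distance between the expected post-repair quality and the target quality,
    considering only offers whose expected quality meets the target."""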
if not quality:
quality = units[0].quality_required
qnt_rep = quantity_to_repair(units)
qnt_total = quantity_total(units)
qual_min = utils.get_min(units, QualityType.INSTALLED.value)
qual_exp = [
expected_quality(o.quality, qual_min, qnt_total, qnt_rep) for o in offers
]
qual_diff = [abs(qual - quality) for qual in qual_exp]
diff_norm = utils.normalize_array(qual_diff)
price_norm = utils.normalize_array([o.price for o in offers])
qp_dist = [p + q for (p, q) in zip(price_norm, diff_norm)]
summary: list = [
[o, price_norm[i], qual_exp[i], qual_diff[i], diff_norm[i], qp_dist[i]]
for i, o in enumerate(offers)
if qual_exp[i] >= quality
]
logger.info(f"listing filtered offers for quality of {quality}:")
for o in summary:
logger.info(
f"id: {o[0].id}, quality: {o[0].quality}, price: {o[0].price},"
f" quantity: {o[0].quantity}, qual_exp: {o[2]:.2f}, qp: {o[5]:.3f}"
)
minimum_qp_item = reduce(lambda x, y: x if x[5] < y[5] else y, summary)
return minimum_qp_item[0]
def select_offer_to_raise_quality(
unit: UnitEquipment, offers: list[RepairOffer], margin: float = 0
) -> Optional[Tuple[RepairOffer, int]]:
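    """Find the (offer, replacement count) pair with the lowest total price that
    raises the unit's installed quality to the required quality plus margin,
    or None if no offer has sufficient quality."""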
required = unit.quality_required + margin
quality_coeff = unit.quantity * (required - unit.quality)
offers = list(filter(lambda o: o.quality >= required, offers))
if not offers:
return None
offer = offers[0]
count_to_replace = ceil(quality_coeff / (offer.quality - unit.quality))
price = count_to_replace * offer.price
for offer_ in offers[1:]:
count = ceil(quality_coeff / (offer_.quality - unit.quality))
price_ = count * offer_.price
if price_ < price:
offer = offer_
count_to_replace = count
return offer, count_to_replace
def split_by_quality(
units: list[UnitEquipment], quality_type: QualityType = QualityType.REQUIRED
) -> dict[float, list[UnitEquipment]]:
"""Split units by quality (required or installed)"""
res: dict[float, list[UnitEquipment]] = {}
for unit in units:
quality = getattr(unit, quality_type.value)
if quality not in res.keys():
res[quality] = []
res[quality].append(unit)
return res
def split_mismatch_quality_units(
units: list[UnitEquipment],
) -> tuple[list[UnitEquipment], list[UnitEquipment]]:
"""Split units into 'normal' and 'mismatch' groups.
    A mismatched unit has installed equipment of lower quality than required.
    We need to treat them in a different manner than normal units while repairing.
"""
normal = []
mismatch = []
for unit in units:
if unit.quality < unit.quality_required:
mismatch.append(unit)
else:
normal.append(unit)
return normal, mismatch
| 2.9375 | 3 |
imread/tests/test_bmp.py | luispedro/imread | 51 | 3095 | import numpy as np
from imread import imread
from . import file_path
def test_read():
im = imread(file_path('star1.bmp'))
assert np.any(im)
assert im.shape == (128, 128, 3)
def test_indexed():
im = imread(file_path('py-installer-indexed.bmp'))
assert np.any(im)
assert im.shape == (352, 162, 3)
assert np.any(im[:,:,0])
assert np.any(im[:,:,1])
assert np.any(im[:,:,2])
| 1.203125 | 1 |
gbfs/serializers.py | stadtulm/cykel | 80 | 3103 | from datetime import timedelta
from django.utils.timezone import now
from preferences import preferences
from rest_framework import fields, serializers
from bikesharing.models import Bike, Station, VehicleType
from cykel.serializers import EnumFieldSerializer
class TimestampSerializer(fields.CharField):
def to_representation(self, value):
return value.timestamp()
class GbfsFreeBikeStatusSerializer(serializers.HyperlinkedModelSerializer):
bike_id = serializers.CharField(source="non_static_bike_uuid", read_only=True)
vehicle_type_id = serializers.CharField(read_only=True)
last_reported = TimestampSerializer(read_only=True)
class Meta:
model = Bike
fields = (
"bike_id",
"vehicle_type_id",
"current_range_meters",
"last_reported",
)
def to_representation(self, instance):
representation = super().to_representation(instance)
        # defined by GBFS 2.1: the field is required only if the vehicle has a motor
if (
instance.vehicle_type is not None
and instance.vehicle_type.propulsion_type
== VehicleType.PropulsionType.HUMAN
):
representation.pop("current_range_meters")
# Default to False TODO: maybe configuration later
representation["is_reserved"] = False
# Default to False TODO: maybe configuration later
representation["is_disabled"] = False
public_geolocation = instance.public_geolocation()
if public_geolocation is not None:
pos = public_geolocation.geo
if pos and pos.x and pos.y:
representation["lat"] = pos.y
representation["lon"] = pos.x
return representation # only return bikes with public geolocation
class GbfsVehicleOnStationSerializer(GbfsFreeBikeStatusSerializer):
def to_representation(self, instance):
representation = super().to_representation(instance)
if representation is None:
return None
representation.pop("lat")
representation.pop("lon")
return representation
class GbfsStationInformationSerializer(serializers.HyperlinkedModelSerializer):
name = serializers.CharField(source="station_name", read_only=True)
capacity = serializers.IntegerField(source="max_bikes", read_only=True)
station_id = serializers.CharField(source="id", read_only=True)
class Meta:
model = Station
fields = (
"name",
"capacity",
"station_id",
)
def to_representation(self, instance):
representation = super().to_representation(instance)
if (
instance.location is not None
and instance.location.x
and instance.location.y
):
representation["lat"] = instance.location.y
representation["lon"] = instance.location.x
return representation
class GbfsStationStatusSerializer(serializers.HyperlinkedModelSerializer):
station_id = serializers.CharField(source="id", read_only=True)
vehicles = serializers.SerializerMethodField()
def get_vehicles(self, obj):
        # if configured, filter out vehicles whose last report
        # is older than the configured allowed silent time period
bsp = preferences.BikeSharePreferences
if bsp.gbfs_hide_bikes_after_location_report_silence:
available_bikes = obj.bike_set.filter(
availability_status=Bike.Availability.AVAILABLE,
last_reported__gte=now()
- timedelta(hours=bsp.gbfs_hide_bikes_after_location_report_hours),
)
else:
available_bikes = obj.bike_set.filter(
availability_status=Bike.Availability.AVAILABLE
)
vehicles = GbfsVehicleOnStationSerializer(available_bikes, many=True).data
return list(filter(lambda val: val is not None, vehicles))
class Meta:
model = Station
fields = (
"station_id",
"vehicles",
)
def to_representation(self, instance):
representation = super().to_representation(instance)
representation["num_bikes_available"] = len(representation["vehicles"])
representation["num_docks_available"] = (
instance.max_bikes - representation["num_bikes_available"]
)
if representation["num_bikes_available"] > 0:
representation["last_reported"] = max(
(
vehicle["last_reported"]
if vehicle["last_reported"] is not None
else 0
)
for vehicle in representation["vehicles"]
)
else:
            # if no bike is at the station, last_reported is the current time
# not sure if this is the intended behavior of the field
# or it should be the timestamp of the last bike removed
# but it is not so easy to implement
representation["last_reported"] = int(now().timestamp())
def drop_last_reported(obj):
obj.pop("last_reported")
return obj
representation["vehicles"] = list(
map(drop_last_reported, representation["vehicles"])
)
status = (instance.status == Station.Status.ACTIVE) or False
representation["is_installed"] = status
representation["is_renting"] = status
representation["is_returning"] = status
return representation
class GbfsVehicleTypeSerializer(serializers.HyperlinkedModelSerializer):
vehicle_type_id = serializers.CharField(source="id", read_only=True)
form_factor = EnumFieldSerializer(
read_only=True,
mapping={
VehicleType.FormFactor.BIKE: "bicycle",
VehicleType.FormFactor.ESCOOTER: "scooter",
VehicleType.FormFactor.CAR: "car",
VehicleType.FormFactor.MOPED: "moped",
VehicleType.FormFactor.OTHER: "other",
},
)
propulsion_type = EnumFieldSerializer(
read_only=True,
mapping={
VehicleType.PropulsionType.HUMAN: "human",
VehicleType.PropulsionType.ELECTRIC_ASSIST: "electric_assist",
VehicleType.PropulsionType.ELECTRIC: "electric",
VehicleType.PropulsionType.COMBUSTION: "combustion",
},
)
def to_representation(self, instance):
data = super(GbfsVehicleTypeSerializer, self).to_representation(instance)
        # defined by GBFS 2.1: the field is required only if the vehicle has a motor
if instance.propulsion_type == VehicleType.PropulsionType.HUMAN:
data.pop("max_range_meters")
return data
class Meta:
model = VehicleType
fields = (
"vehicle_type_id",
"form_factor",
"propulsion_type",
"max_range_meters",
"name",
)
| 1.484375 | 1 |
tamilmorse/morse_encode.py | CRE2525/open-tamil | 1 | 3111 | ## -*- coding: utf-8 -*-
#(C) 2018 <NAME>
# This file is part of Open-Tamil project
# You may use or distribute this file under terms of MIT license
import codecs
import json
import tamil
import sys
import os
#e.g. python morse_encode.py கலைஞர்
CURRDIR = os.path.dirname(os.path.realpath(__file__))
def encode(text):
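    """Translate Tamil text to Morse code using the Madurai codebook;
    letters missing from the codebook are passed through unchanged."""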
with codecs.open(os.path.join(CURRDIR,"data","madurai_tamilmorse.json"),"r","utf-8") as fp:
codebook = json.loads(fp.read())
output = [codebook.get(l,l) for l in tamil.utf8.get_letters(text)]
return u" ".join(output)
if __name__ == u"__main__":
encode(u" ".join([i.decode("utf-8") for i in sys.argv[1:]]))
| 2.3125 | 2 |
misc/_local_settings.py | lzantal/djskell | 4 | 3119 | """
Django settings.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
#DEBUG = False
DEBUG = True
SERVE_STATIC = DEBUG
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.oracle'
#'ENGINE': 'django.db.backends.mysql',
#'ENGINE': 'django.db.backends.sqlite3',
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'mydatabase',
'USER': 'mydatabaseuser',
'PASSWORD': '<PASSWORD>',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| 0.816406 | 1 |
model/losses.py | askerlee/rift | 11 | 3143 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from model.laplacian import LapLoss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EPE(nn.Module):
def __init__(self):
super(EPE, self).__init__()
def forward(self, flow, gt, loss_mask):
loss_map = (flow - gt.detach()) ** 2
loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5
return (loss_map * loss_mask)
class Ternary(nn.Module):
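    # Census-transform (ternary) loss: compares 7x7 local patch signatures of
    # the two grayscale images with a soft Hamming distance.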
def __init__(self):
super(Ternary, self).__init__()
patch_size = 7
out_channels = patch_size * patch_size
self.w = np.eye(out_channels).reshape(
(patch_size, patch_size, 1, out_channels))
self.w = np.transpose(self.w, (3, 2, 0, 1))
self.w = torch.tensor(self.w).float().to(device)
def transform(self, img):
patches = F.conv2d(img, self.w, padding=3, bias=None)
transf = patches - img
transf_norm = transf / torch.sqrt(0.81 + transf**2)
return transf_norm
def rgb2gray(self, rgb):
r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
def hamming(self, t1, t2):
dist = (t1 - t2) ** 2
dist_norm = torch.mean(dist / (0.1 + dist), 1, True)
return dist_norm
def valid_mask(self, t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
def forward(self, img0, img1):
img0 = self.transform(self.rgb2gray(img0))
img1 = self.transform(self.rgb2gray(img1))
return self.hamming(img0, img1) * self.valid_mask(img0, 1)
class SOBEL(nn.Module):
def __init__(self):
super(SOBEL, self).__init__()
self.kernelX = torch.tensor([
[1, 0, -1],
[2, 0, -2],
[1, 0, -1],
]).float()
self.kernelY = self.kernelX.clone().T
self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device)
self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device)
def forward(self, pred, gt):
N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3]
img_stack = torch.cat(
[pred.reshape(N*C, 1, H, W), gt.reshape(N*C, 1, H, W)], 0)
sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1)
sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1)
pred_X, gt_X = sobel_stack_x[:N*C], sobel_stack_x[N*C:]
pred_Y, gt_Y = sobel_stack_y[:N*C], sobel_stack_y[N*C:]
L1X, L1Y = torch.abs(pred_X-gt_X), torch.abs(pred_Y-gt_Y)
loss = (L1X+L1Y)
return loss
class MeanShift(nn.Conv2d):
def __init__(self, data_mean, data_std, data_range=1, norm=True):
c = len(data_mean)
super(MeanShift, self).__init__(c, c, kernel_size=1)
std = torch.Tensor(data_std)
self.weight.data = torch.eye(c).view(c, c, 1, 1)
if norm:
self.weight.data.div_(std.view(c, 1, 1, 1))
self.bias.data = -1 * data_range * torch.Tensor(data_mean)
self.bias.data.div_(std)
else:
self.weight.data.mul_(std.view(c, 1, 1, 1))
self.bias.data = data_range * torch.Tensor(data_mean)
        # Freeze the fixed normalization parameters; assigning to
        # self.requires_grad alone would not stop gradient updates.
        for p in self.parameters():
            p.requires_grad = False
class VGGPerceptualLoss(torch.nn.Module):
def __init__(self, rank=0):
super(VGGPerceptualLoss, self).__init__()
blocks = []
pretrained = True
self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features
self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).cuda()
for param in self.parameters():
param.requires_grad = False
def forward(self, X, Y, indices=None):
X = self.normalize(X)
Y = self.normalize(Y)
        # Default VGG19 feature taps; honor a caller-supplied list if given.
        if indices is None:
            indices = [2, 7, 12, 21, 30]
weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10/1.5]
k = 0
loss = 0
for i in range(indices[-1]):
X = self.vgg_pretrained_features[i](X)
Y = self.vgg_pretrained_features[i](Y)
if (i+1) in indices:
loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1
k += 1
return loss
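# Minimal usage sketch (assumes a CUDA device, since self.normalize is moved
# to .cuda() in __init__); inputs are RGB batches in [0, 1]:
#
#   vgg_loss = VGGPerceptualLoss().to(device)
#   x = torch.rand(2, 3, 224, 224, device=device)
#   y = torch.rand(2, 3, 224, 224, device=device)
#   loss = vgg_loss(x, y)  # scalar weighted L1 over VGG19 feature maps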
# flow could have any channels.
# https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py
def flow_smooth_delta(flow, if_second_order=False):
def gradient(x):
D_dx = x[:, :, :, 1:] - x[:, :, :, :-1]
D_dy = x[:, :, 1:] - x[:, :, :-1]
return D_dx, D_dy
dx, dy = gradient(flow)
# dx2, dxdy = gradient(dx)
# dydx, dy2 = gradient(dy)
if if_second_order:
dx2, dxdy = gradient(dx)
dydx, dy2 = gradient(dy)
smooth_loss = dx.abs().mean() + dy.abs().mean() + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean()
else:
smooth_loss = dx.abs().mean() + dy.abs().mean()
# smooth_loss = dx.abs().mean() + dy.abs().mean() # + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean()
    # For now, skip the second-order smoothness term: enabling it seems too
    # aggressive and keeps the photometric loss from decreasing. TODO
return smooth_loss
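# Usage sketch (random values, shapes only): penalize the first-order spatial
# gradients of a 4-channel bidirectional flow:
#
#   flow = torch.randn(2, 4, 64, 64, device=device)
#   loss_smooth = flow_smooth_delta(flow, if_second_order=False)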
# flow should have 4 channels.
# https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py
# weight_type='exp' seems to perform better than 'gauss'.
def edge_aware_smoothness_order1(img0, img1, flow, constant=1.0, weight_type='exp', error_type='L1'):
def weight_fn(x):
if weight_type == 'gauss':
y = x ** 2
elif weight_type == 'exp':
y = torch.abs(x)
else:
raise ValueError('')
return y
def gradient_xy(img):
gx = img[:, :, :, :-1] - img[:, :, :, 1:]
gy = img[:, :, :-1, :] - img[:, :, 1:, :]
return gx, gy
def gradweight_xy(img0, img1):
img0_gx, img0_gy = gradient_xy(img0)
img1_gx, img1_gy = gradient_xy(img1)
img0_wx = torch.exp(-torch.mean(weight_fn(constant * img0_gx), 1, keepdim=True))
img0_wy = torch.exp(-torch.mean(weight_fn(constant * img0_gy), 1, keepdim=True))
img1_wx = torch.exp(-torch.mean(weight_fn(constant * img1_gx), 1, keepdim=True))
img1_wy = torch.exp(-torch.mean(weight_fn(constant * img1_gy), 1, keepdim=True))
# First two flow channels: 1->0 flow. So use img1 weights.
# Second two flow channels: 0->1 flow. So use img0 weights.
# weights_x and weights_y are for x and y's spatial gradients, respectively.
weights_x = torch.cat([img1_wx, img1_wx, img0_wx, img0_wx], dim=1)
        weights_y = torch.cat([img1_wy, img1_wy, img0_wy, img0_wy], dim=1)
return weights_x, weights_y
def error_fn(x):
if error_type == 'L1':
y = torch.abs(x)
elif error_type == 'abs_robust':
y = (torch.abs(x) + 0.01).pow(0.4)
else:
raise ValueError('')
return y
# The flow gradients along x, y axes, respectively.
# flow_gx, flow_gy have the same number of channels as flow.
# No matter the flow is x- or y-flow, it should be smooth along both x and y axes.
# I.e., a y-flow should also be smooth along x-axis, and x-flow should also be smooth along y-axis.
flow_gx, flow_gy = gradient_xy(flow)
# weights_x, weights_y both have 4 channels, same as flow_gx and flow_gy (if the input flow has 4 channels).
weights_x, weights_y = gradweight_xy(img0, img1)
smoothness_x = error_fn(flow_gx) * weights_x
smoothness_y = error_fn(flow_gy) * weights_y
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
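# Usage sketch (random inputs, shapes only): the exponential weights shrink
# the penalty near image edges, so flow discontinuities at object boundaries
# are tolerated while flat regions stay smooth:
#
#   img0 = torch.rand(2, 3, 64, 64, device=device)
#   img1 = torch.rand(2, 3, 64, 64, device=device)
#   flow = torch.randn(2, 4, 64, 64, device=device)
#   loss_ea = edge_aware_smoothness_order1(img0, img1, flow)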
# Dual teaching helps slightly.
def dual_teaching_loss(mid_gt, img_stu, flow_stu, img_tea, flow_tea):
loss_distill = 0
# Ws[0]: weight of teacher -> student.
# Ws[1]: weight of student -> teacher.
# Two directions could take different weights.
# Set Ws[1] to 0 to disable student -> teacher.
Ws = [1, 0.5]
use_lap_loss = False
# Laplacian loss performs better in earlier epochs, but worse in later epochs.
# Moreover, Laplacian loss is significantly slower.
if use_lap_loss:
loss_fun = LapLoss(max_levels=3, reduction='none')
else:
loss_fun = nn.L1Loss(reduction='none')
for i in range(2):
student_error = loss_fun(img_stu, mid_gt).mean(1, True)
teacher_error = loss_fun(img_tea, mid_gt).mean(1, True)
# distill_mask indicates where the warped images according to student's prediction
# is worse than that of the teacher.
# If at some points, the warped image of the teacher is better than the student,
# then regard the flow at these points are more accurate, and use them to teach the student.
distill_mask = (student_error > teacher_error + 0.01).float().detach()
# loss_distill is the sum of the distillation losses at 2 directions.
loss_distill += Ws[i] * ((flow_tea.detach() - flow_stu).abs() * distill_mask).mean()
# Swap student and teacher, and calculate the distillation loss again.
img_stu, flow_stu, img_tea, flow_tea = \
img_tea, flow_tea, img_stu, flow_stu
# The distillation loss from the student to the teacher is given a smaller weight.
# loss_distill = loss_distill / 2
return loss_distill
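# Usage sketch (assumed shapes): mid_gt is the ground-truth middle frame,
# img_stu/img_tea the student's and teacher's warped predictions, and
# flow_stu/flow_tea their flows; flow is distilled wherever the other model's
# warped image is closer to the ground truth:
#
#   mid_gt = torch.rand(2, 3, 64, 64, device=device)
#   img_stu, img_tea = torch.rand_like(mid_gt), torch.rand_like(mid_gt)
#   flow_stu = torch.randn(2, 4, 64, 64, device=device)
#   flow_tea = torch.randn(2, 4, 64, 64, device=device)
#   loss_d = dual_teaching_loss(mid_gt, img_stu, flow_stu, img_tea, flow_tea)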
if __name__ == '__main__':
img0 = torch.zeros(3, 3, 256, 256).float().to(device)
img1 = torch.tensor(np.random.normal(
0, 1, (3, 3, 256, 256))).float().to(device)
ternary_loss = Ternary()
print(ternary_loss(img0, img1).shape)
| 2.125 | 2 |