max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64) |
---|---|---|---|---|---|---|
src/healthvaultlib/tests/testbase.py | rajeevs1992/pyhealthvault | 1 | 3151 | import unittest
import settings
from healthvaultlib.helpers.connection import Connection
class TestBase(unittest.TestCase):
def setUp(self):
self.connection = self.get_connection()
def get_connection(self):
conn = Connection(settings.HV_APPID, settings.HV_SERVICE_SERVER)
conn.thumbprint = settings.APP_THUMBPRINT
conn.publickey = settings.APP_PUBLIC_KEY
conn.privatekey = settings.APP_PRIVATE_KEY
conn.connect()
conn.set_person_and_record(settings.OFFLINE_PERSON_ID, settings.OFFLINE_RECORD_ID)
return conn
| 1.445313 | 1 |
webhooks/sentry/alerta_sentry.py | dunzoit/alerta-contrib | 0 | 3175 |
from alerta.models.alert import Alert
from alerta.webhooks import WebhookBase
class SentryWebhook(WebhookBase):
def incoming(self, query_string, payload):
# For Sentry v9
# Defaults to value before Sentry v9
if 'request' in payload.get('event'):
key = 'request'
else:
key = 'sentry.interfaces.Http'
if payload.get('event')[key]['env'].get('ENV', 'prod') == 'prod':
environment = 'Production'
else:
environment = 'Development'
if payload['level'] == 'error':
severity = 'critical'
else:
severity = 'ok'
return Alert(
resource=payload['culprit'],
event=payload['event']['event_id'],
environment=environment,
severity=severity,
service=[payload['project']],
group='Application',
value=payload['level'],
text='{}\n{}\n{}'.format(payload['message'], payload['event'].get('title', ''), payload['url']),
tags=['{}={}'.format(k, v) for k, v in payload['event']['tags']],
attributes={'modules': ['{}=={}'.format(k, v) for k, v in payload['event']['modules'].items()]},
origin='sentry.io',
raw_data=str(payload)
)
| 1.3125 | 1 |
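A hypothetical minimal payload sketches what the handler above expects from Sentry v9 and how it maps onto the Alert fields; every value below is invented for illustration.

# Hypothetical Sentry v9 payload (all values invented for illustration)
payload = {
    'culprit': 'app.views.checkout',
    'level': 'error',                          # -> severity 'critical'
    'message': 'IndexError in checkout',
    'project': 'webshop',
    'url': 'https://sentry.example.com/webshop/issues/42/',
    'event': {
        'event_id': 'abc123',
        'title': 'IndexError',
        'request': {'env': {'ENV': 'prod'}},   # -> environment 'Production'
        'tags': [['release', '1.2.3'], ['server', 'web-1']],
        'modules': {'django': '3.2', 'requests': '2.26'},
    },
}
# SentryWebhook().incoming('', payload) would build an Alert with
# resource='app.views.checkout', event='abc123', severity='critical',
# environment='Production', service=['webshop'] and value='error'.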
visual_perception/Detection/yolov4/__init__.py | SSusantAchary/Visual-Perception | 0 | 3183 | """
MIT License
Copyright (c) 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from visual_perception.Detection.yolov4.tf import YOLOv4 as yolo_main
import numpy as np
import cv2
labels = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat',
9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog',
17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella',
26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite',
34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass',
41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange',
50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant',
59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone',
68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors',
77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'}
class YOLOv4:
def __init__(self):
self.weights_path = ""
self.model = None
self.yolo_classes = ""
self.iou = 0
self.score = 0
self.input_shape = 0
self.output_path = ""
def load_model(self, weights_path:str = None, classes_path:str = None, input_shape:int = 608):
if (weights_path is None) or (classes_path is None):
raise RuntimeError ('weights_path AND classes_path should not be None.')
self.yolo_classes = classes_path
self.weights_path = weights_path
self.input_shape = input_shape
self.model = yolo_main(shape = self.input_shape)
self.model.classes = self.yolo_classes
self.model.make_model()
self.model.load_weights(self.weights_path, weights_type = 'yolo')
def predict(self, img:np.ndarray, output_path:str, iou = 0.45, score = 0.25, custom_objects:dict = None,
debug=True):
self.output_path = output_path
self.iou = iou
self.score = score
#img = np.array(Image.open(img))[..., ::-1]
pred_bboxes = self.model.predict(img, iou_threshold = self.iou, score_threshold = self.score)
boxes = []
if (custom_objects != None):
for i in range(len(pred_bboxes)):
check_name = labels[pred_bboxes[i][4]]
check = custom_objects.get(check_name, 'invalid')
if check == 'invalid':
continue
elif check == 'valid':
boxes.append(list(pred_bboxes[i]))
boxes = np.array(boxes)
res = self.model.draw_bboxes(img, boxes)
if debug:
cv2.imwrite(self.output_path, res)
else:
res = self.model.draw_bboxes(img, pred_bboxes)
if debug:
cv2.imwrite(self.output_path, res)
return res
class TinyYOLOv4:
def __init__(self):
self.weights_path = ""
self.model = None
self.yolo_classes = ""
self.iou = 0
self.score = 0
self.input_shape = 0
self.output_path = ""
def load_model(self, weights_path:str = None, classes_path:str = None, input_shape:int = 0):
if (weights_path is None) or (classes_path is None):
raise RuntimeError ('weights_path AND classes_path should not be None.')
self.yolo_classes = classes_path
self.weights_path = weights_path
self.input_shape = input_shape
self.model = yolo_main(tiny = True, shape = self.input_shape)
self.model.classes = self.yolo_classes
self.model.make_model()
self.model.load_weights(self.weights_path, weights_type = 'yolo')
def predict(self, img:np.ndarray, output_path:str, iou = 0.4, score = 0.07, custom_objects:dict = None,
debug=True):
self.output_path = output_path
self.iou = iou
self.score = score
#img = np.array(Image.open(img))[..., ::-1]
pred_bboxes = self.model.predict(img, iou_threshold = self.iou, score_threshold = self.score)
boxes = []
if (custom_objects != None):
for i in range(len(pred_bboxes)):
check_name = labels[pred_bboxes[i][4]]
check = custom_objects.get(check_name, 'invalid')
if check == 'invalid':
continue
elif check == 'valid':
boxes.append(list(pred_bboxes[i]))
boxes = np.array(boxes)
res = self.model.draw_bboxes(img, boxes)
if debug:
cv2.imwrite(self.output_path, res)
else:
res = self.model.draw_bboxes(img, pred_bboxes)
if debug:
cv2.imwrite(self.output_path, res)
return res
| 1.898438 | 2 |
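A brief usage sketch for the wrapper above, assuming the visual_perception package and OpenCV are installed; the weight, class-name and image paths are placeholders.

import cv2
from visual_perception.Detection.yolov4 import YOLOv4

detector = YOLOv4()
# 'yolov4.weights' and 'coco.names' are placeholder paths for the Darknet
# weights and the class-name file expected by load_model().
detector.load_model(weights_path='yolov4.weights', classes_path='coco.names',
                    input_shape=608)
image = cv2.imread('street.jpg')  # placeholder input image
# custom_objects keeps only labels marked 'valid'; other detections are skipped.
result = detector.predict(image, output_path='street_out.jpg', iou=0.45, score=0.25,
                          custom_objects={'person': 'valid', 'car': 'valid'})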
src/librender/tests/test_mesh.py | tizian/layer-laboratory | 7 | 3207 | import mitsuba
import pytest
import enoki as ek
from enoki.dynamic import Float32 as Float
from mitsuba.python.test.util import fresolver_append_path
from mitsuba.python.util import traverse
def test01_create_mesh(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype
from mitsuba.render import Mesh
m = Mesh("MyMesh", 3, 2)
m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]
m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]
m.parameters_changed()
assert str(m) == """Mesh[
name = "MyMesh",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [1, 1, 0]
],
vertex_count = 3,
vertices = [36 B of vertex data],
face_count = 2,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0.96
]"""
@fresolver_append_path
def test02_ply_triangle(variant_scalar_rgb):
from mitsuba.core import UInt32, Vector3f
from mitsuba.core.xml import load_string
m = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle.ply"/>
<boolean name="face_normals" value="true"/>
</shape>
""")
positions = m.vertex_positions_buffer()
faces = m.faces_buffer()
assert not m.has_vertex_normals()
assert ek.slices(positions) == 9
assert ek.allclose(positions[0:3], [0, 0, 0])
assert ek.allclose(positions[3:6], [0, 0, 1])
assert ek.allclose(positions[6:9], [0, 1, 0])
assert ek.slices(faces) == 3
assert faces[0] == UInt32(0)
assert faces[1] == UInt32(1)
assert faces[2] == UInt32(2)
@fresolver_append_path
def test03_ply_computed_normals(variant_scalar_rgb):
from mitsuba.core import Vector3f
from mitsuba.core.xml import load_string
"""Checks(automatic) vertex normal computation for a PLY file that
doesn't have them."""
shape = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle.ply"/>
</shape>
""")
normals = shape.vertex_normals_buffer()
assert shape.has_vertex_normals()
# Normals are stored in half precision
assert ek.allclose(normals[0:3], [-1, 0, 0])
assert ek.allclose(normals[3:6], [-1, 0, 0])
assert ek.allclose(normals[6:9], [-1, 0, 0])
def test04_normal_weighting_scheme(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype, Vector3f
from mitsuba.render import Mesh
import numpy as np
"""Tests the weighting scheme that is used to compute surface normals."""
m = Mesh("MyMesh", 5, 2, has_vertex_normals=True)
vertices = m.vertex_positions_buffer()
normals = m.vertex_normals_buffer()
a, b = 1.0, 0.5
vertices[:] = [0, 0, 0, -a, 1, 0, a, 1, 0, -b, 0, 1, b, 0, 1]
n0 = Vector3f(0.0, 0.0, -1.0)
n1 = Vector3f(0.0, 1.0, 0.0)
angle_0 = ek.pi / 2.0
angle_1 = ek.acos(3.0 / 5.0)
n2 = n0 * angle_0 + n1 * angle_1
n2 /= ek.norm(n2)
n = np.vstack([n2, n0, n0, n1, n1]).transpose()
m.faces_buffer()[:] = [0, 1, 2, 0, 3, 4]
m.recompute_vertex_normals()
for i in range(5):
assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4)
@fresolver_append_path
def test05_load_simple_mesh(variant_scalar_rgb):
from mitsuba.core.xml import load_string
"""Tests the OBJ and PLY loaders on a simple example."""
for mesh_format in ["obj", "ply"]:
shape = load_string("""
<shape type="{0}" version="2.0.0">
<string name="filename" value="resources/data/tests/{0}/cbox_smallbox.{0}"/>
</shape>
""".format(mesh_format))
positions = shape.vertex_positions_buffer()
faces = shape.faces_buffer()
assert shape.has_vertex_normals()
assert ek.slices(positions) == 72
assert ek.slices(faces) == 36
assert ek.allclose(faces[6:9], [4, 5, 6])
assert ek.allclose(positions[:5], [130, 165, 65, 82, 165])
@pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized'])
@pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv'])
@pytest.mark.parametrize('face_normals', [True, False])
def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals):
"""Tests the OBJ & PLY loaders with combinations of vertex / face normals,
presence and absence of UVs, etc.
"""
from mitsuba.core.xml import load_string
def test():
shape = load_string("""
<shape type="{0}" version="2.0.0">
<string name="filename" value="resources/data/tests/{0}/rectangle_{1}.{0}" />
<boolean name="face_normals" value="{2}" />
</shape>
""".format(mesh_format, features, str(face_normals).lower()))
assert shape.has_vertex_normals() == (not face_normals)
positions = shape.vertex_positions_buffer()
normals = shape.vertex_normals_buffer()
texcoords = shape.vertex_texcoords_buffer()
faces = shape.faces_buffer()
(v0, v2, v3) = [positions[i*3:(i+1)*3] for i in [0, 2, 3]]
assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3)
assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3)
assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3)
if 'uv' in features:
assert shape.has_vertex_texcoords()
(uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]]
# For OBJs (and .serialized generated from OBJ), UV.y is flipped.
if mesh_format in ['obj', 'serialized']:
assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3)
else:
assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3)
if shape.has_vertex_normals():
for n in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]:
assert ek.allclose(n, [0.0, 1.0, 0.0])
return fresolver_append_path(test)()
@fresolver_append_path
def test07_ply_stored_attribute(variant_scalar_rgb):
from mitsuba.core import Vector3f
from mitsuba.core.xml import load_string
m = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle_face_colors.ply"/>
</shape>
""")
assert str(m) == """PLYMesh[
name = "triangle_face_colors.ply",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [0, 1, 1]
],
vertex_count = 3,
vertices = [72 B of vertex data],
face_count = 1,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0,
mesh attributes = [
face_color: 3 floats
]
]"""
def test08_mesh_add_attribute(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype
from mitsuba.render import Mesh
m = Mesh("MyMesh", 3, 2)
m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]
m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]
m.parameters_changed()
m.add_attribute("vertex_color", 3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0]
assert str(m) == """Mesh[
name = "MyMesh",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [1, 1, 0]
],
vertex_count = 3,
vertices = [72 B of vertex data],
face_count = 2,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0.96,
mesh attributes = [
vertex_color: 3 floats
]
]""" | 1.8125 | 2 |
7KYU/next_prime.py | yaznasivasai/python_codewars | 4 | 3215 | from math import sqrt
def is_simple(n: int) -> bool:
if n % 2 == 0 and n != 2:
return False
for i in range (3, int(sqrt(n)) + 2, 2):
if n % i == 0 and n != i:
return False
return True
def next_prime(n: int) -> int:
n += 1
if n <= 2:
return 2
else:
if n % 2 == 0:
n += 1
while not is_simple(n):
n += 2
return n | 2.90625 | 3 |
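A few sanity checks for the function above; the expected values follow from tracing the code.

# Quick checks, assuming next_prime from above is in scope.
assert next_prime(1) == 2    # n <= 2 branch
assert next_prime(2) == 3
assert next_prime(5) == 7    # 6 is even, so the search starts at 7
assert next_prime(7) == 11   # 9 is rejected because 9 % 3 == 0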
ddt/__init__.py | GawenChen/test_pytest | 0 | 3239 | # -*- coding: utf-8 -*-
"""
@Time : 2021/10/9 17:51
@Auth : 潇湘
@File :__init__.py.py
@IDE :PyCharm
@QQ : 810400085
""" | 0.462891 | 0 |
news_collector/collector/consumers.py | ridwaniyas/channels-examples | 0 | 3247 | import asyncio
import json
import datetime
from aiohttp import ClientSession
from channels.generic.http import AsyncHttpConsumer
from .constants import BLOGS
class NewsCollectorAsyncConsumer(AsyncHttpConsumer):
"""
Async HTTP consumer that fetches URLs.
"""
async def handle(self, body):
# Adapted from:
# "Making 1 million requests with python-aiohttp"
# https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
async def fetch(url, session):
async with session.get(url) as response:
return await response.read()
tasks = []
loop = asyncio.get_event_loop()
# aiohttp allows a ClientSession object to link all requests together
t0 = datetime.datetime.now()
async with ClientSession() as session:
for name, url in BLOGS.items():
print('Start downloading "%s"' % name)
# Launch a coroutine for each URL fetch
task = loop.create_task(fetch(url, session))
tasks.append(task)
# Wait on, and then gather, all responses
responses = await asyncio.gather(*tasks)
dt = (datetime.datetime.now() - t0).total_seconds()
print('All downloads completed; elapsed time: {} [s]'.format(dt))
# asyncio.gather returns results in the order of the original sequence,
# so we can safely zip these together.
data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses]))
text = json.dumps(data)
# We have to send a response using send_response rather than returning
# it in Channels' async HTTP consumer
await self.send_response(200,
text.encode(),
headers=[
("Content-Type", "application/json"),
]
)
| 2.0625 | 2 |
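The fetch/gather pattern used above also works outside Django Channels; the sketch below is a standalone version with placeholder URLs, assuming aiohttp is installed.

import asyncio
from aiohttp import ClientSession

# Placeholder URL map standing in for the BLOGS constant above.
BLOGS = {'example': 'https://example.com', 'python': 'https://www.python.org'}

async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()

async def collect():
    async with ClientSession() as session:
        tasks = [asyncio.ensure_future(fetch(url, session)) for url in BLOGS.values()]
        # gather() returns results in the order the tasks were created,
        # so zipping with BLOGS.keys() is safe.
        responses = await asyncio.gather(*tasks)
    return dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses]))

data = asyncio.run(collect())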
mmdet/models/losses/ranking_losses.py | VietDunghacker/VarifocalNet | 0 | 3255 | import torch
class RankSort(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets > 0.)
fg_logits = logits[fg_labels]
fg_targets = targets[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta_RS
relevant_bg_labels=((targets==0) & (logits>=threshold_logit))
relevant_bg_logits = logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
sorting_error=torch.zeros(fg_num).cuda()
ranking_error=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
# Difference Transforms (x_ij)
fg_relations=fg_logits-fg_logits[ii]
bg_relations=relevant_bg_logits-fg_logits[ii]
if delta_RS > 0:
fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1)
bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1)
else:
fg_relations = (fg_relations >= 0).float()
bg_relations = (bg_relations >= 0).float()
# Rank of ii among pos and false positive number (bg with larger scores)
rank_pos=torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
# Rank of ii among all examples
rank=rank_pos+FP_num
# Ranking error of example ii. target_ranking_error is always 0. (Eq. 7)
ranking_error[ii]=FP_num/rank
# Current sorting error of example ii. (Eq. 7)
current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos
#Find examples in the target sorted order for example ii
iou_relations = (fg_targets >= fg_targets[ii])
target_sorted_order = iou_relations * fg_relations
#The rank of ii among positives in sorted order
rank_pos_target = torch.sum(target_sorted_order)
#Compute target sorting error. (Eq. 8)
#Since target ranking error is 0, this is also total target error
target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target
#Compute sorting error on example ii
sorting_error[ii] = current_sorting_error - target_sorting_error
#Identity Update for Ranking Error
if FP_num > eps:
#For ii the update is the ranking error
fg_grad[ii] -= ranking_error[ii]
#For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num)
relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num))
#Find the positives that are misranked (the cause of the error)
#These are the ones with smaller IoU but larger logits
missorted_examples = (~ iou_relations) * fg_relations
#Denominator of sorting pmf
sorting_pmf_denom = torch.sum(missorted_examples)
#Identity Update for Sorting Error
if sorting_pmf_denom > eps:
#For ii the update is the sorting error
fg_grad[ii] -= sorting_error[ii]
#For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom)
fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom))
#Normalize gradients by number of positives
classification_grads[fg_labels]= (fg_grad/fg_num)
classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num)
ctx.save_for_backward(classification_grads)
return ranking_error.mean(), sorting_error.mean()
@staticmethod
def backward(ctx, out_grad1, out_grad2):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None, None
class aLRPLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets == 1)
fg_logits = logits[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta
#Get valid bg logits
relevant_bg_labels=((targets==0)&(logits>=threshold_logit))
relevant_bg_logits=logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
rank=torch.zeros(fg_num).cuda()
prec=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
max_prec=0
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
#x_ij s as score differences with fgs
fg_relations=fg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with fgs
fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1)
#Discard i=j in the summation in rank_pos
fg_relations[ii]=0
#x_ij s as score differences with bgs
bg_relations=relevant_bg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with bgs
bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1)
#Compute the rank of the example within fgs and number of bgs with larger scores
rank_pos=1+torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
#Store the total since it is normalizer also for aLRP Regression error
rank[ii]=rank_pos+FP_num
#Compute precision for this example to compute classification loss
prec[ii]=rank_pos/rank[ii]
#For stability, set eps to an infinitesimal value (e.g. 1e-6), then compute grads
if FP_num > eps:
fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii]
relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num))
#aLRP with grad formulation fg gradient
classification_grads[fg_labels]= fg_grad
#aLRP with grad formulation bg gradient
classification_grads[relevant_bg_labels]= relevant_bg_grad
classification_grads /= (fg_num)
cls_loss=1-prec.mean()
ctx.save_for_backward(classification_grads)
return cls_loss, rank, order
@staticmethod
def backward(ctx, out_grad1, out_grad2, out_grad3):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None, None, None
class APLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, delta=1.):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets == 1)
fg_logits = logits[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta
#Get valid bg logits
relevant_bg_labels=((targets==0)&(logits>=threshold_logit))
relevant_bg_logits=logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
rank=torch.zeros(fg_num).cuda()
prec=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
max_prec=0
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
#x_ij s as score differences with fgs
fg_relations=fg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with fgs
fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1)
#Discard i=j in the summation in rank_pos
fg_relations[ii]=0
#x_ij s as score differences with bgs
bg_relations=relevant_bg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with bgs
bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1)
#Compute the rank of the example within fgs and number of bgs with larger scores
rank_pos=1+torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
#Store the total since it is normalizer also for aLRP Regression error
rank[ii]=rank_pos+FP_num
#Compute precision for this example
current_prec=rank_pos/rank[ii]
#Compute interpolated AP and store gradients for relevant bg examples
if (max_prec<=current_prec):
max_prec=current_prec
relevant_bg_grad += (bg_relations/rank[ii])
else:
relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec)))
#Store fg gradients
fg_grad[ii]=-(1-max_prec)
prec[ii]=max_prec
#aLRP with grad formulation fg gradient
classification_grads[fg_labels]= fg_grad
#aLRP with grad formulation bg gradient
classification_grads[relevant_bg_labels]= relevant_bg_grad
classification_grads /= fg_num
cls_loss=1-prec.mean()
ctx.save_for_backward(classification_grads)
return cls_loss
@staticmethod
def backward(ctx, out_grad1):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None
| 2.140625 | 2 |
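A small usage sketch for RankSort above. It assumes a CUDA device is available (the forward pass allocates tensors with .cuda()) and uses random logits and IoU-like targets purely for illustration.

import torch

logits = torch.randn(100, device='cuda', requires_grad=True)
targets = torch.zeros(100, device='cuda')
targets[:10] = torch.rand(10, device='cuda') * 0.5 + 0.5  # 10 "foreground" IoU-like targets

# Custom autograd Function: apply() runs forward(), backward() uses the saved grads.
ranking_loss, sorting_loss = RankSort.apply(logits, targets)
(ranking_loss + sorting_loss).backward()
print(ranking_loss.item(), sorting_loss.item(), logits.grad.abs().sum().item())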
ipyvolume/astro.py | larsoner/ipyvolume | 1 | 3271 | import numpy as np
import PIL.Image
import pythreejs
import ipyvolume as ipv
from .datasets import UrlCached
def _randomSO3():
"""return random rotatation matrix, algo by <NAME>"""
u1 = np.random.random()
u2 = np.random.random()
u3 = np.random.random()
R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]])
v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)])
H = np.identity(3)-2*v*np.transpose([v])
return - np.dot(H, R)
def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}):
"""Create a fake galaxy around the points orbit_x/y/z with N_stars around it"""
if orbit_line_interpolate > 1:
import scipy.interpolate
x = np.linspace(0, 1, len(orbit_x))
x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate)
kind = 'quadratic'
orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth)
orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth)
orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth)
else:
orbit_x_line = orbit_x
orbit_y_line = orbit_y
orbit_z_line = orbit_z
line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible)
x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars))
y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars))
z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars))
xr, yr, zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))# +
r = np.sqrt(xr**2 + yr**2 + zr**2)
for i in range(N_stars):
a = np.linspace(0, 1, x.shape[0]) * 2 * np.pi * N_star_orbits
xo = r[i] * np.sin(a)
yo = r[i] * np.cos(a)
zo = a * 0
xo, yo, zo = np.dot(_randomSO3(), [xo, yo, zo])
#print(x.shape, xo.shape)
x[:, i] += xo
y[:, i] += yo
z[:, i] += zo
sprite = ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs)
with sprite.material.hold_sync():
sprite.material.blending = pythreejs.BlendingMode.CustomBlending
sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor
sprite.material.blendDst = pythreejs.BlendFactors.OneFactor
sprite.material.blendEquation = 'AddEquation'
sprite.material.transparent = True
sprite.material.depthWrite = False
sprite.material.alphaTest = 0.1
return sprite, line
def radial_sprite(shape, color):
color = np.array(color)
ara = np.zeros(shape[:2] + (4,), dtype=np.uint8)
x = np.linspace(-1, 1, shape[0])
y = np.linspace(-1, 1, shape[1])
x, y = np.meshgrid(x, y)
s = 0.5
radius = np.sqrt(x**2+y**2)
amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T
ara[...,3] = (amplitude * 255)
ara[...,:3] = color * amplitude.reshape(shape + (1,))
im = PIL.Image.fromarray(ara, 'RGBA')
return im
def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240, 240]):
import ipyvolume as ipv
rng = np.random.RandomState(seed)
x, y, z = rng.normal(size=(3, N))
r = np.sqrt(x**2 + y**2 + z**2)/(radius + thickness * radius * np.random.random(N))
x /= r
y /= r
z /= r
return ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100)
milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg'
milkyway_image = UrlCached(milkyway_url)
def plot_milkyway(R_sun=8, size=100):
mw_image = PIL.Image.open(milkyway_image.fetch())
rescale = 40
t = np.linspace(0, 1, 100)
xmw = np.linspace(0, 1, 10)
ymw = np.linspace(0, 1, 10)
xmw, ymw = np.meshgrid(xmw, ymw)
zmw = xmw * 0 + 0.01
mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False)
mw.material.blending = pythreejs.BlendingMode.CustomBlending
mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor
mw.material.blendDst = pythreejs.BlendFactors.OneFactor
mw.material.blendEquation = 'AddEquation'
mw.material.transparent = True
mw.material.depthWrite = False
mw.material.alphaTest = 0.1
ipv.xyzlim(size)
return mesh | 2.640625 | 3 |
examples/catapi/feeder.py | IniZio/py-skygear | 8 | 3295 | def pick_food(name):
if name == "chima":
return "chicken"
else:
return "dry food"
| 1.28125 | 1 |
util/eval.py | jhong93/vpd | 7 | 3343 | import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
def save_confusion_matrix(truth, pred, out_file, norm=None):
label_names = list(set(truth) | set(pred))
label_names.sort()
truth_compact = [label_names.index(x) for x in truth]
pred_compact = [label_names.index(x) for x in pred]
cm = confusion_matrix(
truth_compact, pred_compact, labels=list(range(len(label_names))),
normalize=norm)
if norm is not None:
cm *= 100
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
disp = ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=label_names)
disp.plot(ax=ax, xticks_rotation='vertical',
values_format='.1f' if norm is not None else 'd')
plt.tight_layout()
plt.savefig(out_file)
plt.close(fig)
| 1.945313 | 2 |
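A short usage sketch for the helper above; the label lists and output filenames are arbitrary examples.

truth = ['cat', 'cat', 'dog', 'bird', 'dog', 'cat']
pred = ['cat', 'dog', 'dog', 'bird', 'dog', 'cat']
save_confusion_matrix(truth, pred, 'cm_counts.png')             # raw counts
save_confusion_matrix(truth, pred, 'cm_norm.png', norm='true')  # per-true-class percentages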
netdisco/discoverables/nanoleaf_aurora.py | jjlawren/netdisco | 1 | 3351 | """Discover Nanoleaf Aurora devices."""
from . import MDNSDiscoverable
class Discoverable(MDNSDiscoverable):
"""Add support for discovering Nanoleaf Aurora devices."""
def __init__(self, nd):
super(Discoverable, self).__init__(nd, '_nanoleafapi._tcp.local.')
| 0.777344 | 1 |
appliance_catalog/migrations/0015_appliance_icon_py3.py | ChameleonCloud/portal | 3 | 3367 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-25 20:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""Updates ImageField syntax for later version.
"""
dependencies = [
('appliance_catalog', '0014_auto_20180625_1104'),
]
operations = [
migrations.AlterField(
model_name='appliance',
name='appliance_icon',
field=models.ImageField(blank=True, upload_to='appliance_catalog/icons/'),
),
]
| 0.773438 | 1 |
pyrevolve/experiment_management.py | MRebolle/Battery-Robot | 0 | 3375 | import os
import shutil
import numpy as np
from pyrevolve.custom_logging.logger import logger
import sys
class ExperimentManagement:
# ids of robots in the name of all types of files are always phenotype ids, and the standard for id is 'robot_ID'
def __init__(self, settings):
self.settings = settings
manager_folder = os.path.dirname(self.settings.manager)
self._experiment_folder = os.path.join(manager_folder, 'data', self.settings.experiment_name, self.settings.run)
self._data_folder = os.path.join(self._experiment_folder, 'data_fullevolution')
self._gen_num = 0
def create_exp_folders(self):
if os.path.exists(self.experiment_folder):
shutil.rmtree(self.experiment_folder)
os.makedirs(self.experiment_folder)
os.mkdir(self.data_folder)
folders = ['genotypes', 'phenotypes', 'descriptors', 'objectives', 'fitness',
'battery', 'phenotype_images', 'failed_eval_robots']
for folder in folders:
os.mkdir(os.path.join(self.data_folder, folder))
@property
def experiment_folder(self):
return self._experiment_folder
@property
def data_folder(self):
return self._data_folder
def export_genotype(self, individual):
if self.settings.recovery_enabled:
individual.export_genotype(self.data_folder)
def export_phenotype(self, individual):
if self.settings.export_phenotype:
individual.export_phenotype(self.data_folder)
def export_fitnesses(self, individuals):
folder = self.data_folder
for individual in individuals:
individual.export_fitness(folder)
def export_fitness(self, individual):
folder = os.path.join(self.data_folder, 'fitness')
individual.export_fitness(folder)
def export_objectives(self, individual):
folder = os.path.join(self.data_folder, 'objectives')
individual.export_objectives(folder)
def export_battery(self, individual):
folder = os.path.join(self.data_folder, 'battery')
individual.export_battery(folder)
def export_behavior_measures(self, _id, measures):
filename = os.path.join(self.data_folder, 'descriptors', f'behavior_desc_{_id}.txt')
with open(filename, "w") as f:
if measures is None:
f.write(str(None))
else:
for key, val in measures.items():
f.write(f"{key} {val}\n")
def export_phenotype_images(self, dirpath, individual):
individual.phenotype.render_body(os.path.join(self.experiment_folder, dirpath, f'body_{individual.phenotype.id}.png'))
individual.phenotype.render_brain(os.path.join(self.experiment_folder, dirpath, f'brain_{individual.phenotype.id}.png'))
def export_failed_eval_robot(self, individual):
individual.genotype.export_genotype(os.path.join(self.data_folder, 'failed_eval_robots', f'genotype_{individual.phenotype.id}.txt'))
individual.phenotype.save_file(os.path.join(self.data_folder, 'failed_eval_robots', f'phenotype_{individual.phenotype.id}.yaml'))
individual.phenotype.save_file(os.path.join(self.data_folder, 'failed_eval_robots', f'phenotype_{individual.phenotype.id}.sdf'), conf_type='sdf')
def export_snapshots(self, individuals, gen_num):
self._gen_num = gen_num
if self.settings.recovery_enabled:
path = os.path.join(self.experiment_folder, f'selectedpop_{gen_num}')
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for ind in individuals:
self.export_phenotype_images(f'selectedpop_{str(gen_num)}', ind)
logger.info(f'Exported snapshot {str(gen_num)} with {str(len(individuals))} individuals')
def experiment_is_new(self):
if not os.path.exists(self.experiment_folder):
return True
path, dirs, files = next(os.walk(os.path.join(self.data_folder, 'fitness')))
if len(files) == 0:
return True
else:
return False
def read_recovery_state(self, population_size, offspring_size):
snapshots = []
for r, d, f in os.walk(self.experiment_folder):
for dir in d:
if 'selectedpop' in dir:
exported_files = len([name for name in os.listdir(os.path.join(self.experiment_folder, dir)) if os.path.isfile(os.path.join(self.experiment_folder, dir, name))])
if exported_files == (population_size * 2): # body and brain files
snapshots.append(int(dir.split('_')[1]))
if len(snapshots) > 0:
# the latest complete snapshot
last_snapshot = np.sort(snapshots)[-1]
# number of robots expected until the snapshot
n_robots = population_size + last_snapshot * offspring_size
else:
last_snapshot = -1
n_robots = 0
robot_ids = []
for r, d, f in os.walk(os.path.join(self.data_folder, 'fitness')):
for file in f:
robot_ids.append(int(file.split('.')[0].split('_')[-1]))
last_id = np.sort(robot_ids)[-1]
# if there are more robots to recover than the number expected in this snapshot
if last_id > n_robots:
# then recover also this partial offspring
has_offspring = True
else:
has_offspring = False
return last_snapshot, has_offspring, last_id+1
def plot_path(self, data_source: str, filename: str, file_extension=".png"):
data_folder = os.path.join(self._data_folder, data_source)
if not os.path.exists(data_folder):
os.mkdir(data_folder)
return os.path.join(data_folder, filename + str(self._gen_num) + file_extension)
| 1.59375 | 2 |
docs/updatedoc.py | JukeboxPipeline/jukedj | 2 | 3391 | #!/usr/bin/env python
"""Builds the documentaion. First it runs gendoc to create rst files for the source code. Then it runs sphinx make.
.. Warning:: This will delete the content of the output directory first! So you might lose data.
You can use updatedoc.py -nod.
Usage, just call::
updatedoc.py -h
"""
import argparse
import os
import shutil
import sys
import gendoc
thisdir = os.path.abspath(os.path.dirname(__file__))
def setup_argparse():
"""Sets up the argument parser and returns it
:returns: the parser
:rtype: :class:`argparse.ArgumentParser`
:raises: None
"""
parser = argparse.ArgumentParser(
description="Builds the documentaion. First it runs gendoc to create rst files\
for the source code. Then it runs sphinx make.\
WARNING: this will delete the contents of the output dirs. You can use -nod.")
ipath = os.path.join(thisdir, '../src')
ipath = os.path.abspath(ipath)
idefault = [ipath]
parser.add_argument('-i', '--input', nargs='+', default=idefault,
help='list of input directories. gendoc is called for every\
source dir.\
Default is \'%s\'.' % ', '.join(idefault))
opath = os.path.join(thisdir, 'reference')
opath = os.path.abspath(opath)
odefault = [opath]
parser.add_argument('-o', '--output', nargs='+', default=odefault,
help='list of output directories. if you have multiple source\
directories, the corresponding output directorie is used.\
if there are less dirs than for source, the last output dir\
is used for the remaining source dirs.\
WARNING: the output directories are emptied by default. See -nod.\
Default is \'%s\'.' % ', '.join(odefault))
gadefault = ['-T', '-f', '-e', '-o']
parser.add_argument('-ga', '--gendocargs', nargs='*', default=gadefault,
help="list of arguments to pass to gendoc. use -gh for info.\
Default is \'%s\'" % ', '.join(gadefault))
parser.add_argument('-nod', '--nodelete', action='store_true',
help='Do not empty the output directories first.')
parser.add_argument('-gh', '--gendochelp', action='store_true',
help='print the help for gendoc and exit')
return parser
def prepare_dir(directory, delete=True):
"""Create apidoc dir, delete contents if delete is True.
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None
"""
if os.path.exists(directory):
if delete:
assert directory != thisdir, 'Trying to delete docs! Specify other output dir!'
print 'Deleting %s' % directory
shutil.rmtree(directory)
print 'Creating %s' % directory
os.mkdir(directory)
else:
print 'Creating %s' % directory
os.mkdir(directory)
def run_gendoc(source, dest, args):
"""Starts gendoc which reads source and creates rst files in dest with the given args.
:param source: The python source directory for gendoc. Can be a relative path.
:type source: str
:param dest: The destination for the rst files. Can be a relative path.
:type dest: str
:param args: Arguments for gendoc. See gendoc for more information.
:type args: list
:returns: None
:rtype: None
:raises: SystemExit
"""
args.insert(0, 'gendoc.py')
args.append(dest)
args.append(source)
print 'Running gendoc.main with: %s' % args
gendoc.main(args)
def main(argv=sys.argv[1:]):
"""Parse commandline arguments and run the tool
:param argv: the commandline arguments.
:type argv: list
:returns: None
:rtype: None
:raises: None
"""
parser = setup_argparse()
args = parser.parse_args(argv)
if args.gendochelp:
sys.argv[0] = 'gendoc.py'
genparser = gendoc.setup_parser()
genparser.print_help()
sys.exit(0)
print 'Preparing output directories'
print '='*80
for odir in args.output:
prepare_dir(odir, not args.nodelete)
print '\nRunning gendoc'
print '='*80
for i, idir in enumerate(args.input):
if i >= len(args.output):
odir = args.output[-1]
else:
odir = args.output[i]
run_gendoc(idir, odir, args.gendocargs)
if __name__ == '__main__':
main() | 1.429688 | 1 |
resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py | yuwenxianglong/zhxsh.github.io | 0 | 3439 | # -*- coding: utf-8 -*-
"""
@Project : RNN_Prediction
@Author : <NAME>
@Filename: stockPrediction202005201318.py
@IDE : PyCharm
@Time1 : 2020-05-20 13:18:46
@Time2 : 2020/5/20 13:18
@Month1 : May
@Month2 : May
"""
import tushare as ts
import tensorflow as tf
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
stock_catl = ts.get_hist_data('300750')
stock_catl = stock_catl.sort_index(ascending=True)
stock_catl = (stock_catl - stock_catl.mean()) / \
(stock_catl.max() - stock_catl.min())
# train, val = train_test_split(stock_catl, test_size=0.5)
# train = train.sort_index(ascending=True)
# val = val.sort_index(ascending=True)
train = stock_catl.iloc[:-60, :]
val = stock_catl.iloc[-60:, :]
window_size = 30
column = 'high'
epoches = 300
def batch_dataset(dataset):
dataset_batched = dataset.batch(window_size, drop_remainder=True)
return dataset_batched
def zip_ds(dataset):
ds_data = tf.constant(dataset.values, dtype=tf.float32)
ds_data = tf.data.Dataset.from_tensor_slices(ds_data). \
window(window_size, shift=1).flat_map(batch_dataset)
ds_label = tf.constant(dataset.values[window_size:], dtype=tf.float32)
ds_label = tf.data.Dataset.from_tensor_slices(ds_label)
ds_train = tf.data.Dataset.zip((ds_data, ds_label)).batch(128).repeat()
return ds_train
ds_train = zip_ds(train)
ds_val = zip_ds(val)
model = tf.keras.Sequential(
[
tf.keras.layers.LSTM(128, return_sequences=True, activation='relu'),
tf.keras.layers.LSTM(128, activation='relu'),
tf.keras.layers.Dense(13)
]
)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='mse')
history = model.fit(
ds_train, epochs=epoches,
steps_per_epoch=5,
validation_data=ds_val,
validation_steps=1
)
model.save('stockLSTM')
# Plot loss function
plt.figure(figsize=(19, 9))
ax = plt.gca()
plt.plot(range(len(history.history['loss'])), history.history['loss'])
plt.plot(range(len(history.history['val_loss'])), history.history['val_loss'])
ax.set_yscale('log')
plt.show()
# Compare fitting and real values.
dff = pd.DataFrame()
for i in range(len(stock_catl) - window_size):
fits = model.predict(tf.constant(tf.expand_dims(stock_catl.values[i:i + window_size, :], axis=0)))
dffits = pd.DataFrame(fits, columns=stock_catl.columns)
dff = dff.append(dffits)
dff.index = stock_catl.index[window_size:]
plt.figure(figsize=(19, 9))
dff[column].plot()
stock_catl.iloc[window_size:, :][column].plot(style='-o')
plt.show()
# To predict future 100 business days.
dfp = stock_catl.copy()
for i in range(100):
pres = model.predict(tf.constant(tf.expand_dims(dfp.values[-1 * window_size:], axis=0)))
dfpres = pd.DataFrame(pres, columns=stock_catl.columns)
dfp = dfp.append(dfpres, ignore_index=True)
dfp[column].plot()
plt.show()
| 1.742188 | 2 |
test/library/draft/DataFrames/psahabu/AddSeries.py | jhh67/chapel | 1,602 | 3447 | import pandas as pd
I = ["A", "B", "C", "D", "E"]
oneDigit = pd.Series([1, 2, 3, 4, 5], pd.Index(I))
twoDigit = pd.Series([10, 20, 30, 40, 50], pd.Index(I))
print "addends:"
print oneDigit
print twoDigit
print
print "sum:"
print oneDigit + twoDigit
print
I2 = ["A", "B", "C"]
I3 = ["B", "C", "D", "E"]
X = pd.Series([0, 1, 2], pd.Index(I2))
Y = pd.Series([10, 20, 0, 0], pd.Index(I3))
print "addends:"
print X
print Y
print
print "sum:"
print X + Y
print
A = pd.Series(["hello ", "my ", "name", "is", "brad"])
B = pd.Series(["world", "real"])
print "addends:"
print A
print B
print
print "sum: "
print A + B
| 2.6875 | 3 |
setup.py | ooreilly/mydocstring | 13 | 3455 | from setuptools import setup
setup(name='mydocstring',
version='0.2.7',
description="""A tool for extracting and converting Google-style docstrings to
plain-text, markdown, and JSON.""",
url='http://github.com/ooreilly/mydocstring',
author="<NAME>",
license='MIT',
packages=['mydocstring'],
install_requires=['mako', 'docopt'],
entry_points = {
'console_scripts': [
'mydocstring=mydocstring.docstring:main',
],},
package_data={'mydocstring': ['templates/google_docstring.md']},
zip_safe=False)
| 1.398438 | 1 |
src/ezdxf/math/bulge.py | dmtvanzanten/ezdxf | 0 | 3463 | # Copyright (c) 2018-2021 <NAME>
# License: MIT License
# source: http://www.lee-mac.com/bulgeconversion.html
# source: http://www.afralisp.net/archive/lisp/Bulges1.htm
from typing import Any, TYPE_CHECKING, Tuple
import math
from ezdxf.math import Vec2
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
__all__ = [
"bulge_to_arc", "bulge_3_points", "bulge_center", "bulge_radius",
"arc_to_bulge"
]
def polar(p: Any, angle: float, distance: float) -> Vec2:
""" Returns the point at a specified `angle` and `distance` from point `p`.
Args:
p: point as :class:`Vec2` compatible object
angle: angle in radians
distance: distance
"""
return Vec2(p) + Vec2.from_angle(angle, distance)
def angle(p1: Any, p2: Any) -> float:
""" Returns angle a line defined by two endpoints and x-axis in radians.
Args:
p1: start point as :class:`Vec2` compatible object
p2: end point as :class:`Vec2` compatible object
"""
return (Vec2(p2) - Vec2(p1)).angle
def arc_to_bulge(center: 'Vertex', start_angle: float, end_angle: float,
radius: float) -> Tuple['Vec2', 'Vec2', float]:
"""
Returns bulge parameters from arc parameters.
Args:
center: circle center point as :class:`Vec2` compatible object
start_angle: start angle in radians
end_angle: end angle in radians
radius: circle radius
Returns:
tuple: (start_point, end_point, bulge)
"""
start_point = polar(center, start_angle, radius)
end_point = polar(center, end_angle, radius)
pi2 = math.pi * 2
a = math.fmod((pi2 + (end_angle - start_angle)), pi2) / 4.
bulge = math.sin(a) / math.cos(a)
return start_point, end_point, bulge
def bulge_3_points(start_point: 'Vertex', end_point: 'Vertex',
point: 'Vertex') -> float:
""" Returns bulge value defined by three points.
Based on 3-Points to Bulge by `Lee Mac`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
point: arbitrary point as :class:`Vec2` compatible object
"""
a = (math.pi - angle(point, start_point) + angle(point, end_point)) / 2
return math.sin(a) / math.cos(a)
def bulge_to_arc(start_point: 'Vertex',
end_point: 'Vertex',
bulge: float) -> Tuple['Vec2', float, float, float]:
""" Returns arc parameters from bulge parameters.
The arcs defined by bulge values of :class:`~ezdxf.entities.LWPolyline`
and 2D :class:`~ezdxf.entities.Polyline` entities start at the vertex which
includes the bulge value and ends at the following vertex.
Based on Bulge to Arc by `Lee Mac`_.
Args:
start_point: start vertex as :class:`Vec2` compatible object
end_point: end vertex as :class:`Vec2` compatible object
bulge: bulge value
Returns:
Tuple: (center, start_angle, end_angle, radius)
"""
r = signed_bulge_radius(start_point, end_point, bulge)
a = angle(start_point, end_point) + (math.pi / 2 - math.atan(bulge) * 2)
c = polar(start_point, a, r)
if bulge < 0:
return c, angle(c, end_point), angle(c, start_point), abs(r)
else:
return c, angle(c, start_point), angle(c, end_point), abs(r)
def bulge_center(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> 'Vec2':
""" Returns center of arc described by the given bulge parameters.
Based on Bulge Center by `<NAME>`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value as float
"""
start_point = Vec2(start_point)
a = angle(start_point, end_point) + (math.pi / 2. - math.atan(bulge) * 2.)
return start_point + Vec2.from_angle(a, signed_bulge_radius(start_point,
end_point,
bulge))
def signed_bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> float:
return Vec2(start_point).distance(Vec2(end_point)) * (
1. + (bulge * bulge)) / 4. / bulge
def bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> float:
""" Returns radius of arc defined by the given bulge parameters.
Based on Bulge Radius by `<NAME>`_
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value
"""
return abs(signed_bulge_radius(start_point, end_point, bulge))
| 2.625 | 3 |
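A worked round trip through the helpers above for a counter-clockwise quarter circle; the expected values follow from the formulas (bulge = tan(included_angle / 4)).

import math

start, end, bulge = arc_to_bulge(center=(0, 0), start_angle=0,
                                 end_angle=math.pi / 2, radius=1)
# start ~ (1, 0), end ~ (0, 1), bulge = tan(pi / 8) ~ 0.4142
center, a0, a1, r = bulge_to_arc(start, end, bulge)
# center ~ (0, 0), a0 ~ 0, a1 ~ pi / 2, r ~ 1
print(round(bulge, 4), center, math.degrees(a0), math.degrees(a1), r)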
lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py | multi-service-fabric/element-manager | 0 | 3487 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: CgwshDeviceDriverSetParameterECDB.py
'''
Parameter module for Cgwsh driver configuration
'''
import GlobalModule
from EmCommonLog import decorater_log
from DriverSetParameterECDB import DriverSetParameterECDB
class CgwshDeviceDriverSetParameterECDB(DriverSetParameterECDB):
'''
Parameter class for Cgwsh driver configuration
'''
@decorater_log
def __init__(self,
device_name=None,
ec_message=None,
db_info=None):
'''
Constructor
'''
super(CgwshDeviceDriverSetParameterECDB, self).__init__(device_name,
ec_message,
db_info)
self.ec_message = self.ec_message["device"]
@decorater_log
def get_service_info(self):
'''
Service information is acquired.
'''
pass
@decorater_log
def get_management_info(self):
'''
Management information is acquired.
'''
get_info = {}
get_info["device_name"] = self.ec_message.get("name")
GlobalModule.EM_LOGGER.debug("get management_info = %s" % (get_info,))
return get_info
@decorater_log
def get_static_route_info(self):
'''
Static route information is acquired.
acquired dict:
{
static_route:[{
ip_address:str,
subnet_mask:str,
gateway_address:str
}]
}
'''
get_info = {}
tmp_list = []
routes = self.ec_message.get("serviceInfo", {}).get("staticRoute", ())
for route in routes:
tmp_item = {}
tmp_item["ip_address"] = route.get("ipAddress")
tmp_item["subnet_mask"] = route.get("subnetMask")
tmp_item["gateway_address"] = route.get("gatewayIpAddress")
tmp_list.append(tmp_item)
get_info["static_route"] = tmp_list
GlobalModule.EM_LOGGER.debug("get static_route = %s" % (get_info,))
return get_info
@decorater_log
def get_tunnel_if_info(self):
'''
Tunnel interface information is acquired.
acquired dict:
{
tunnel_if:[{
vrf_name:str,
if_name:str,
uni_if_name:str,
uni_vlan_id:str,
tunnel_source:str,
}]
}
'''
get_info = {}
tmp_list = []
tunnel_uni = self.ec_message.get("serviceInfo", {}).get("uni", ())
tunnel_officeInfo = self.ec_message.get(
"serviceInfo", {}).get("officeInfo", ())
vrf_name = tunnel_uni.get("vrfName")
uni_if_name = tunnel_uni.get("ifName")
uni_vlan_id = tunnel_uni.get("vlanId")
for tunnel in tunnel_officeInfo:
tmp_item = {}
tmp_item["vrf_name"] = vrf_name
tmp_item["if_name"] = tunnel.get("tunnelIfName")
tmp_item["uni_if_name"] = uni_if_name
tmp_item["uni_vlan_id"] = uni_vlan_id
tmp_item["tunnel_source"] = tunnel.get(
"tunnelSrcIpAddress")
tmp_list.append(tmp_item)
get_info["tunnel_if"] = tmp_list
GlobalModule.EM_LOGGER.debug("get tunnel_if = %s" % (get_info,))
return get_info
@decorater_log
def get_pppoe_info(self):
'''
PPPoE information is acquired.
acquired dict:
{
pppoe:[{
username:str,
password:str,
tenant:str,
pp_no:str
}]
}
'''
get_info = {}
tmp_list = []
ppp_infos = self.ec_message.get("serviceInfo", {}).get("pppInfo", ())
for ppp_info in ppp_infos:
tmp_item = {}
tmp_item["username"] = ppp_info.get("connectionId")
tmp_item["password"] = <PASSWORD>.get("connectionPassword")
tmp_item["tenant"] = ppp_info.get("corporationId")
tmp_item["pp_no"] = ppp_info.get("ppId")
tmp_list.append(tmp_item)
get_info["pppoe"] = tmp_list
GlobalModule.EM_LOGGER.debug("get pppoe = %s" % (get_info,))
return get_info
| 1.460938 | 1 |
kindler/solver/optimizer.py | mingruimingrui/kindler | 0 | 3519 | import torch
def make_sgd_optimizer(
model,
base_lr=0.001,
bias_lr_factor=2.0,
momentum=0.9,
weight_decay=0.0005,
weight_decay_bias=0.0,
):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
param_lr = base_lr
param_weight_decay = weight_decay
if "bias" in key:
param_lr = base_lr * bias_lr_factor
param_weight_decay = weight_decay_bias
params.append({
'params': [value],
'lr': param_lr,
'weight_decay': param_weight_decay
})
optimizer = torch.optim.SGD(params, base_lr, momentum=momentum)
return optimizer
| 1.375 | 1 |
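A minimal sketch of using the helper above with a tiny model; the learning-rate and decay values are illustrative.

import torch

model = torch.nn.Linear(10, 2)  # tiny stand-in model
optimizer = make_sgd_optimizer(model, base_lr=0.01, bias_lr_factor=2.0,
                               weight_decay=0.0005, weight_decay_bias=0.0)
# The bias parameter group ends up with lr=0.02 and weight_decay=0.0.
for group in optimizer.param_groups:
    print(group['lr'], group['weight_decay'])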
utils/src/adventofcode/utils/Point3D.py | dh256/adventofcode | 0 | 3527 | class Point3D:
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
'''
Returns the Manhattan (taxicab) distance between two 3D points
'''
def distance(self, value):
return abs(self.x - value.x) + abs(self.y - value.y) + abs(self.z - value.z)
def __eq__(self, value):
return self.x == value.x and self.y == value.y and self.z == value.z
def __hash__(self):
return hash((self.x,self.y,self.z))
def __repr__(self):
return f'({self.x},{self.y},{self.z})'
def __add__(self,value):
return Point3D(self.x + value.x, self.y + value.y, self.z + value.z) | 2.40625 | 2 |
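A short usage sketch for the class above.

a = Point3D(1, 2, 3)
b = Point3D(4, 0, 3)
print(a.distance(b))  # Manhattan distance: |1-4| + |2-0| + |3-3| = 5
print(a + b)          # (5,2,6)
print(a == Point3D(1, 2, 3), len({a, Point3D(1, 2, 3)}))  # True 1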
querybuilder/tests/window_tests.py | wesokes/django-query-builder | 110 | 3535 | from querybuilder.fields import (
RankField, RowNumberField, DenseRankField, PercentRankField, CumeDistField, NTileField, LagField,
LeadField, FirstValueField, LastValueField, NthValueField, NumStdDevField
)
from querybuilder.query import QueryWindow, Query
from querybuilder.tests.models import Order
from querybuilder.tests.query_tests import QueryTestCase, get_comparison_str
class QueryWindowTest(QueryTestCase):
def test_query_window(self):
query_window = QueryWindow()
query_str = query_window.get_sql()
expected_query = 'OVER ()'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition(self):
query_window = QueryWindow().partition_by('field_one')
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_order(self):
query_window = QueryWindow().order_by('field_one')
query_str = query_window.get_sql()
expected_query = 'OVER (ORDER BY field_one ASC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition_order(self):
query_window = QueryWindow().partition_by(
'field_one'
).order_by(
'field_one'
)
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one ORDER BY field_one ASC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition_order_many(self):
query_window = QueryWindow().partition_by(
'field_one'
).partition_by(
'field_two'
).order_by(
'field_one'
).order_by(
'-field_two'
)
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one, field_two ORDER BY field_one ASC, field_two DESC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
class WindowFunctionTest(QueryTestCase):
def test_rank_no_over(self):
query = Query().from_table(
table=Order,
fields=[
RankField()
]
)
query_str = query.get_sql()
expected_query = 'SELECT RANK() AS "rank" FROM querybuilder_tests_order'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over(self):
query = Query().from_table(
table=Order,
fields=[
RankField(
over=QueryWindow()
)
]
)
query_str = query.get_sql()
expected_query = 'SELECT RANK() OVER () AS "rank" FROM querybuilder_tests_order'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over_order(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().order_by(
'id'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, RANK() OVER (ORDER BY id ASC) AS "rank" FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over_partition(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().partition_by(
'account_id'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, RANK() OVER (PARTITION BY account_id) AS "rank" FROM '
'querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_row_number(self):
query = Query().from_table(
table=Order,
fields=[
'*',
RowNumberField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'row_number'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'ROW_NUMBER() OVER (ORDER BY margin DESC) AS "row_number" '
'FROM querybuilder_tests_order '
'ORDER BY row_number '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().partition_by(
'account_id'
).order_by(
'id'
)
)
]
).order_by(
'-rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, '
'RANK() OVER (PARTITION BY account_id ORDER BY id ASC) AS "rank" '
'FROM querybuilder_tests_order '
'ORDER BY rank '
'DESC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_dense_rank(self):
query = Query().from_table(
table=Order,
fields=[
'*',
DenseRankField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'dense_rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'DENSE_RANK() OVER (ORDER BY margin DESC) AS "dense_rank" '
'FROM querybuilder_tests_order '
'ORDER BY dense_rank '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_percent(self):
query = Query().from_table(
table=Order,
fields=[
'*',
PercentRankField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'percent_rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'PERCENT_RANK() OVER (ORDER BY margin DESC) AS "percent_rank" '
'FROM querybuilder_tests_order '
'ORDER BY percent_rank '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_cume_dist(self):
query = Query().from_table(
table=Order,
fields=[
'*',
CumeDistField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'cume_dist'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'CUME_DIST() OVER (ORDER BY margin DESC) AS "cume_dist" '
'FROM querybuilder_tests_order '
'ORDER BY cume_dist '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_ntile(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NTileField(
num_buckets=2,
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'ntile'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'NTILE(2) OVER (ORDER BY margin DESC) AS "ntile" '
'FROM querybuilder_tests_order '
'ORDER BY ntile '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lag(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LagField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAG(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lag" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lag_default(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LagField(
'margin',
default=0,
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAG(querybuilder_tests_order.margin, 1, \'0\') OVER (ORDER BY margin DESC) AS "margin_lag" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lead(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LeadField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LEAD(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lead" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_first_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
FirstValueField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'FIRST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin DESC) AS "margin_first_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_last_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LastValueField(
'margin',
over=QueryWindow().order_by(
'margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin ASC) AS "margin_last_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_nth_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NthValueField(
'margin',
n=2,
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'NTH_VALUE(querybuilder_tests_order.margin, 2) OVER (ORDER BY margin DESC) AS "margin_nth_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_num_stddev(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NumStdDevField(
'margin',
over=QueryWindow()
)
]
).order_by(
'-margin_num_stddev'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'(CASE WHEN (STDDEV(querybuilder_tests_order.margin) OVER ()) <> 0 '
'THEN ((querybuilder_tests_order.margin - ('
'AVG(querybuilder_tests_order.margin) OVER ())) / (STDDEV(querybuilder_tests_order.margin) OVER ())) '
'ELSE 0 '
'END) '
'AS "margin_num_stddev" '
'FROM querybuilder_tests_order '
'ORDER BY margin_num_stddev '
'DESC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
| 1.164063 | 1 |
distill.py | Lukeming-tsinghua/Interpretable-NN-for-IBD-diagnosis | 0 | 3543 | import os
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import classification_report
from torch.optim import Adam
from tqdm import tqdm
from data import DataIteratorDistill
from loss import FocalLoss
from model import CNN
from torchtext import data, vocab
from args import get_args, print_args
from config import ConfigBinaryClassification
from config import ConfigBinaryClassificationDistill
from config import ConfigTripleClassification
if __name__ == "__main__":
args = get_args()
print_args(args)
if args.class_num == 2:
cfg = ConfigBinaryClassificationDistill()
elif args.class_num == 3:
cfg = ConfigTripleClassification()
else:
raise ValueError("wrong class num")
device = torch.device("cuda:%d" % args.cuda)
Data = DataIteratorDistill(config=cfg, train_batchsize=args.batch_size)
model = torch.load("checkpoints/CNN-29", map_location=device)
optimizer = Adam(model.parameters(), lr=args.lr)
criterion = FocalLoss(classes=args.class_num, device=device).to(device)
criterion_kv = nn.KLDivLoss().to(device)
alpha = 0.2
T = 2
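    # The objective blends the hard-label focal loss with a soft-label KL term computed
    # at temperature T; the KL term is scaled by T*T further down to restore its gradient
    # magnitude, as in the standard knowledge-distillation recipe (Hinton et al., 2015).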
for epoch in range(args.epoch_num):
print(epoch)
for sample in Data.train_iter:
model.train()
optimizer.zero_grad()
output = model(sample.text.permute(1, 0).to(device))
loss_f = criterion(output, sample.label.to(device))
output = F.log_softmax(output/T, 1)
score = torch.cat((sample.pred0.unsqueeze(1).to(device),
sample.pred1.unsqueeze(1).to(device)), dim=1)
score = F.softmax(score/T,1)
loss_kv = criterion_kv(output, score.to(device)) * T * T
loss = alpha * loss_f + (1 - alpha) * loss_kv
#print(loss_f.item(), loss_kv.item())
loss.backward()
optimizer.step()
with torch.no_grad():
model.eval()
preds = []
labels = []
for sample in Data.valid_iter:
output = model(sample.text.permute(1, 0).to(device))
p = output.argmax(1).cpu().tolist()
l = sample.label.tolist()
preds += p
labels += l
report = classification_report(preds, labels)
print(report)
torch.save(model, os.path.join(args.save_dir, args.save_config + str(epoch)))
| 1.8125 | 2 |
esphome/voluptuous_schema.py | TheEggi/esphomeyaml | 0 | 3575 | import difflib
import itertools
import voluptuous as vol
from esphome.py_compat import string_types
class ExtraKeysInvalid(vol.Invalid):
def __init__(self, *arg, **kwargs):
self.candidates = kwargs.pop('candidates')
vol.Invalid.__init__(self, *arg, **kwargs)
def ensure_multiple_invalid(err):
if isinstance(err, vol.MultipleInvalid):
return err
return vol.MultipleInvalid(err)
# pylint: disable=protected-access, unidiomatic-typecheck
class _Schema(vol.Schema):
"""Custom cv.Schema that prints similar keys on error."""
def __init__(self, schema, extra=vol.PREVENT_EXTRA, extra_schemas=None):
super(_Schema, self).__init__(schema, extra=extra)
# List of extra schemas to apply after validation
        # Should be used sparingly, as it's not a very clean, voluptuous-style way of
        # doing things.
self._extra_schemas = extra_schemas or []
def __call__(self, data):
res = super(_Schema, self).__call__(data)
for extra in self._extra_schemas:
try:
res = extra(res)
except vol.Invalid as err:
raise ensure_multiple_invalid(err)
return res
def _compile_mapping(self, schema, invalid_msg=None):
invalid_msg = invalid_msg or 'mapping value'
# Check some things that ESPHome's schemas do not allow
# mostly to keep the logic in this method sane (so these may be re-added if needed).
for key in schema:
if key is vol.Extra:
raise ValueError("ESPHome does not allow vol.Extra")
if isinstance(key, vol.Remove):
raise ValueError("ESPHome does not allow vol.Remove")
if isinstance(key, vol.primitive_types):
raise ValueError("All schema keys must be wrapped in cv.Required or cv.Optional")
# Keys that may be required
all_required_keys = set(key for key in schema if isinstance(key, vol.Required))
# Keys that may have defaults
all_default_keys = set(key for key in schema if isinstance(key, vol.Optional))
# Recursively compile schema
_compiled_schema = {}
for skey, svalue in vol.iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
# Sort compiled schema (probably not necessary for esphome, but leave it here just in case)
candidates = list(vol.schema_builder._iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some
# optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in vol.primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, vol.Marker) and type(skey.schema) in vol.primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be
# applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
key_names = []
for skey in schema:
if isinstance(skey, string_types):
key_names.append(skey)
elif isinstance(skey, vol.Marker) and isinstance(skey.schema, string_types):
key_names.append(skey.schema)
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
            # The type(out) is used to retain ordering in case an ordered
            # map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, vol.Undefined) and key.schema not in key_value_map:
# A default value has been specified for this missing key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []),
additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except vol.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
try:
cval = cvalue(key_path, value)
out[new_key] = cval
except vol.MultipleInvalid as e:
exception_errors.extend(e.errors)
except vol.Invalid as e:
exception_errors.append(e)
if exception_errors:
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if self.extra == vol.ALLOW_EXTRA:
out[key] = value
elif self.extra != vol.REMOVE_EXTRA:
if isinstance(key, string_types) and key_names:
matches = difflib.get_close_matches(key, key_names)
errors.append(ExtraKeysInvalid('extra keys not allowed', key_path,
candidates=matches))
else:
errors.append(vol.Invalid('extra keys not allowed', key_path))
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = getattr(key, 'msg', None) or 'required key not provided'
errors.append(vol.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise vol.MultipleInvalid(errors)
return out
return validate_mapping
def add_extra(self, validator):
validator = _Schema(validator)
self._extra_schemas.append(validator)
return self
# pylint: disable=arguments-differ
def extend(self, *schemas, **kwargs):
extra = kwargs.pop('extra', None)
if kwargs:
raise ValueError
if not schemas:
return self.extend({})
if len(schemas) != 1:
ret = self
for schema in schemas:
ret = ret.extend(schema)
return ret
schema = schemas[0]
if isinstance(schema, vol.Schema):
schema = schema.schema
ret = super(_Schema, self).extend(schema, extra=extra)
return _Schema(ret.schema, extra=ret.extra, extra_schemas=self._extra_schemas)
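# Hedged usage sketch (editor addition, not part of the upstream esphome module): the
# schema keys and config values below are made up, and this assumes a voluptuous release
# compatible with the private helpers used above. It shows the "similar keys" hint that
# ExtraKeysInvalid carries on top of plain voluptuous errors.
if __name__ == '__main__':
    schema = _Schema({vol.Required('name'): str, vol.Optional('update_interval'): int})
    try:
        schema({'name': 'sensor1', 'update_intervall': 60})  # note the typo in the key
    except vol.MultipleInvalid as errs:
        for err in errs.errors:
            if isinstance(err, ExtraKeysInvalid):
                print(err, '- did you mean:', err.candidates)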
| 1.789063 | 2 |
withings_api/const.py | tiloc/python_withings_api | 0 | 3583 | """Constant values."""
STATUS_SUCCESS = (0,)
STATUS_AUTH_FAILED = (100, 101, 102, 200, 401)
STATUS_INVALID_PARAMS = (
201,
202,
203,
204,
205,
206,
207,
208,
209,
210,
211,
212,
213,
216,
217,
218,
220,
221,
223,
225,
227,
228,
229,
230,
234,
235,
236,
238,
240,
241,
242,
243,
244,
245,
246,
247,
248,
249,
250,
251,
252,
254,
260,
261,
262,
263,
264,
265,
266,
267,
271,
272,
275,
276,
283,
284,
285,
286,
287,
288,
290,
293,
294,
295,
297,
300,
301,
302,
303,
304,
321,
323,
324,
325,
326,
327,
328,
329,
330,
331,
332,
333,
334,
335,
336,
337,
338,
339,
340,
341,
342,
343,
344,
345,
346,
347,
348,
349,
350,
351,
352,
353,
380,
381,
382,
400,
501,
502,
503,
504,
505,
506,
509,
510,
511,
523,
532,
3017,
3018,
3019,
)
STATUS_UNAUTHORIZED = (214, 277, 2553, 2554, 2555)
STATUS_ERROR_OCCURRED = (
215,
219,
222,
224,
226,
231,
233,
237,
253,
255,
256,
257,
258,
259,
268,
269,
270,
273,
274,
278,
279,
280,
281,
282,
289,
291,
292,
296,
298,
305,
306,
308,
309,
310,
311,
312,
313,
314,
315,
316,
317,
318,
319,
320,
322,
370,
371,
372,
373,
374,
375,
383,
391,
402,
516,
517,
518,
519,
520,
521,
525,
526,
527,
528,
529,
530,
531,
533,
602,
700,
1051,
1052,
1053,
1054,
2551,
2552,
2556,
2557,
2558,
2559,
3000,
3001,
3002,
3003,
3004,
3005,
3006,
3007,
3008,
3009,
3010,
3011,
3012,
3013,
3014,
3015,
3016,
3020,
3021,
3022,
3023,
3024,
5000,
5001,
5005,
5006,
6000,
6010,
6011,
9000,
10000,
)
STATUS_TIMEOUT = (522,)
STATUS_BAD_STATE = (524,)
STATUS_TOO_MANY_REQUESTS = (601,)
| 0.388672 | 0 |
eust/tables/data.py | rasmuse/eust | 1 | 3591 | # -*- coding: utf-8 -*-
import re
import gzip
import pandas as pd
import numpy as np
from eust.core import _download_file, conf
_DIMENSION_NAME_RE = re.compile(r"^[a-z_0-9]+$")
_YEAR_RE = re.compile(r"^(1|2)[0-9]{3}$")
def _is_valid_dimension_name(s: str) -> bool:
return bool(_DIMENSION_NAME_RE.match(s))
def _split_values_flags(series: pd.Series) -> pd.DataFrame:
split = series.str.split(" ")
df = pd.DataFrame(
{
"value": split.apply(lambda l: l[0] if l else None),
"flag": split.apply(lambda l: l[1] if l and len(l) > 1 else None),
}
)
return df
def _set_multiindex_dtype(index, level, type_):
index_df = index.to_frame()
index_df[level] = index_df[level].astype(type_)
new_index = index_df.set_index(index.names).index
return new_index
def _read_tsv(path_or_buffer) -> pd.DataFrame:
d = pd.read_csv(path_or_buffer, sep="\t", header=0, dtype=str)
top_left_cell = d.columns[0]
row_dimension_names, header_dimension_name = top_left_cell.split("\\")
row_dimension_names = row_dimension_names.split(",")
index_data = d[top_left_cell]
del d[top_left_cell]
assert len(set(index_data)) == len(index_data) # no duplicates
assert len(row_dimension_names) >= 1
d.columns.name = header_dimension_name
index_data = index_data.apply(lambda s: s.split(","))
d.index = pd.MultiIndex.from_arrays(
list(zip(*index_data)), names=row_dimension_names,
)
# cannot handle multidimensional column labels
d = d.stack()
assert set(d.apply(type)) == {str}
assert isinstance(d, pd.Series), d.columns
assert all(map(_is_valid_dimension_name, d.index.names))
d.index.set_levels(
[level.str.strip() for level in d.index.levels], inplace=True
)
d = _split_values_flags(d)
d.loc[d["value"] == ":", "value"] = np.nan
d["value"] = d["value"].astype(float)
if "time" in d.index.names:
time_strings = d.index.unique("time")
matches_year = (_YEAR_RE.match(s) for s in time_strings)
if all(matches_year):
d.index = _set_multiindex_dtype(d.index, "time", int)
d = d.sort_index()
return d
_TSV_GZ_FILENAME = "data.tsv.gz"
_HDF_FILENAME = "data.h5"
_HDF_TABLE_PATH = "eurostat_table"
def _read_tsv_gz(path_or_buffer) -> pd.DataFrame:
with gzip.open(path_or_buffer, "rb") as f:
return _read_tsv(f)
def _download_tsv_gz(url, dst_dir):
path = dst_dir / _TSV_GZ_FILENAME
_download_file(url, path)
def _read(the_dir):
hdf_path = the_dir / _HDF_FILENAME
tsv_gz_path = the_dir / _TSV_GZ_FILENAME
try:
data = pd.read_hdf(hdf_path, _HDF_TABLE_PATH)
except FileNotFoundError:
data = _read_tsv_gz(tsv_gz_path)
data.to_hdf(
hdf_path,
_HDF_TABLE_PATH,
complevel=conf["hdf_complevel"],
complib=conf["hdf_complib"],
)
# Replace empty flags by None (issue #3)
#
# Doing it at this point so that the null flag is saved in the HDF
# file as a string, for performance reasons.
# This is a pandas PerformanceWarning:
# "your performance may suffer as PyTables will pickle object types
# that it cannot map directly to c-types
# [inferred_type->mixed,key->block0_values] [items->['flag']]"
data["flag"] = data["flag"].replace({"": None})
return data
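# Hedged worked example (editor addition, not part of the original module): a tiny
# in-memory table in the layout _read_tsv() expects. The top-left cell names the row
# dimensions before the backslash and the column dimension after it, rows carry
# comma-separated index values, and a cell may append a flag after its value. All values
# are made up; running this assumes the pandas version the module was written against.
if __name__ == '__main__':
    import io
    sample = (
        "unit,geo\\time\t2019\t2020\n"
        "KG,SE\t1.5 p\t2.0\n"
        "KG,DK\t: \t3.5 e\n"
    )
    print(_read_tsv(io.StringIO(sample)))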
| 1.875 | 2 |
starteMessung.py | jkerpe/TroubleBubble | 0 | 3615 | from datetime import datetime
from pypylon import pylon
import nimmAuf
import smbus2
import os
import argparse
import bestimmeVolumen
from threading import Thread
import time
programmstart = time.time()
# Parse command-line arguments (e.g. run 'starteMessung.py -n 100' in the terminal)
ap = argparse.ArgumentParser(description="""Skript zum Aufnehmen von Bildern der Teststrecke und der
Volumenbestimmung von Luftblasen""")
ap.add_argument("-n", "--number", default=400, type=int, help="Anzahl an Frames die aufgenommen werden sollen. Default: 400 Bilder")
ap.add_argument("-fr", "--framerate", default=100, type=int, help="Framerate in fps. Richtwerte: <Flow 3 ml/s:50 fps, 3-6ml/s:100 fps, >6ml/s:200 fps; Default: 100 fps")
args = vars(ap.parse_args())
# Extract the parsed arguments
numberOfImagesToGrab = args['number']
framerate = args['framerate']
if __name__ == '__main__':
startzeit = time.time()
    # Check whether a camera is connected
    devices = pylon.TlFactory.GetInstance().EnumerateDevices()
    if len(devices) == 0:
        print("Keine Kamera angeschlossen oder Kamera woanders geöffnet.")
        exit()  # abort if no camera is found
    # Check whether the pressure sensor is connected
    try:
        bus = smbus2.SMBus(0)
        bus.read_i2c_block_data(0x40, 0, 2)  # receive 2 bytes
except OSError:
print("Kein Drucksensor angeschlossen")
exit()
    # Generate an individual directory name from the current time and the parameters
    dirname = f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
    os.mkdir(dirname)  # create the directory
    print(f"Ordnername: {dirname}")
    beginn = time.time()-programmstart
    # Start the capture and processing threads
t_aufnahme = Thread(target=nimmAuf.starte, args=(dirname, numberOfImagesToGrab, framerate, startzeit))
t_tracke = Thread(target=bestimmeVolumen.tracke, args=(dirname, numberOfImagesToGrab))
t_aufnahme.start()
t_tracke.start()
t_aufnahme.join()
t_tracke.join()
| 1.734375 | 2 |
test/IECoreMaya/ImageConverterTest.py | bradleyhenke/cortex | 386 | 3623 | ##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreImage
import IECoreMaya
class ImageConverterTest( IECoreMaya.TestCase ) :
def test( self ) :
imageA = IECore.Reader.create( "test/IECoreImage/data/exr/colorBarsWithAlpha.exr" ).read()
toMaya = IECoreMaya.ToMayaImageConverter( imageA )
mImage = maya.OpenMaya.MImage()
toMaya.convert( mImage )
fromMaya = IECoreMaya.FromMayaImageConverter( mImage )
imageB = fromMaya.convert()
self.assertFalse(
IECoreImage.ImageDiffOp()( imageA=imageA, imageB=imageB, maxError=1.0/256 ).value
)
if __name__ == "__main__":
IECoreMaya.TestProgram()
| 1.335938 | 1 |
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py | dios-game/dios-cocos | 1 | 3655 |
import cocos
from MultiLanguage import MultiLanguage
from package.helper import ProjectHelper
class FrameworkAdd(cocos.CCPlugin):
@staticmethod
def plugin_name():
return "add-framework"
@staticmethod
def brief_description():
return MultiLanguage.get_string('FRAMEWORK_ADD_BRIEF')
# parse arguments
def parse_args(self, argv):
from argparse import ArgumentParser
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument("name", metavar="NAME", help=MultiLanguage.get_string('FRAMEWORK_ADD_ARG_NAME'))
return parser.parse_args(argv)
def run(self, argv):
args = self.parse_args(argv)
name = args.name
project = ProjectHelper.get_current_project()
ProjectHelper.add_framework(project, name)
| 1.484375 | 1 |
genesis/project.py | genialis/genesis-genapi | 3 | 3671 | """Project"""
from __future__ import absolute_import, division, print_function, unicode_literals
class GenProject(object):
    """Genesis project annotation."""
def __init__(self, data, gencloud):
for field in data:
setattr(self, field, data[field])
self.gencloud = gencloud
self.id = getattr(self, 'id', None) # pylint: disable=invalid-name
self.name = getattr(self, 'name', None)
def data_types(self):
"""Return a list of data types."""
data = self.gencloud.project_data(self.id)
return sorted(set(d.type for d in data))
def data(self, **query):
"""Query for Data object annotation."""
data = self.gencloud.project_data(self.id)
query['case_ids__contains'] = self.id
ids = set(d['id'] for d in self.gencloud.api.dataid.get(**query)['objects'])
return [d for d in data if d.id in ids]
def find(self, filter_str):
"""Filter Data object annotation."""
raise NotImplementedError()
def __str__(self):
return self.name or 'n/a'
def __repr__(self):
return u"GenProject: {} - {}".format(self.id, self.name)
| 1.765625 | 2 |
sdk/python/pulumi_google_native/testing/v1/test_matrix.py | AaronFriel/pulumi-google-native | 44 | 3679 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TestMatrixArgs', 'TestMatrix']
@pulumi.input_type
class TestMatrixArgs:
def __init__(__self__, *,
environment_matrix: pulumi.Input['EnvironmentMatrixArgs'],
result_storage: pulumi.Input['ResultStorageArgs'],
test_specification: pulumi.Input['TestSpecificationArgs'],
client_info: Optional[pulumi.Input['ClientInfoArgs']] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a TestMatrix resource.
:param pulumi.Input['EnvironmentMatrixArgs'] environment_matrix: The devices the tests are being executed on.
:param pulumi.Input['ResultStorageArgs'] result_storage: Where the results for the matrix are written.
:param pulumi.Input['TestSpecificationArgs'] test_specification: How to run the test.
:param pulumi.Input['ClientInfoArgs'] client_info: Information about the client which invoked the test.
:param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
:param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
:param pulumi.Input[str] project: The cloud project that owns the test matrix.
"""
pulumi.set(__self__, "environment_matrix", environment_matrix)
pulumi.set(__self__, "result_storage", result_storage)
pulumi.set(__self__, "test_specification", test_specification)
if client_info is not None:
pulumi.set(__self__, "client_info", client_info)
if fail_fast is not None:
pulumi.set(__self__, "fail_fast", fail_fast)
if flaky_test_attempts is not None:
pulumi.set(__self__, "flaky_test_attempts", flaky_test_attempts)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
@property
@pulumi.getter(name="environmentMatrix")
def environment_matrix(self) -> pulumi.Input['EnvironmentMatrixArgs']:
"""
The devices the tests are being executed on.
"""
return pulumi.get(self, "environment_matrix")
@environment_matrix.setter
def environment_matrix(self, value: pulumi.Input['EnvironmentMatrixArgs']):
pulumi.set(self, "environment_matrix", value)
@property
@pulumi.getter(name="resultStorage")
def result_storage(self) -> pulumi.Input['ResultStorageArgs']:
"""
Where the results for the matrix are written.
"""
return pulumi.get(self, "result_storage")
@result_storage.setter
def result_storage(self, value: pulumi.Input['ResultStorageArgs']):
pulumi.set(self, "result_storage", value)
@property
@pulumi.getter(name="testSpecification")
def test_specification(self) -> pulumi.Input['TestSpecificationArgs']:
"""
How to run the test.
"""
return pulumi.get(self, "test_specification")
@test_specification.setter
def test_specification(self, value: pulumi.Input['TestSpecificationArgs']):
pulumi.set(self, "test_specification", value)
@property
@pulumi.getter(name="clientInfo")
def client_info(self) -> Optional[pulumi.Input['ClientInfoArgs']]:
"""
Information about the client which invoked the test.
"""
return pulumi.get(self, "client_info")
@client_info.setter
def client_info(self, value: Optional[pulumi.Input['ClientInfoArgs']]):
pulumi.set(self, "client_info", value)
@property
@pulumi.getter(name="failFast")
def fail_fast(self) -> Optional[pulumi.Input[bool]]:
"""
If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
"""
return pulumi.get(self, "fail_fast")
@fail_fast.setter
def fail_fast(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "fail_fast", value)
@property
@pulumi.getter(name="flakyTestAttempts")
def flaky_test_attempts(self) -> Optional[pulumi.Input[int]]:
"""
The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
"""
return pulumi.get(self, "flaky_test_attempts")
@flaky_test_attempts.setter
def flaky_test_attempts(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "flaky_test_attempts", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The cloud project that owns the test matrix.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
class TestMatrix(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
__props__=None):
"""
Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ClientInfoArgs']] client_info: Information about the client which invoked the test.
:param pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']] environment_matrix: The devices the tests are being executed on.
:param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
:param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
:param pulumi.Input[str] project: The cloud project that owns the test matrix.
:param pulumi.Input[pulumi.InputType['ResultStorageArgs']] result_storage: Where the results for the matrix are written.
:param pulumi.Input[pulumi.InputType['TestSpecificationArgs']] test_specification: How to run the test.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TestMatrixArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param TestMatrixArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TestMatrixArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
fail_fast: Optional[pulumi.Input[bool]] = None,
flaky_test_attempts: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TestMatrixArgs.__new__(TestMatrixArgs)
__props__.__dict__["client_info"] = client_info
if environment_matrix is None and not opts.urn:
raise TypeError("Missing required property 'environment_matrix'")
__props__.__dict__["environment_matrix"] = environment_matrix
__props__.__dict__["fail_fast"] = fail_fast
__props__.__dict__["flaky_test_attempts"] = flaky_test_attempts
__props__.__dict__["project"] = project
__props__.__dict__["request_id"] = request_id
if result_storage is None and not opts.urn:
raise TypeError("Missing required property 'result_storage'")
__props__.__dict__["result_storage"] = result_storage
if test_specification is None and not opts.urn:
raise TypeError("Missing required property 'test_specification'")
__props__.__dict__["test_specification"] = test_specification
__props__.__dict__["invalid_matrix_details"] = None
__props__.__dict__["outcome_summary"] = None
__props__.__dict__["state"] = None
__props__.__dict__["test_executions"] = None
__props__.__dict__["test_matrix_id"] = None
__props__.__dict__["timestamp"] = None
super(TestMatrix, __self__).__init__(
'google-native:testing/v1:TestMatrix',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TestMatrix':
"""
Get an existing TestMatrix resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TestMatrixArgs.__new__(TestMatrixArgs)
__props__.__dict__["client_info"] = None
__props__.__dict__["environment_matrix"] = None
__props__.__dict__["fail_fast"] = None
__props__.__dict__["flaky_test_attempts"] = None
__props__.__dict__["invalid_matrix_details"] = None
__props__.__dict__["outcome_summary"] = None
__props__.__dict__["project"] = None
__props__.__dict__["result_storage"] = None
__props__.__dict__["state"] = None
__props__.__dict__["test_executions"] = None
__props__.__dict__["test_matrix_id"] = None
__props__.__dict__["test_specification"] = None
__props__.__dict__["timestamp"] = None
return TestMatrix(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="clientInfo")
def client_info(self) -> pulumi.Output['outputs.ClientInfoResponse']:
"""
Information about the client which invoked the test.
"""
return pulumi.get(self, "client_info")
@property
@pulumi.getter(name="environmentMatrix")
def environment_matrix(self) -> pulumi.Output['outputs.EnvironmentMatrixResponse']:
"""
The devices the tests are being executed on.
"""
return pulumi.get(self, "environment_matrix")
@property
@pulumi.getter(name="failFast")
def fail_fast(self) -> pulumi.Output[bool]:
"""
If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
"""
return pulumi.get(self, "fail_fast")
@property
@pulumi.getter(name="flakyTestAttempts")
def flaky_test_attempts(self) -> pulumi.Output[int]:
"""
The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
"""
return pulumi.get(self, "flaky_test_attempts")
@property
@pulumi.getter(name="invalidMatrixDetails")
def invalid_matrix_details(self) -> pulumi.Output[str]:
"""
Describes why the matrix is considered invalid. Only useful for matrices in the INVALID state.
"""
return pulumi.get(self, "invalid_matrix_details")
@property
@pulumi.getter(name="outcomeSummary")
def outcome_summary(self) -> pulumi.Output[str]:
"""
Output Only. The overall outcome of the test. Only set when the test matrix state is FINISHED.
"""
return pulumi.get(self, "outcome_summary")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The cloud project that owns the test matrix.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="resultStorage")
def result_storage(self) -> pulumi.Output['outputs.ResultStorageResponse']:
"""
Where the results for the matrix are written.
"""
return pulumi.get(self, "result_storage")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
Indicates the current progress of the test matrix.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="testExecutions")
def test_executions(self) -> pulumi.Output[Sequence['outputs.TestExecutionResponse']]:
"""
The list of test executions that the service creates for this matrix.
"""
return pulumi.get(self, "test_executions")
@property
@pulumi.getter(name="testMatrixId")
def test_matrix_id(self) -> pulumi.Output[str]:
"""
Unique id set by the service.
"""
return pulumi.get(self, "test_matrix_id")
@property
@pulumi.getter(name="testSpecification")
def test_specification(self) -> pulumi.Output['outputs.TestSpecificationResponse']:
"""
How to run the test.
"""
return pulumi.get(self, "test_specification")
@property
@pulumi.getter
def timestamp(self) -> pulumi.Output[str]:
"""
The time this test matrix was initially created.
"""
return pulumi.get(self, "timestamp")
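# Hedged usage sketch (editor addition): only the argument classes confirmed in this
# module are referenced; their nested fields live in ._inputs (not shown here), so they
# are elided with "..." placeholders rather than guessed.
#
# matrix = TestMatrix("smoke-test",
#     environment_matrix=EnvironmentMatrixArgs(...),    # devices to run on
#     result_storage=ResultStorageArgs(...),            # where results are written
#     test_specification=TestSpecificationArgs(...))    # how to run the test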
| 1.445313 | 1 |
src/sot_talos_balance/test/test_feet_admittance.py | imaroger/sot-talos-balance | 0 | 3687 | '''Test feet admittance control'''
from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient
try:
# Python 2
input = raw_input # noqa
except NameError:
pass
run_test('appli_feet_admittance.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
print('Set saturation value')
runCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')
input("Wait before dumping the data")
runCommandClient('dump_tracer(robot.tracer)')
| 0.933594 | 1 |
apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 1 | 3695 | import torch
from torch.autograd import Variable
from torch.autograd.function import Function, once_differentiable
import apex_C
def check_contig_cuda(tensors, names):
for tensor, name in zip(tensors, names):
if not tensor.is_contiguous():
raise RuntimeError(name+" with size {} is not contiguous"
.format(tensor.size()))
if not tensor.is_cuda:
raise RuntimeError(name+".is_cuda = False."
"Currently, only cuda tensors are supported.")
class Fused_Weight_Norm(Function):
"""
Custom autograd function that implements weight norm, as presented in
`<https://arxiv.org/abs/1602.07868>`_,
along a tensor's slowest or
fastest dimension using fused kernel launches for the forward and backward passes.
Accepts fp32 or fp16 input; the output type will match the input type.
Within the kernels, all calculations are performed in fp32 for numerical stability, regardless
of input/output precision.
"""
@staticmethod
def forward(ctx, input, g, dim=0):
"""
Args:
input(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **v** in the paper. ``input`` should be contiguous.
g(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **g** in the paper. ``g`` should be the same type as ``input``.
dim(int, optional, default=0): Dimension across which to perform weightnorm. Currently, only the first or last dimension of the input tensor is supported.
Returns:
Output tensor corresponding to **w** in the paper. Output type and precision will match
type and precision of ``input``.
"""
# torch.cuda.nvtx.range_push("FusedNorm.forward, input.size() = {}"
# .format(input.size()))
check_contig_cuda((input,g),("input","g"))
"""
This is ok, new() treats a torch.Size object properly.
No need to unpack with an asterisk via new(*input.size()).
"""
output = input.new(input.size()).contiguous()
"""
For output with size (slow, faster, faster, ...fastest), we want
norms with size (slow, 1, 1, ...1), so that if you want retrieve norms
and apply the same normalizing factors to another Tensor "t" with the
same size as output, "t/norms" will broadcast each element of norms
across the corresponding slowest dim of t.
"""
if dim == 0:
norm_size = (output.size(0),) + (1,)*(output.dim() - 1)
elif dim == output.dim() - 1:
norm_size = (1,)*(output.dim() - 1) + (output.size(-1),)
else:
raise RuntimeError("Currently, Fused_Weight_Norm only supports first or last dimension.")
norms = torch.cuda.FloatTensor(*norm_size).contiguous()
"""
Beware: If you call the following:
norms = torch.cuda.FloatTensor(norm_size).contiguous()
the constructor sees a tuple:
FloatTensor( (output_size(0),1,1,...) )
and creates a 1D tensor with values from the tuple:
[output_size(0),1,1,...].
"""
apex_C.weight_norm_fwd(output, norms, input, g, dim)
ctx.save_for_backward(input, g)
# save_for_backward can only save input or output tensors,
# use ctx state to save the norms and dimension:
ctx.norms = norms
ctx.dim = dim
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
"""
Args:
grad_output(torch.cuda.FloatTensor or torch.cuda.HalfTensor): Gradient of loss with respect to output **w**. ``grad_output`` should be contiguous for performance.
Returns:
Gradient of loss with respect to ``input`` and ``g``. The precision of these gradients will match the precision of ``grad_input``.
"""
check_contig_cuda((grad_output), ("grad_output"))
savedInput, savedg = ctx.saved_tensors
savedNorms = ctx.norms
# We expect that these .contiguous() calls will be no-ops. They're present for safety.
grad_output_contig = grad_output.contiguous()
grad_input = grad_output_contig.new(grad_output.size()).contiguous()
grad_g = savedg.new(savedg.size()).contiguous()
apex_C.weight_norm_bwd(grad_input,
grad_g,
grad_output_contig,
savedInput,
savedg,
savedNorms,
ctx.dim)
return grad_input, grad_g, None
| 2.5 | 2 |
Sorting/insertion_sort.py | lakshyarawal/pythonPractice | 0 | 3711 | """ Insertion Sort Algorithm:"""
"""Implementation"""
def insertion_sort(arr) -> list:
n = len(arr)
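    # Loop invariant: before each outer iteration, arr[:i] is already sorted; the inner
    # loop swaps arr[i] leftwards until it reaches its place. Worst case O(n^2)
    # comparisons, but only O(n) when the input is already sorted.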
for i in range(1, n):
swap_index = i
for j in range(i-1, -1, -1):
if arr[swap_index] < arr[j]:
arr[swap_index], arr[j] = arr[j], arr[swap_index]
swap_index -= 1
else:
break
return arr
def main():
arr_input = [10, 5, 30, 1, 2, 5, 10, 10]
a2 = insertion_sort(arr_input)
print(a2)
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| 2.75 | 3 |
test/test_simple_compression.py | jayvdb/brotlipy | 0 | 3735 | # -*- coding: utf-8 -*-
"""
test_simple_compression
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for compression of single chunks.
"""
import brotli
import pytest
from hypothesis import given
from hypothesis.strategies import binary, integers, sampled_from, one_of
def test_roundtrip_compression_with_files(simple_compressed_file):
"""
Roundtripping data through the compressor works correctly.
"""
with open(simple_compressed_file[0], 'rb') as f:
uncompressed_data = f.read()
assert brotli.decompress(
brotli.compress(uncompressed_data)
) == uncompressed_data
@given(
chunk_size=integers(min_value=1, max_value=2**12),
mode=sampled_from(list(brotli.BrotliEncoderMode)),
quality=integers(min_value=0, max_value=11),
lgwin=integers(min_value=10, max_value=24),
lgblock=one_of(
integers(min_value=0, max_value=0),
integers(min_value=16, max_value=24)
),
)
def test_streaming_compression(one_compressed_file,
chunk_size,
mode,
quality,
lgwin,
lgblock):
"""
Confirm that the streaming compressor works as expected.
"""
compressed_chunks = []
c = brotli.Compressor(
mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
)
with open(one_compressed_file, 'rb') as f:
while True:
next_data = f.read(chunk_size)
if not next_data:
break
compressed_chunks.append(c.compress(next_data))
compressed_chunks.append(c.finish())
decompressed = brotli.decompress(b''.join(compressed_chunks))
with open(one_compressed_file, 'rb') as f:
assert decompressed == f.read()
@given(
chunk_size=integers(min_value=1, max_value=2**12),
mode=sampled_from(list(brotli.BrotliEncoderMode)),
quality=integers(min_value=0, max_value=11),
lgwin=integers(min_value=10, max_value=24),
lgblock=one_of(
integers(min_value=0, max_value=0),
integers(min_value=16, max_value=24)
),
)
def test_streaming_compression_flush(one_compressed_file,
chunk_size,
mode,
quality,
lgwin,
lgblock):
"""
Confirm that the streaming compressor works as expected, including flushes
after each chunk.
"""
compressed_chunks = []
c = brotli.Compressor(
mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock
)
with open(one_compressed_file, 'rb') as f:
while True:
next_data = f.read(chunk_size)
if not next_data:
break
compressed_chunks.append(c.compress(next_data))
compressed_chunks.append(c.flush())
compressed_chunks.append(c.finish())
decompressed = brotli.decompress(b''.join(compressed_chunks))
with open(one_compressed_file, 'rb') as f:
assert decompressed == f.read()
@given(binary())
def test_compressed_data_roundtrips(s):
assert brotli.decompress(brotli.compress(s)) == s
@given(binary(), binary())
def test_compressed_data_with_dictionaries(s, dictionary):
d = brotli.Decompressor(dictionary)
compressed = brotli.compress(s, dictionary=dictionary)
uncompressed = d.decompress(compressed)
assert uncompressed == s
@pytest.mark.parametrize(
"params",
[
{"mode": 52},
{"quality": 52},
{"lgwin": 52},
{"lgblock": 52},
]
)
@pytest.mark.parametrize("exception_cls", [brotli.Error, brotli.error])
def test_bad_compressor_parameters(params, exception_cls):
with pytest.raises(exception_cls):
brotli.Compressor(**params)
| 1.914063 | 2 |
lib/python3.7/site-packages/ldap/controls/deref.py | aonrobot/MSC-thug-auth-provider | 1 | 3751 | # -*- coding: utf-8 -*-
"""
ldap.controls.deref - classes for the LDAP Dereference control
(see https://tools.ietf.org/html/draft-masarati-ldap-deref)
See https://www.python-ldap.org/ for project details.
"""
__all__ = [
'DEREF_CONTROL_OID',
'DereferenceControl',
]
import ldap.controls
from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS
import pyasn1_modules.rfc2251
from pyasn1.type import namedtype,univ,tag
from pyasn1.codec.ber import encoder,decoder
from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue
DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16'
# Request types
#---------------------------------------------------------------------------
# For compatibility with ASN.1 declaration in I-D
AttributeList = AttributeDescriptionList
class DerefSpec(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'derefAttr',
AttributeDescription()
),
namedtype.NamedType(
'attributes',
AttributeList()
),
)
class DerefSpecs(univ.SequenceOf):
componentType = DerefSpec()
# Response types
#---------------------------------------------------------------------------
class AttributeValues(univ.SetOf):
componentType = AttributeValue()
class PartialAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeDescription()),
namedtype.NamedType('vals', AttributeValues()),
)
class PartialAttributeList(univ.SequenceOf):
componentType = PartialAttribute()
tagSet = univ.Sequence.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)
)
class DerefRes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('derefAttr', AttributeDescription()),
namedtype.NamedType('derefVal', LDAPDN()),
namedtype.OptionalNamedType('attrVals', PartialAttributeList()),
)
class DerefResultControlValue(univ.SequenceOf):
componentType = DerefRes()
class DereferenceControl(LDAPControl):
controlType = DEREF_CONTROL_OID
def __init__(self,criticality=False,derefSpecs=None):
LDAPControl.__init__(self,self.controlType,criticality)
self.derefSpecs = derefSpecs or {}
def _derefSpecs(self):
deref_specs = DerefSpecs()
i = 0
for deref_attr,deref_attribute_names in self.derefSpecs.items():
deref_spec = DerefSpec()
deref_attributes = AttributeList()
for j in range(len(deref_attribute_names)):
deref_attributes.setComponentByPosition(j,deref_attribute_names[j])
deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr))
deref_spec.setComponentByName('attributes',deref_attributes)
deref_specs.setComponentByPosition(i,deref_spec)
i += 1
return deref_specs
def encodeControlValue(self):
return encoder.encode(self._derefSpecs())
def decodeControlValue(self,encodedControlValue):
decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue())
self.derefRes = {}
for deref_res in decodedValue:
deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2]
partial_attrs_dict = {
str(tv[0]): [str(v) for v in tv[1]]
for tv in deref_vals or []
}
try:
self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict))
except KeyError:
self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)]
KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl
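# Hedged usage sketch (editor addition, not part of the upstream python-ldap module):
# builds and BER-encodes a request control asking the server to dereference 'member'
# and return 'uid' and 'cn' of each dereferenced entry. The attribute names are
# placeholders; in real use the control is passed via serverctrls=[...] to
# LDAPObject.search_ext(), and the decoded response shows up as .derefRes on the
# control instance returned with the search result.
if __name__ == '__main__':
    dc = DereferenceControl(criticality=True, derefSpecs={'member': ['uid', 'cn']})
    print(repr(dc.encodeControlValue()))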
| 1.304688 | 1 |
wagtail/wagtailadmin/menu.py | digitalmarmalade/wagtail | 1 | 3759 | from django.utils.text import slugify
from django.utils.html import format_html
class MenuItem(object):
def __init__(self, label, url, name=None, classnames='', order=1000):
self.label = label
self.url = url
self.classnames = classnames
self.name = (name or slugify(unicode(label)))
self.order = order
def render_html(self):
return format_html(
u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>""",
self.name, self.url, self.classnames, self.label)
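# Hedged usage illustration (editor addition): the label, URL and class names below are
# placeholders. Under Python 2, which this module targets via the `unicode` call above,
# the default name is the slugified label:
# item = MenuItem('Explorer', '/admin/pages/', classnames='icon icon-folder', order=100)
# item.render_html()
# -> u'<li class="menu-explorer"><a href="/admin/pages/" class="icon icon-folder">Explorer</a></li>'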
| 1.546875 | 2 |
tests/test_dice.py | mehulsatardekar/dice-on-demand | 1 | 3799 | import unittest
import app
def test_test():
assert app.test() == "Works!"
| 0.636719 | 1 |
fetch_data.py | bitfag/bt-macd-binance | 0 | 3815 | #!/usr/bin/env python
from btmacd.binance_fetcher import BinanceFetcher
def main():
fetcher = BinanceFetcher("BTCUSDT", filename="binance_ohlc.csv", start_date="01.01.2018")
fetcher.fetch()
if __name__ == "__main__":
main()
| 1.0625 | 1 |
matrixprofile/algorithms/snippets.py | KSaiRahul21/matrixprofile | 0 | 3831 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mpdist import mpdist_vector
def snippets(ts, snippet_size, num_snippets=2, window_size=None):
"""
The snippets algorithm is used to summarize your time series by
identifying N number of representative subsequences. If you want to
identify typical patterns in your time series, then this is the algorithm
to use.
Parameters
----------
ts : array_like
The time series.
snippet_size : int
The size of snippet desired.
num_snippets : int, Default 2
The number of snippets you would like to find.
window_size : int, Default (snippet_size / 2)
The window size.
Returns
-------
list : snippets
A list of snippets as dictionary objects with the following structure.
>>> {
>>> fraction: fraction of the snippet,
>>> index: the index of the snippet,
>>> snippet: the snippet values
>>> }
"""
ts = core.to_np_array(ts).astype('d')
n = len(ts)
if not isinstance(snippet_size, int) or snippet_size < 4:
raise ValueError('snippet_size must be an integer >= 4')
if n < (2 * snippet_size):
raise ValueError('Time series is too short relative to snippet length')
if not window_size:
window_size = int(np.floor(snippet_size / 2))
if window_size >= snippet_size:
raise ValueError('window_size must be smaller than snippet_size')
# pad end of time series with zeros
num_zeros = int(snippet_size * np.ceil(n / snippet_size) - n)
ts = np.append(ts, np.zeros(num_zeros))
# compute all profiles
indices = np.arange(0, len(ts) - snippet_size, snippet_size)
distances = []
for j, i in enumerate(indices):
distance = mpdist_vector(ts, ts[i:(i + snippet_size - 1)], int(window_size))
distances.append(distance)
distances = np.array(distances)
# find N snippets
snippets = []
minis = np.inf
total_min = None
for n in range(num_snippets):
minims = np.inf
for i in range(len(indices)):
s = np.sum(np.minimum(distances[i, :], minis))
if minims > s:
minims = s
index = i
minis = np.minimum(distances[index, :], minis)
actual_index = indices[index]
snippet = ts[actual_index:actual_index + snippet_size]
snippet_distance = distances[index]
snippets.append({
'index': actual_index,
'snippet': snippet,
'distance': snippet_distance
})
if isinstance(total_min, type(None)):
total_min = snippet_distance
else:
total_min = np.minimum(total_min, snippet_distance)
# compute the fraction of each snippet
for snippet in snippets:
mask = (snippet['distance'] <= total_min)
snippet['fraction'] = mask.sum() / (len(ts) - snippet_size)
total_min = total_min - mask
del snippet['distance']
return snippets
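# Hedged usage example (editor addition, not part of the original module): runs the
# routine on a synthetic series just to show the documented return structure; the
# series, snippet_size and num_snippets are arbitrary (len(ts) must be >= 2 * snippet_size).
if __name__ == '__main__':
    ts_demo = np.sin(np.linspace(0, 20 * np.pi, 400)) + 0.1 * np.random.rand(400)
    for snip in snippets(ts_demo, snippet_size=50, num_snippets=2):
        print(snip['index'], round(snip['fraction'], 3), len(snip['snippet']))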
| 2.984375 | 3 |
core/views.py | moiyad/image | 0 | 3839 | from django.core.files.storage import FileSystemStorage
from django.shortcuts import render, redirect
from core.forms import DocumentForm
from core.models import Document
from media import image_cv2
def home(request):
documents = Document.objects.all()
number = len(image_cv2.myList)
return render(request, 'core/home.html', {'documents': documents, 'number': number})
def simple_upload(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
return render(request, 'core/simple_upload.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'core/simple_upload.html')
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('home')
else:
form = DocumentForm()
return render(request, 'core/model_form_upload.html', {
'form': form
})
| 1.390625 | 1 |
scipy/weave/examples/swig2_example.py | lesserwhirls/scipy-cwt | 8 | 3863 | """Simple example to show how to use weave.inline on SWIG2 wrapped
objects. SWIG2 refers to SWIG versions >= 1.3.
To run this example you must build the trivial SWIG2 extension called
swig2_ext. To do this you need to do something like this::
$ swig -c++ -python -I. -o swig2_ext_wrap.cxx swig2_ext.i
$ g++ -Wall -O2 -I/usr/include/python2.3 -fPIC -I. -c \
-o swig2_ext_wrap.os swig2_ext_wrap.cxx
$ g++ -shared -o _swig2_ext.so swig2_ext_wrap.os \
-L/usr/lib/python2.3/config
The files swig2_ext.i and swig2_ext.h are included in the same
directory that contains this file.
Note that weave's SWIG2 support works fine whether SWIG_COBJECT_TYPES
are used or not.
Author: <NAME>
Copyright (c) 2004, <NAME>
License: BSD Style.
"""
# Import our SWIG2 wrapped library
import swig2_ext
import scipy.weave as weave
from scipy.weave import swig2_spec, converters
# SWIG2 support is not enabled by default. We do this by adding the
# swig2 converter to the default list of converters.
converters.default.insert(0, swig2_spec.swig2_converter())
def test():
"""Instantiate the SWIG wrapped object and then call its method
from C++ using weave.inline
"""
a = swig2_ext.A()
b = swig2_ext.foo() # This will be an APtr instance.
b.thisown = 1 # Prevent memory leaks.
code = """a->f();
b->f();
"""
weave.inline(code, ['a', 'b'], include_dirs=['.'],
headers=['"swig2_ext.h"'], verbose=1)
if __name__ == "__main__":
test()
| 1.476563 | 1 |
src/cms/forms/languages/language_form.py | S10MC2015/cms-django | 0 | 3879 | from django import forms
from ...models import Language
class LanguageForm(forms.ModelForm):
"""
Form for creating and modifying language objects
"""
class Meta:
model = Language
fields = [
"code",
"english_name",
"native_name",
"text_direction",
]
| 1.273438 | 1 |
aspx2url/aspx2url.py | marcocucinato/aspx2url | 0 | 3895 | from __future__ import print_function
import re, sys, glob, getopt, os
def usage():
print('aspx2url v1.0')
print('Usage:')
print(sys.argv[0]+' -d -h filename(s)')
print('-d : Delete original file')
print('-h : This help')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hd")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
deleteOriginal = False
for option,value in opts:
if option == '-h':
usage()
sys.exit()
elif option == '-d':
deleteOriginal = True
for origFilename in args:
with open(origFilename, "r") as f:
html_doc = f.read()
prog = re.compile('\<mso\:URL.*?\>(.*?),.*?\<\/mso\:URL\>', re.M)
result = prog.search(html_doc)
url = result.group(1);
filename = re.search('(.*?)\.aspx',origFilename).group(1)
fullFilename = filename+'.url'
with open(fullFilename, 'w') as out:
out.write('[InternetShortcut]\n')
out.write('URL='+url)
out.write('\n')
if deleteOriginal:
os.remove(origFilename)
if __name__ == '__main__':
main()
| 1.796875 | 2 |
src/consensus.py | dschwoerer/samscripts | 0 | 3903 | #! /usr/bin/env python
# Copyright <NAME>, 2015. www.sovic.org
#
# Creates a pileup from a given SAM/BAM file, and calls consensus bases (or variants).
import os
import sys
import operator
import subprocess
def increase_in_dict(dict_counter, value):
try:
dict_counter[value] += 1
except:
dict_counter[value] = 1
def process_mpileup_line(
line,
line_number,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=False,
):
# Split the line, and perform a sanity check.
split_line = line.strip().split("\t")
if len(split_line) < 5 or len(split_line) > 6:
sys.stderr.write(line + "\n")
return 0
ref_name = split_line[0]
position = split_line[1]
ref_base = split_line[2]
coverage = split_line[3]
original_bases = split_line[4]
if len(split_line) == 6:
qualities = split_line[5]
bases = ""
# Replace the '.' and ',' signs with the actual reference base.
i = 0
while i < len(original_bases):
if original_bases[i] == "." or original_bases[i] == ",":
bases += ref_base
else:
bases += original_bases[i]
i += 1
base_counts = {}
insertion_count = 0
current_base_deletion_count = 0
deletion_count = 0
insertion_event_counts = {}
deletion_event_counts = {}
end_counts = 0
# print 'position: %s' % position;
# print 'bases: "%s"' % bases;
# print 'line_number: %d' % line_number;
# print line;
# print '';
# sys.stdout.flush();
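    # Illustrative example (hypothetical values): if ref_base is 'T' and the raw
    # bases field is ".,+2AT.-1C*^].$", the loop below counts four reference-matching
    # 'T' bases, one 2-base insertion event ("AT"), one deletion event ("C"),
    # one '*' deletion placeholder, one read start ('^') and one read end ('$').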
i = 0
while i < len(bases):
base = bases[i]
if base == r"^":
# This is the starting position of a read. It encodes two
# symbols: '^' marking the read start and a char marking the
# mapping quality of the read.
# increase_in_dict(base_counts, bases[i + 1].upper());
i += 1
# Increase only by 1, because we have i += 1 down there.
elif base == r"$":
# This marks the end of a read.
end_counts += 1
elif base == r"*":
# This is a deletion, just count it.
current_base_deletion_count += 1
elif base == r"-":
# This marks the occurance of deletions. It is a composite object
# consisting of: the special character '-', the number of the deleted bases
# and the actual bases that are deleted (these bases follow the current position).
# In our approach, we ignore this case, because we count deletions one by one
# through the '*' character.
# Get the number of bases that need to be skipped in the string.
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
deletion_count += 1
deletion = bases[j : (j + num_bases)].upper()
increase_in_dict(deletion_event_counts, deletion)
# Skip the length of the numeric entry plus the actual number of bases
# that need to be skipped.
i += skip_bases
elif base == r"+":
# This marks the occurance of an insertion. It is a composite object
# consisting of: the special character '+', the number of the inserted bases
# and the actual bases that are inserted (these bases follow the current position).
# Similar to the deletion marking, but here we actually care about the bases,
# and we need to make an allele aware count.
# Get the number of bases that are inserted;
j = i + 1
while bases[j] in "0123456789":
j += 1
num_bases = int(bases[(i + 1) : j])
skip_bases = (j - i) + num_bases - 1
insertion_count += 1
insertion = bases[j : (j + num_bases)].upper()
increase_in_dict(insertion_event_counts, insertion)
i += skip_bases
else:
increase_in_dict(base_counts, bases[i].upper())
i += 1
# TODO: An additional problematic case, discovered this on 03.11.2014., when analyzing BWA-MEM's mpileup.
# There are pileup bases that do not have any actual bases, but only the '*' symbols. How should this be handled properly?
# Example line from the mpileup file:
# gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D-
# I chose to handle them as undercovered bases.
non_indel_coverage_current_base = int(coverage) - current_base_deletion_count
if verbose == True:
sys.stdout.write("%s\nbase_counts: %s\n" % (line.strip(), str(base_counts)))
# EDIT: Previously I compared the total coverage of the current base with the coverage threshold.
# However, the total coverage also accounts for the deletions denoted with the '*' sign, which I think
# isn't relevant, as deletions are counted prior to occuring, and at that point is already decided if there is going
# to be a deletion event. If we wound up at this base (i.e. this base didn't get skipped because of a deletion
# consensus), then the deletions on this base are ignored.
# if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count):
# if (non_indel_coverage_current_base < coverage_threshold):
if int(coverage) < coverage_threshold:
ret_num_undercovered_bases[0] += 1
# ret_coverage_sum[0] += 0;
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
sorted_base_counts = [["A", 0], ["C", 0], ["T", 0], ["G", 0]]
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
most_common_base_count = 0
pass
# variant_line = 'undercovered1\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
variant_line = (
"undercovered1\tpos = %s\tref = %s\tcoverage = %d\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s"
% (
position,
ref_name,
int(coverage),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = "N"
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
ret_num_called_bases[0] += 1
ret_coverage_sum[0] += int(coverage)
# TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
most_common_base_count = 0
### Handling base consensus.
sorted_base_counts = sorted(
list(base_counts.items()), key=operator.itemgetter(1)
)
try:
most_common_base_count = sorted_base_counts[-1][1]
except Exception as e:
pass
# sys.stderr.write(str(e) + '\n');
# sys.stderr.write('sorted_base_counts:\n');
# sys.stderr.write(str(sorted_base_counts) + '\n');
# sys.stderr.write('base_counts:\n');
# sys.stderr.write(str(base_counts) + '\n');
# sys.stderr.write('original_bases:\n');
# sys.stderr.write(str(original_bases) + '\n');
# sys.stderr.write('line:\n');
# sys.stderr.write(line.strip() + '\n');
# most_common_base_count = 0;
# Allow for the case where there are multiple equally good choices.
# In this case, we prefer the choice which is equal to the reference.
is_good = False
for base_count in sorted_base_counts:
if base_count[1] == most_common_base_count:
if base_count[0] == ref_base:
is_good = True
break
if is_good == False:
if len(sorted_base_counts) > 0:
ret_snp_count[0] += 1
# ret_variant_list.append(line_number);
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=snp" % (coverage)
ref_field = ref_base
alt_field = alt_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
else:
sys.stderr.write(
"\nWarning: a SNP was detected, but there were no bases in the sorted_base_counts!"
)
variant_line = (
"SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
int(coverage),
non_indel_coverage_current_base,
most_common_base_count,
ref_base,
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0])),
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
sys.stderr.write("\n")
else:
ret_num_correct_bases[0] += 1
if verbose == True:
sys.stdout.write("Reference base: %s\n" % (ref_base))
sys.stdout.write("Consensus base: %s\n\n" % (base_count[0]))
# if (int(position) == 100000 or int(position) == 1000000 or int(position) == 2000000 or int(position) == 3000000 or int(position) == 4000000):
# print '\nTEST\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
### Handling indel consensus.
### Put a different coverage threshold. Here we are interested even in the reads
### which had a '*' at the current position (because we don't know where it ends).
non_indel_coverage_next_base = (
int(coverage) - end_counts - deletion_count - insertion_count
)
if (
non_indel_coverage_next_base + deletion_count + insertion_count
) > coverage_threshold:
# Sanity check, just to see if there actually were any insertions (to avoid index out of bounds error).
# If there are insertions, get the most common one.
if len(list(insertion_event_counts.keys())) > 0:
sorted_insertion_counts = sorted(
list(insertion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_insertion_count = sorted_insertion_counts[-1][1]
most_common_insertion_length = len(sorted_insertion_counts[-1][0])
insertion_unique = (
True
if (
sum(
[
int(insertion_count[1] == most_common_insertion_count)
for insertion_count in sorted_insertion_counts
]
)
== 1
)
else False
)
else:
most_common_insertion_count = 0
most_common_insertion_length = 0
insertion_unique = False
# Sanity check, just to see if there actually were any deletions (to avoid index out of bounds error).
# If there are deletions, get the most common one.
if len(list(deletion_event_counts.keys())) > 0:
sorted_deletion_counts = sorted(
list(deletion_event_counts.items()), key=operator.itemgetter(1)
)
most_common_deletion_count = sorted_deletion_counts[-1][1]
most_common_deletion_length = len(sorted_deletion_counts[-1][0])
deletion_unique = (
True
if (
sum(
[
int(deletion_count[1] == most_common_deletion_count)
for deletion_count in sorted_deletion_counts
]
)
== 1
)
else False
)
else:
most_common_deletion_count = 0
most_common_deletion_length = 0
deletion_unique = False
if (
most_common_insertion_count > most_common_deletion_count
and most_common_insertion_count > non_indel_coverage_next_base
):
# In this case, insertions are a clear winner.
if insertion_unique == True:
# ret_insertion_count[0] += most_common_insertion_length;
ret_insertion_count[0] += 1
ret_num_called_bases[0] += most_common_insertion_length
# variant_line = 'insertion\t%d\t%s\t%s\t%s\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
try:
temp_sorted_bc = sorted_base_counts[-1][0]
except:
temp_sorted_bc = 0
indel_length = most_common_insertion_length
variant_line = (
"ins\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_insertion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_insertion_count,
ref_base,
temp_sorted_bc,
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Insertions in the VCF format specifies the position where a insertion occurs. The ref position should contain the base which is the same as ref, but the alt field contains the ref base + the insertion event.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=ins" % (coverage)
ref_field = ref_base
alt_field = "%s%s" % (ref_base, sorted_insertion_counts[-1][0])
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
elif (
most_common_deletion_count > most_common_insertion_count
and most_common_deletion_count > non_indel_coverage_next_base
):
# In this case, deletions are a clear winner.
if deletion_unique == True:
# ret_deletion_count[0] += most_common_deletion_length;
ret_deletion_count[0] += 1
# variant_line = 'deletion\t%d\t%s\t%s\t%s\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
# ret_variant_list.append(variant_line);
# return most_common_deletion_length;
variant_line = (
"del\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_deletion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s"
% (
position,
ref_name,
non_indel_coverage_next_base,
non_indel_coverage_current_base,
most_common_deletion_count,
ref_base,
sorted_base_counts[-1][0],
str(sorted_base_counts),
str(insertion_event_counts),
str(deletion_event_counts),
line.strip(),
)
)
ret_variant_list.append(variant_line)
### Deletions in the VCF format specifies the position where a deletion occurs, with the first base being non-deletion, and the following bases being a deletion event.
### VCF output ###
alt_base = (
("{}")
if (len(sorted_base_counts) == 0)
else (str(sorted_base_counts[-1][0]))
)
qual = 1000
info = "DP=%s;TYPE=del" % (coverage)
ref_field = "%s%s" % (ref_base, sorted_deletion_counts[-1][0])
alt_field = ref_base
vcf_line = "%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s" % (
ref_name,
position,
ref_field,
alt_field,
qual,
info,
)
ret_vcf_list.append(vcf_line)
##################
return most_common_deletion_length
else:
# In this case, either the base count consensus wins, or the
# insertion/deletion count is ambiguous.
pass
return 0
def process_mpileup(
alignments_path,
reference_path,
mpileup_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
fp = None
try:
fp = open(mpileup_path, "r")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % mpileup_path
)
return None
ret_variant_list = []
ret_vcf_list = []
ret_snp_count = [0]
ret_insertion_count = [0]
ret_deletion_count = [0]
ret_num_undercovered_bases = [0]
ret_num_called_bases = [0]
ret_num_correct_bases = [0]
ret_coverage_sum = [0]
# lines = fp.readlines();
fp_variant = None
fp_vcf = None
if output_prefix != "":
if not os.path.exists(os.path.dirname(output_prefix)):
os.makedirs(os.path.dirname(output_prefix))
variant_file = "%s-cov_%d.variant.csv" % (output_prefix, coverage_threshold)
fp_variant = open(variant_file, "w")
vcf_file = "%s-cov_%d.variant.vcf" % (output_prefix, coverage_threshold)
fp_vcf = open(vcf_file, "w")
fp_vcf.write("##fileformat=VCFv4.0\n")
fp_vcf.write("##fileDate=20150409\n")
fp_vcf.write("##source=%s\n" % (" ".join(sys.argv)))
fp_vcf.write("##reference=%s\n" % reference_path)
fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">\n')
fp_vcf.write(
'##INFO=<ID=TYPE,Number=A,Type=String,Description="Type of each allele (snp, ins, del, mnp, complex)">\n'
)
fp_vcf.write(
'##INFO=<ID=AF,Number=1,Type=Float,Description="Allele Frequency">\n'
)
fp_vcf.write(
'##INFO=<ID=SB,Number=1,Type=Integer,Description="Phred-scaled strand bias at this position">\n'
)
fp_vcf.write(
'##INFO=<ID=DP4,Number=4,Type=Integer,Description="Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">\n'
)
fp_vcf.write(
'##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n'
)
fp_vcf.write(
'##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description="Indicates that the variant is a consensus variant (as opposed to a low frequency variant).">\n'
)
fp_vcf.write(
'##INFO=<ID=HRUN,Number=1,Type=Integer,Description="Homopolymer length to the right of report indel position">\n'
)
fp_vcf.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
fp_vcf.flush()
use_bed = False
bed_chromosome = ""
bed_pos_start = 0
# bed_pos_end = len(lines);
bed_pos_end = -1
if bed_position != "":
bed_split = bed_position.split(":")
if len(bed_split) != 2:
use_bed = False
else:
bed_chromosome = bed_split[0]
bed_pos_split = bed_split[1].split("-")
if len(bed_pos_split) != 2:
use_bed = False
else:
bed_pos_start = int(bed_pos_split[0])
bed_pos_end = int(bed_pos_split[1])
use_bed = True
sys.stderr.write("Using location specified through commandline:\n")
sys.stderr.write('\tChromosome: "%s"\n' % bed_chromosome)
sys.stderr.write("\tStart: %d\n" % bed_pos_start)
sys.stderr.write("\tEnd: %d\n\n" % bed_pos_end)
# i = 0;
i = 0 if (use_bed == False) else max((bed_pos_start - 10), 0)
j = 0
# while (i < bed_pos_end): # len(lines)):
num_bases_to_skip = 0
for line in fp:
# line = lines[i];
if num_bases_to_skip > 0:
num_bases_to_skip -= 1
continue
if use_bed == True:
line_split = line.strip().split("\t")
if len(line_split) > 2 and line_split[0] == bed_chromosome:
current_pos = int(line_split[1])
if current_pos < bed_pos_start or current_pos >= bed_pos_end:
i += 1
j += 1
continue
else:
# print line_split[0];
# print bed_chromosome;
i += 1
j += 1
continue
if thread_id == 0:
if (j % 1000) == 0:
sys.stderr.write(
"\r[%d] snps = %d, insertions = %d, deletions = %d, undercovered = %d, coverage = %.2f"
% (
i,
ret_snp_count[0],
ret_insertion_count[0],
ret_deletion_count[0],
ret_num_undercovered_bases[0],
(float(ret_coverage_sum[0]) / float((i + 1))),
)
)
sys.stderr.flush()
variant_list_length = len(ret_variant_list)
vcf_list_length = len(ret_vcf_list)
num_bases_to_skip = process_mpileup_line(
line,
i,
ret_variant_list,
ret_vcf_list,
ret_snp_count,
ret_insertion_count,
ret_deletion_count,
ret_num_undercovered_bases,
ret_num_called_bases,
ret_num_correct_bases,
ret_coverage_sum,
coverage_threshold,
verbose=use_bed,
)
if len(ret_variant_list) > variant_list_length and fp_variant != None:
fp_variant.write("\n".join(ret_variant_list[variant_list_length:]) + "\n")
fp_variant.flush()
if len(ret_vcf_list) > vcf_list_length and fp_vcf != None:
fp_vcf.write("\n".join(ret_vcf_list[vcf_list_length:]) + "\n")
fp_vcf.flush()
i += num_bases_to_skip
i += 1
j += 1
# if (i > 10000):
# break;
fp.close()
sys.stderr.write("\n")
if fp_variant != None:
fp_variant.close()
if fp_vcf != None:
fp_vcf.close()
summary_lines = ""
summary_lines += "alignments_file: %s\n" % alignments_path
summary_lines += "mpileup_file: %s\n" % mpileup_path
summary_lines += "coverage_threshold: %d\n" % coverage_threshold
summary_lines += "snp_count: %d\n" % ret_snp_count[0]
summary_lines += "insertion_count: %d\n" % ret_insertion_count[0]
summary_lines += "deletion_count: %d\n" % ret_deletion_count[0]
summary_lines += "num_undercovered_bases: %d\n" % ret_num_undercovered_bases[0]
summary_lines += "num_called_bases: %d\n" % ret_num_called_bases[0]
summary_lines += "num_correct_bases: %d\n" % ret_num_correct_bases[0]
summary_lines += "average_coverage: %.2f\n" % (
(float(ret_coverage_sum[0]) / float((i + 1)))
)
sys.stderr.write(summary_lines + "\n")
sys.stderr.write("\n")
if output_prefix != "":
# summary_file = output_prefix + '.conssum';
summary_file = "%s-cov_%d.variant.sum" % (output_prefix, coverage_threshold)
try:
fp_sum = open(summary_file, "w")
fp_sum.write(summary_lines)
fp_sum.close()
return summary_file
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % (summary_file)
)
return None
return None
def main(
alignments_path,
reference_path,
coverage_threshold,
output_prefix,
thread_id=0,
bed_position="",
):
# Sanity checking the existence of the file, and the correctness of its extension.
# Also, if input file is a SAM file, then convert it to a sorted BAM.
alignments_path_bam = alignments_path
if os.path.exists(alignments_path) == False:
sys.stderr.write('ERROR: File "%s" does not exist!\n' % alignments_path)
return
if alignments_path.endswith("sam"):
# Determine the path where the new BAM file will be generated.
dir_name = os.path.dirname(alignments_path)
if dir_name == "":
dir_name = "."
alignments_path_bam = (
dir_name
+ "/"
+ os.path.splitext(os.path.basename(alignments_path))[0]
+ ".bam"
)
alignments_path_bam_exists = os.path.exists(alignments_path_bam)
# Check if a BAM file with the given name already exists.
if alignments_path_bam_exists == False or (
alignments_path_bam_exists == True
and os.path.getmtime(alignments_path)
> os.path.getmtime(alignments_path_bam)
):
# Convert the SAM file to a sorted BAM file.
command = "samtools view -bS %s | samtools sort - %s" % (
alignments_path,
os.path.splitext(alignments_path_bam)[0],
)
sys.stderr.write(command + "\n")
            subprocess.call(command, shell=True)
# Create the BAM index file.
command = "samtools index %s %s.bai" % (
alignments_path_bam,
alignments_path_bam,
)
            subprocess.call(command, shell=True)
elif alignments_path.endswith("bam") == False:
sys.stderr.write(
'ERROR: File extension needs to be either .sam or .bam! Input file path: "%s".\n'
% alignments_path
)
return
# Convert the sorted BAM file to a mpileup file if it doesn't exist yet.
mpileup_path = "%s.mpileup" % alignments_path_bam
mpileup_exists = os.path.exists(mpileup_path)
if mpileup_exists == False or (
mpileup_exists == True
and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path)
):
command = "samtools mpileup -B -d 1000000 -Q 0 -A -f %s %s > %s.mpileup" % (
reference_path,
alignments_path_bam,
alignments_path_bam,
)
        subprocess.call(command, shell=True)
sys.stderr.write('Processing file "%s"...\n' % alignments_path)
sys.stderr.write('Reference file "%s"...\n' % reference_path)
sys.stderr.write("Coverage threshold: %d\n" % coverage_threshold)
summary_file = process_mpileup(
alignments_path,
reference_path,
("%s.mpileup" % alignments_path_bam),
coverage_threshold,
output_prefix,
thread_id,
bed_position,
)
def CollectSummaries(
sam_files, prefix_for_intermediate_results, collective_output_file
):
fp_collect = None
try:
fp_collect = open(collective_output_file, "w")
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for writing!\n' % collective_output_file
)
return
for sam_file in sam_files:
summary_file = prefix_for_intermediate_results + ".sum"
try:
fp_sum = open(summary_file, "r")
lines = fp_sum.readlines()
fp_sum.close()
except IOError:
sys.stderr.write(
'ERROR: Could not open file "%s" for reading!\n' % summary_file
)
continue
fp_collect.write("".join(lines) + "\n")
fp_collect.close()
if __name__ == "__main__":
# if (len(sys.argv) < 5):
# sys.stderr.write('Usage:\n');
# sys.stderr.write('\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\n' % sys.argv[0]);
# sys.stderr.write('\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n');
# exit(1);
if len(sys.argv) < 5:
sys.stderr.write("Usage:\n")
sys.stderr.write(
"\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file_> [position]\n"
% sys.argv[0]
)
sys.stderr.write(
'\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n'
)
sys.stderr.write(
'\tPosition parameter is a string specifying "chromosome:start-end"\n\n'
)
exit(1)
reference_file = sys.argv[1]
coverage_threshold = int(sys.argv[2])
output_prefix = sys.argv[3]
sam_file = sys.argv[4]
bed_position = ""
if len(sys.argv) > 5:
bed_position = sys.argv[5]
# sys.stderr.write('bed_position: "%s"\n\n' % bed_position);
processes = []
if output_prefix == "-":
output_prefix = os.path.splitext(sam_file)[0]
main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position)
# if (output_prefix != '-'):
# CollectSummaries([sam_file], output_prefix, output_prefix + '.variant.sum');
| 1.710938 | 2 |
api/src/opentrons/protocol_engine/commands/thermocycler/open_lid.py | Opentrons/protocol_framework | 0 | 3919 | """Command models to open a Thermocycler's lid."""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from typing_extensions import Literal, Type
from pydantic import BaseModel, Field
from ..command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
from opentrons.protocol_engine.types import MotorAxis
if TYPE_CHECKING:
from opentrons.protocol_engine.state import StateView
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
OpenLidCommandType = Literal["thermocycler/openLid"]
class OpenLidParams(BaseModel):
"""Input parameters to open a Thermocycler's lid."""
moduleId: str = Field(..., description="Unique ID of the Thermocycler.")
class OpenLidResult(BaseModel):
"""Result data from opening a Thermocycler's lid."""
class OpenLidImpl(AbstractCommandImpl[OpenLidParams, OpenLidResult]):
"""Execution implementation of a Thermocycler's open lid command."""
def __init__(
self,
state_view: StateView,
equipment: EquipmentHandler,
movement: MovementHandler,
**unused_dependencies: object,
) -> None:
self._state_view = state_view
self._equipment = equipment
self._movement = movement
async def execute(self, params: OpenLidParams) -> OpenLidResult:
"""Open a Thermocycler's lid."""
thermocycler_state = self._state_view.modules.get_thermocycler_module_substate(
params.moduleId
)
thermocycler_hardware = self._equipment.get_module_hardware_api(
thermocycler_state.module_id
)
# move the pipettes and gantry over the trash
# do not home plunger axes because pipettes may be holding liquid
await self._movement.home(
[
MotorAxis.X,
MotorAxis.Y,
MotorAxis.RIGHT_Z,
MotorAxis.LEFT_Z,
]
)
if thermocycler_hardware is not None:
await thermocycler_hardware.open()
return OpenLidResult()
class OpenLid(BaseCommand[OpenLidParams, OpenLidResult]):
"""A command to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
result: Optional[OpenLidResult]
_ImplementationCls: Type[OpenLidImpl] = OpenLidImpl
class OpenLidCreate(BaseCommandCreate[OpenLidParams]):
"""A request to open a Thermocycler's lid."""
commandType: OpenLidCommandType = "thermocycler/openLid"
params: OpenLidParams
_CommandCls: Type[OpenLid] = OpenLid
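# Illustrative note (the moduleId value below is hypothetical): a create request
# for this command can be built as
#   OpenLidCreate(params=OpenLidParams(moduleId="thermocycler-module-id"))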
| 1.773438 | 2 |
pfm/pf_command/update.py | takahi-i/pfm | 9 | 3927 | import json
from pfm.pf_command.base import BaseCommand
from pfm.util.log import logger
class UpdateCommand(BaseCommand):
def __init__(self, name, forward_type,
remote_host, remote_port, local_port,
ssh_server, server_port, login_user, config):
super(UpdateCommand, self).__init__(config)
self.name = name
self.forward_type = forward_type
self.remote_host = remote_host
self.remote_port = remote_port
self.local_port = local_port
self.ssh_server = ssh_server
self.server_port = server_port
self.login_user = login_user
    def run(self):
        # Load the existing port forward settings.
        with open(self.config_path, 'r') as f:
            targets = json.load(f)
        if self.name in targets:
            target = targets[self.name]
            self.update(target)
        else:
            logger.warn("Port forward setting named " + self.name + " is not registered")
        # Write the (possibly updated) settings back to the config file.
        with open(self.config_path, 'w') as f:
            f.write(json.dumps(targets, indent=4))
def update(self, target):
if self.forward_type is not None:
target["type"] = self.forward_type
if self.remote_host is not None:
target["remote_host"] = self.remote_host
if self.remote_port is not None:
target["remote_port"] = self.remote_port
if self.local_port is not None:
target["local_port"] = self.local_port
if self.ssh_server is not None:
target["ssh_server"] = self.ssh_server
if self.server_port is not None:
target["server_port"] = self.server_port
if self.login_user is not None:
target["login_user"] = self.login_user
| 1.6875 | 2 |
ragweed/framework.py | soumyakoduri/ragweed | 0 | 3959 | import sys
import os
import boto
import boto.s3.connection
import json
import inspect
import pickle
import bunch
import yaml
import ConfigParser
import rados
from boto.s3.key import Key
from nose.plugins.attrib import attr
from nose.tools import eq_ as eq
from .reqs import _make_admin_request
ragweed_env = None
suite = None
class RGWConnection:
def __init__(self, access_key, secret_key, host, port, is_secure):
self.host = host
self.port = port
self.is_secure = is_secure
self.conn = boto.connect_s3(
aws_access_key_id = access_key,
aws_secret_access_key = secret_key,
host=host,
port=port,
is_secure=is_secure,
calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
def create_bucket(self, name):
return self.conn.create_bucket(name)
def get_bucket(self, name, validate=True):
return self.conn.get_bucket(name, validate=validate)
class RGWRESTAdmin:
def __init__(self, connection):
self.conn = connection
def get_resource(self, path, params):
r = _make_admin_request(self.conn, "GET", path, params)
if r.status != 200:
raise boto.exception.S3ResponseError(r.status, r.reason)
return bunch.bunchify(json.loads(r.read()))
def read_meta_key(self, key):
return self.get_resource('/admin/metadata', {'key': key})
def get_bucket_entrypoint(self, bucket_name):
return self.read_meta_key('bucket:' + bucket_name)
def get_bucket_instance_info(self, bucket_name, bucket_id = None):
if not bucket_id:
ep = self.get_bucket_entrypoint(bucket_name)
print ep
bucket_id = ep.data.bucket.bucket_id
result = self.read_meta_key('bucket.instance:' + bucket_name + ":" + bucket_id)
return result.data.bucket_info
def check_bucket_index(self, bucket_name):
return self.get_resource('/admin/bucket',{'index' : None, 'bucket':bucket_name})
def get_obj_layout(self, key):
path = '/' + key.bucket.name + '/' + key.name
params = {'layout': None}
if key.version_id is not None:
params['versionId'] = key.version_id
print params
return self.get_resource(path, params)
def get_zone_params(self):
return self.get_resource('/admin/config', {'type': 'zone'})
class RSuite:
def __init__(self, name, bucket_prefix, zone, suite_step):
self.name = name
self.bucket_prefix = bucket_prefix
self.zone = zone
self.config_bucket = None
self.rtests = []
self.do_preparing = False
self.do_check = False
for step in suite_step.split(','):
if step == 'prepare':
self.do_preparing = True
self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf'))
if step == 'check' or step == 'test':
self.do_check = True
self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf'))
def get_bucket_name(self, suffix):
return self.bucket_prefix + '-' + suffix
def register_test(self, t):
self.rtests.append(t)
def write_test_data(self, test):
k = Key(self.config_bucket)
k.key = 'tests/' + test._name
k.set_contents_from_string(test.to_json())
def read_test_data(self, test):
k = Key(self.config_bucket)
k.key = 'tests/' + test._name
s = k.get_contents_as_string()
print 'read_test_data=', s
test.from_json(s)
def is_preparing(self):
return self.do_preparing
def is_checking(self):
return self.do_check
class RTestJSONSerialize(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):
            return json.JSONEncoder.default(self, obj)
return {'__pickle': pickle.dumps(obj)}
def rtest_decode_json(d):
if '__pickle' in d:
return pickle.loads(str(d['__pickle']))
return d
class RPlacementRule:
def __init__(self, rule):
r = rule.split('/', 1)
self.placement_id = r[0]
if (len(r) == 2):
self.storage_class=r[1]
else:
self.storage_class = 'STANDARD'
class RBucket:
def __init__(self, zone, bucket, bucket_info):
self.zone = zone
self.bucket = bucket
self.name = bucket.name
self.bucket_info = bucket_info
try:
self.placement_rule = RPlacementRule(self.bucket_info.placement_rule)
self.placement_target = self.zone.get_placement_target(self.bucket_info.placement_rule)
except:
pass
def get_data_pool(self):
try:
# old style explicit pool
explicit_pool = self.bucket_info.bucket.pool
except:
# new style explicit pool
explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool
if explicit_pool is not None and explicit_pool != '':
return explicit_pool
return self.placement_target.get_data_pool(self.placement_rule)
def get_tail_pool(self, obj_layout):
try:
placement_rule = obj_layout.manifest.tail_placement.placement_rule
except:
placement_rule = ''
if placement_rule == '':
try:
# new style
return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool
except:
pass
try:
# old style
return obj_layout.manifest.tail_bucket.pool
except:
pass
pr = RPlacementRule(placement_rule)
return self.placement_target.get_data_pool(pr)
class RStorageClasses:
def __init__(self, config):
if hasattr(config, 'storage_classes'):
self.storage_classes = config.storage_classes
else:
try:
self.storage_classes = bunch.bunchify({ 'STANDARD': { 'data_pool': config.data_pool }})
except:
self.storage_classes = None
pass
def get(self, storage_class):
assert(self.storage_classes != None)
try:
if not storage_class:
storage_class = 'STANDARD'
sc = self.storage_classes[storage_class]
except:
eq('could not find storage class ' + storage_class, 0)
return sc
def get_all(self):
for (name, _) in self.storage_classes.iteritems():
yield name
class RPlacementTarget:
def __init__(self, name, config):
self.name = name
self.index_pool = config.index_pool
self.data_extra_pool = config.data_extra_pool
self.storage_classes = RStorageClasses(config)
if not self.data_extra_pool:
            self.data_extra_pool = self.storage_classes.get('STANDARD').data_pool
def get_data_pool(self, placement_rule):
return self.storage_classes.get(placement_rule.storage_class).data_pool
class RZone:
def __init__(self, conn):
self.conn = conn
self.rgw_rest_admin = RGWRESTAdmin(self.conn.system)
self.zone_params = self.rgw_rest_admin.get_zone_params()
self.placement_targets = {}
for e in self.zone_params.placement_pools:
self.placement_targets[e.key] = e.val
print 'zone_params:', self.zone_params
def get_placement_target(self, placement_id):
plid = placement_id
if placement_id is None or placement_id == '':
print 'zone_params=', self.zone_params
plid = self.zone_params.default_placement
try:
return RPlacementTarget(plid, self.placement_targets[plid])
except:
pass
return None
def get_default_placement(self):
        return self.get_placement_target(self.zone_params.default_placement)
def create_bucket(self, name):
bucket = self.create_raw_bucket(name)
bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
print 'bucket_info:', bucket_info
return RBucket(self, bucket, bucket_info)
def get_bucket(self, name):
bucket = self.get_raw_bucket(name)
bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
print 'bucket_info:', bucket_info
return RBucket(self, bucket, bucket_info)
def create_raw_bucket(self, name):
return self.conn.regular.create_bucket(name)
def get_raw_bucket(self, name):
return self.conn.regular.get_bucket(name)
def refresh_rbucket(self, rbucket):
rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name)
rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name)
class RTest:
def __init__(self):
self._name = self.__class__.__name__
self.r_buckets = []
self.init()
def create_bucket(self):
bid = len(self.r_buckets) + 1
bucket_name = suite.get_bucket_name(self._name + '.' + str(bid))
bucket_name = bucket_name.replace("_", "-")
rb = suite.zone.create_bucket(bucket_name)
self.r_buckets.append(rb)
return rb
def get_buckets(self):
for rb in self.r_buckets:
yield rb
def init(self):
pass
def prepare(self):
pass
def check(self):
pass
def to_json(self):
attrs = {}
for x in dir(self):
if x.startswith('r_'):
attrs[x] = getattr(self, x)
return json.dumps(attrs, cls=RTestJSONSerialize)
def from_json(self, s):
j = json.loads(s, object_hook=rtest_decode_json)
for e in j:
setattr(self, e, j[e])
def save(self):
suite.write_test_data(self)
def load(self):
suite.read_test_data(self)
for rb in self.r_buckets:
suite.zone.refresh_rbucket(rb)
def test(self):
suite.register_test(self)
if suite.is_preparing():
self.prepare()
self.save()
if suite.is_checking():
self.load()
self.check()
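# Illustrative sketch of how a test is typically written against this framework
# (class, attribute and object names below are made up for the example). Attributes
# prefixed with 'r_' are serialized after the 'prepare' stage and restored before
# the 'check' stage, so a test can compare state across the two runs.
#
#   class r_test_example(RTest):
#       def prepare(self):
#           rb = self.create_bucket()
#           self.r_key_names = ['obj-%d' % i for i in range(5)]
#           for name in self.r_key_names:
#               k = Key(rb.bucket)
#               k.key = name
#               k.set_contents_from_string(name)
#       def check(self):
#           for rb in self.get_buckets():
#               eq(len(list(rb.bucket.list())), len(self.r_key_names))
#   def test_example():
#       r_test_example().test()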
def read_config(fp):
config = bunch.Bunch()
g = yaml.safe_load_all(fp)
for new in g:
print bunch.bunchify(new)
config.update(bunch.bunchify(new))
return config
str_config_opts = [
'user_id',
'access_key',
'secret_key',
'host',
'ceph_conf',
'bucket_prefix',
]
int_config_opts = [
'port',
]
bool_config_opts = [
'is_secure',
]
def dict_find(d, k):
if d.has_key(k):
return d[k]
return None
class RagweedEnv:
def __init__(self):
self.config = bunch.Bunch()
cfg = ConfigParser.RawConfigParser()
try:
path = os.environ['RAGWEED_CONF']
except KeyError:
raise RuntimeError(
'To run tests, point environment '
+ 'variable RAGWEED_CONF to a config file.',
)
with file(path) as f:
cfg.readfp(f)
for section in cfg.sections():
try:
(section_type, name) = section.split(None, 1)
if not self.config.has_key(section_type):
self.config[section_type] = bunch.Bunch()
self.config[section_type][name] = bunch.Bunch()
cur = self.config[section_type]
except ValueError:
section_type = ''
name = section
self.config[name] = bunch.Bunch()
cur = self.config
cur[name] = bunch.Bunch()
for var in str_config_opts:
try:
cur[name][var] = cfg.get(section, var)
except ConfigParser.NoOptionError:
pass
for var in int_config_opts:
try:
cur[name][var] = cfg.getint(section, var)
except ConfigParser.NoOptionError:
pass
for var in bool_config_opts:
try:
cur[name][var] = cfg.getboolean(section, var)
except ConfigParser.NoOptionError:
pass
print json.dumps(self.config)
rgw_conf = self.config.rgw
try:
self.bucket_prefix = rgw_conf.bucket_prefix
except:
self.bucket_prefix = 'ragweed'
conn = bunch.Bunch()
for (k, u) in self.config.user.iteritems():
conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure'))
self.zone = RZone(conn)
self.suite = RSuite('ragweed', self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES'])
try:
self.ceph_conf = self.config.rados.ceph_conf
except:
raise RuntimeError(
'ceph_conf is missing under the [rados] section in ' + os.environ['RAGWEED_CONF']
)
self.rados = rados.Rados(conffile=self.ceph_conf)
self.rados.connect()
pools = self.rados.list_pools()
for pool in pools:
print "rados pool>", pool
def setup_module():
global ragweed_env
global suite
ragweed_env = RagweedEnv()
suite = ragweed_env.suite
| 1.421875 | 1 |
ecommerce-website/orders/admin.py | Shanu85/FCS_Project | 0 | 3967 | from django.contrib import admin
from .models import Order, receiverInfo
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_display = ('user', 'code', 'total_price', 'shipping_status', 'created_at')
list_display_links = ('user',)
list_editable = ('shipping_status',)
list_filter = ('shipping_status', 'payment_mode', 'created_at')
list_per_page = 25
search_fields = ('user__phone_number', 'user__email', 'code')
readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code')
def total_price(self, obj):
return obj.cart.total_price
def has_add_permission(self, request):
return False
@admin.register(receiverInfo)
class receiverInfoAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_display = ('id', 'full_name', 'phone_number', 'address', 'created_at')
list_display_links = ('id', 'full_name')
list_filter = ('created_at',)
list_per_page = 25
search_fields = ('full_name', 'phone_number', 'address')
readonly_fields = ('full_name', 'phone_number', 'address')
| 1.242188 | 1 |
vunit/test/unit/test_tokenizer.py | bjacobs1/vunit | 1 | 3983 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, <NAME> <EMAIL>
"""
Test of the general tokenizer
"""
from unittest import TestCase
from vunit.parsing.tokenizer import describe_location
from vunit.test.mock_2or3 import mock
class TestTokenizer(TestCase):
"""
Test of the general tokenizer
"""
def test_describes_single_char_location(self):
self.assertEqual(
_describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")
def test_describes_single_char_location_within(self):
self.assertEqual(
_describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")
def test_describes_multi_char_location(self):
self.assertEqual(
_describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")
def test_describes_multi_char_location_within(self):
self.assertEqual(
_describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")
def test_describes_multi_line_location(self):
self.assertEqual(
_describe_location("""\
S____
E
"""), """\
at filename0 line 1:
S____
~~~~~""")
def test_describes_multi_file_location(self):
self.assertEqual(
_describe_location("""\
S__E""", """\
SE"""), """\
from filename0 line 2:
S__E
~~~~
at filename1 line 3:
SE
~~""")
def test_describe_location_none(self):
self.assertEqual(describe_location(None),
"Unknown location")
def test_describe_missing_location(self):
self.assertEqual(describe_location((("missing.svh", (0, 0)), None)),
"Unknown location in missing.svh")
def test_describe_none_filename_location(self):
self.assertEqual(describe_location(((None, (0, 0)), None)),
"Unknown Python string")
def _describe_location(*codes):
"""
Helper to test describe_location
"""
contents = {}
location = None
for idx, code in enumerate(codes):
filename = "filename%i" % idx
contents[filename] = code
start = code.index("S")
if "E" in code:
end = code.index("E")
else:
end = start
location = ((filename, (start, end)), location)
with mock.patch("vunit.parsing.tokenizer.read_file", autospec=True) as mock_read_file:
with mock.patch("vunit.parsing.tokenizer.file_exists", autospec=True) as mock_file_exists:
def file_exists_side_effect(filename):
return filename in contents
def read_file_side_effect(filename):
return contents[filename]
mock_file_exists.side_effect = file_exists_side_effect
mock_read_file.side_effect = read_file_side_effect
retval = describe_location(location=location)
return retval
| 1.632813 | 2 |
generate_training_data_drb.py | SimonTopp/Graph-WaveNet | 0 | 4031 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import os
import pandas as pd
import util
import os.path
import yaml
import xarray as xr
import datetime
import pickle
def scale(dataset, std=None, mean=None):
"""
scale the data so it has a standard deviation of 1 and a mean of zero
:param dataset: [xr dataset] input or output data
:param std: [xr dataset] standard deviation if scaling test data with dims
:param mean: [xr dataset] mean if scaling test data with dims
:return: scaled data with original dims
"""
if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset):
std = dataset.std(skipna=True)
mean = dataset.mean(skipna=True)
# adding small number in case there is a std of zero
scaled = (dataset - mean) / (std + 1e-10)
check_if_finite(std)
check_if_finite(mean)
return scaled, std, mean
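# Illustrative note (hypothetical numbers): for a dataset whose only variable holds
# the values [0., 10., 20.], scale() returns roughly [-1.22, 0., 1.22] together with
# std (~8.16) and mean (10.) datasets, which can then be passed back in to scale the
# validation and test splits with the training statistics.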
def sel_partition_data(dataset, start_dates, end_dates):
"""
select the data from a date range or a set of date ranges
:param dataset: [xr dataset] input or output data with date dimension
:param start_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to start period
(can have multiple discontinuos periods)
:param end_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to end period
(can have multiple discontinuos periods)
:return: dataset of just those dates
"""
# if it just one date range
if isinstance(start_dates, str):
if isinstance(end_dates, str):
return dataset.sel(date=slice(start_dates, end_dates))
else:
raise ValueError("start_dates is str but not end_date")
# if it's a list of date ranges
elif isinstance(start_dates, list) or isinstance(start_dates, tuple):
if len(start_dates) == len(end_dates):
data_list = []
for i in range(len(start_dates)):
date_slice = slice(start_dates[i], end_dates[i])
data_list.append(dataset.sel(date=date_slice))
return xr.concat(data_list, dim="date")
else:
raise ValueError("start_dates and end_dates must have same length")
else:
raise ValueError("start_dates must be either str, list, or tuple")
def separate_trn_tst(
dataset,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
):
"""
separate the train data from the test data according to the start and end
dates. This assumes your training data is in one continuous block and all
the dates that are not in the training are in the testing.
:param dataset: [xr dataset] input or output data with dims
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
train period (can have multiple discontinuos periods)
:param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
period (can have multiple discontinuos periods)
:param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
validation period (can have multiple discontinuos periods)
:param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
validation period (can have multiple discontinuos periods)
:param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
test period (can have multiple discontinuos periods)
:param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
period (can have multiple discontinuos periods)
"""
train = sel_partition_data(dataset, train_start_date, train_end_date)
val = sel_partition_data(dataset, val_start_date, val_end_date)
test = sel_partition_data(dataset, test_start_date, test_end_date)
return train, val, test
def split_into_batches(data_array, seq_len=365, offset=1):
"""
split training data into batches with size of batch_size
:param data_array: [numpy array] array of training data with dims [nseg,
ndates, nfeat]
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched data with dims [nbatches, nseg, seq_len
(batch_size), nfeat]
"""
combined = []
for i in range(int(1 / offset)):
start = int(i * offset * seq_len)
idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len)
split = np.split(data_array, indices_or_sections=idx, axis=1)
# add all but the first and last batch since they will be smaller
combined.extend([s for s in split if s.shape[1] == seq_len])
combined = np.asarray(combined)
return combined
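# Illustrative shape example (hypothetical sizes): with data_array of shape
# (nseg=5, ndates=730, nfeat=3), seq_len=365 and offset=0.5, the result has shape
# (3, 5, 365, 3) -- full-length batches starting at day 0, day 365 and day 182.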
def read_multiple_obs(obs_files, x_data):
"""
read and format multiple observation files. we read in the pretrain data to
make sure we have the same indexing.
:param obs_files: [list] list of filenames of observation files
    :param x_data: [xr dataset] the x (pretraining) data, used to align the
    observation indexing
:return: [xr dataset] the observations in the same time
"""
obs = [x_data.sortby(["seg_id_nat", "date"])]
for filename in obs_files:
ds = xr.open_zarr(filename)
obs.append(ds)
if "site_id" in ds.variables:
del ds["site_id"]
obs = xr.merge(obs, join="left")
obs = obs[["temp_c", "discharge_cms"]]
obs = obs.rename(
{"temp_c": "seg_tave_water", "discharge_cms": "seg_outflow"}
)
return obs
def reshape_for_training(data):
"""
reshape the data for training
:param data: training data (either x or y or mask) dims: [nbatch, nseg,
len_seq, nfeat/nout]
:return: reshaped data [nbatch * nseg, len_seq, nfeat/nout]
"""
n_batch, n_seg, seq_len, n_feat = data.shape
return np.reshape(data, [n_batch * n_seg, seq_len, n_feat])
def get_exclude_start_end(exclude_grp):
"""
get the start and end dates for the exclude group
:param exclude_grp: [dict] dictionary representing the exclude group from
the exclude yml file
:return: [tuple of datetime objects] start date, end date
"""
start = exclude_grp.get("start_date")
if start:
start = datetime.datetime.strptime(start, "%Y-%m-%d")
end = exclude_grp.get("end_date")
if end:
end = datetime.datetime.strptime(end, "%Y-%m-%d")
return start, end
def convert_batch_reshape(dataset, seq_len=365, offset=1, y = False, period = np.nan):
"""
convert xarray dataset into numpy array, swap the axes, batch the array and
reshape for training
:param dataset: [xr dataset] data to be batched
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched and reshaped dataset
"""
# convert xr.dataset to numpy array
dataset = dataset.transpose("seg_id_nat", "date")
arr = dataset.to_array().values
# if the dataset is empty, just return it as is
if dataset.date.size == 0:
return arr
# before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat]
# this is the order that the split into batches expects
arr = np.moveaxis(arr, 0, -1)
# batch the data
# after [nbatch, nseg, seq_len, nfeat]
batched = split_into_batches(arr, seq_len=seq_len, offset=offset)
# reshape data
# after [nseq, seq_len, nseg, nfeat]
#reshaped = reshape_for_training(batched)
reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3])
    if y and period is not None and np.isfinite(period):
reshaped = reshaped[:,-period:,...]
return reshaped
def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1):
# I need one variable name. It can be any in the dataset, but I'll use the
# first
first_var = next(iter(dataset.data_vars.keys()))
coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0]
new_var_name = coord_name + "1"
dataset[new_var_name] = coord_array
reshaped_np_arr = convert_batch_reshape(
dataset[[new_var_name]], seq_len=seq_len, offset=offset
)
return reshaped_np_arr
def check_if_finite(xarr):
assert np.isfinite(xarr.to_array().values).all()
def prep_data(
obs_temper_file,
obs_flow_file,
pretrain_file,
#distfile,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
x_vars=None,
y_vars= ["seg_tave_water", "seg_outflow"],
seq_length = 365,
offset = 1,
period = None,
primary_variable="temp",
#catch_prop_file=None,
#exclude_file=None,
#log_q=False,
out_file=None,
#segs=None,
normalize_y=False,
):
"""
prepare input and output data for DL model training read in and process
data into training and testing datasets. the training and testing data are
scaled to have a std of 1 and a mean of zero
:param obs_temper_file: [str] temperature observations file (csv)
:param obs_flow_file:[str] discharge observations file (csv)
:param pretrain_file: [str] the file with the pretraining data (SNTemp data)
:param distfile: [str] path to the distance matrix .npz file
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
train period (can have multiple discontinuos periods)
:param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
period (can have multiple discontinuos periods)
:param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
validation period (can have multiple discontinuos periods)
:param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
validation period (can have multiple discontinuos periods)
:param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
test period (can have multiple discontinuos periods)
:param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
period (can have multiple discontinuos periods)
:param x_vars: [list] variables that should be used as input. If None, all
of the variables will be used
:param primary_variable: [str] which variable the model should focus on
'temp' or 'flow'. This determines the order of the variables.
:param catch_prop_file: [str] the path to the catchment properties file. If
left unfilled, the catchment properties will not be included as predictors
:param exclude_file: [str] path to exclude file
:param log_q: [bool] whether or not to take the log of discharge in training
:param out_file: [str] file to where the values will be written
:returns: training and testing data along with the means and standard
deviations of the training input and output data
'y_trn_pre': batched, scaled, and centered output data for entire
period of record of SNTemp [n_samples, seq_len, n_out]
'y_obs_trn': batched, scaled, and centered output observation data
for the training period
'y_trn_obs_std': standard deviation of the y observations training
data [n_out]
'y_trn_obs_mean': mean of the observation training data [n_out]
'y_obs_tst': un-batched, unscaled, uncentered observation data for
the test period [n_yrs, n_seg, len_seq, n_out]
'dates_ids_trn: batched dates and national seg ids for training data
[n_samples, seq_len, 2]
'dates_ids_tst: un-batched dates and national seg ids for testing
data [n_yrs, n_seg, len_seq, 2]
"""
ds_pre = xr.open_zarr(pretrain_file)
x_data = ds_pre[x_vars]
# make sure we don't have any weird input values
check_if_finite(x_data)
x_trn, x_val, x_tst = separate_trn_tst(
x_data,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
x_scl, x_std, x_mean = scale(x_data)
x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean)
x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean)
x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean)
y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data)
y_obs = y_obs[y_vars]
y_pre = ds_pre[y_vars]
y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst(
y_obs,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst(
y_pre,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
if normalize_y:
# scale y training data and get the mean and std
y_obs_trn, y_std, y_mean = scale(y_obs_trn)
y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean)
else:
_, y_std, y_mean = scale(y_obs_trn)
data = {
"x_train": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length),
"x_val": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length),
"x_test": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length),
"x_std": x_std.to_array().values,
"x_mean": x_mean.to_array().values,
"x_cols": np.array(x_vars),
"ids_train": coord_as_reshaped_array(x_trn, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_train": coord_as_reshaped_array(x_trn, "date", offset=offset, seq_len=seq_length),
"ids_val": coord_as_reshaped_array(x_val, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_val": coord_as_reshaped_array(x_val, "date", offset=offset, seq_len=seq_length),
"ids_test": coord_as_reshaped_array(x_tst, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_test": coord_as_reshaped_array(x_tst, "date", offset=offset, seq_len=seq_length),
"y_pre_train": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_train": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_val": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period),
"y_test": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_vars": np.array(y_vars),
'period': np.array([period]),
'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period),
'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_std": y_std.to_array().values,
"y_mean": y_mean.to_array().values,
}
if out_file:
        if not os.path.isdir(out_file):
os.makedirs(out_file)
'''
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_obs_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_obs_tst'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_obs_val'],
)
'''
np.savez_compressed(os.path.join(out_file,'data.npz'), **data)
return data
def prep_adj_matrix(infile, dist_type, out_file=None):
"""
process adj matrix.
**The resulting matrix is sorted by seg_id_nat **
:param infile:
:param dist_type: [str] type of distance matrix ("upstream", "downstream" or
"updown")
:param out_file:
:return: [numpy array] processed adjacency matrix
"""
adj_matrices = np.load(infile)
adj = adj_matrices[dist_type]
adj_full = sort_dist_matrix(adj, adj_matrices["rowcolnames"])
adj = adj_full[2]
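    # The steps below turn raw distances into edge weights: unreachable pairs
    # (infinite distance) become 0, finite distances are negated, standardized,
    # and squashed through a sigmoid so closer segments get larger weights;
    # self-loops are then added and the matrix is row-normalized,
    # A_hat = D^-1 (A + I), in the style of graph-convolution preprocessing.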
adj = np.where(np.isinf(adj), 0, adj)
adj = -adj
mean_adj = np.mean(adj[adj != 0])
std_adj = np.std(adj[adj != 0])
adj[adj != 0] = adj[adj != 0] - mean_adj
adj[adj != 0] = adj[adj != 0] / std_adj
adj[adj != 0] = 1 / (1 + np.exp(-adj[adj != 0]))
I = np.eye(adj.shape[0])
A_hat = adj.copy() + I
D = np.sum(A_hat, axis=1)
D_inv = D ** -1.0
D_inv = np.diag(D_inv)
A_hat = np.matmul(D_inv, A_hat)
if out_file:
out_dm = [adj_full[0], adj_full[1], A_hat]
with open(out_file+'.pkl', 'wb') as f:
pickle.dump(out_dm, f, protocol=2)
return adj_full[0], adj_full[1], A_hat
def sort_dist_matrix(mat, row_col_names):
"""
sort the distance matrix by seg_id_nat
:return:
"""
df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names)
df = df.sort_index(axis=0)
df = df.sort_index(axis=1)
sensor_id_to_ind = {}
for i, sensor_id in enumerate(df.columns):
sensor_id_to_ind[sensor_id] = i
return row_col_names, sensor_id_to_ind, df
#check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx')
#if __name__ == "__main__":
check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full',
obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full',
pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output',
train_start_date=['1985-10-01', '2016-10-01'],
train_end_date=['2006-09-30', '2020-09-30'],
val_start_date='2006-10-01',
val_end_date='2016-09-30',
test_start_date=['1980-10-01', '2020-10-01'],
test_end_date=['1985-09-30', '2021-09-30'],
x_vars=["seg_rain", "seg_tave_air", "seginc_swrad", "seg_length", "seginc_potet", "seg_slope", "seg_humid",
"seg_elev"],
y_vars=['seg_tave_water'],
primary_variable='temp',
seq_length=365,
period=np.nan,
offset=1,
out_file = 'data/DRB_gwn_full')
'''if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default="data/METR-LA", help="Output directory.")
parser.add_argument("--traffic_df_filename", type=str, default="data/metr-la.h5", help="Raw traffic readings.",)
parser.add_argument("--seq_length_x", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--seq_length_y", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--y_start", type=int, default=1, help="Y pred start", )
parser.add_argument("--dow", action='store_true',)
args = parser.parse_args()
if os.path.exists(args.output_dir):
reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
if reply[0] != 'y': exit
else:
os.makedirs(args.output_dir)
generate_train_val_test(args)
##### Reformat our inputs to match theirs.
df = pd.read_hdf("data/metr-la.h5")
seq_length_x = 12
seq_length_y = 12
y_start = 1
LAtrain = np.load('data/METR-LA/train.npz')
LAtest = np.load('data/METR-LA/test.npz')
LAval = np.load('data/METR-LA/val.npz')
LAtrain['x'].shape
LAtrain['y'].shape
LAtest['x'].shape
LAtest['y'].shape
check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3])
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_pre_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_pre_test'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_pre_val'],
)
''' | 2.421875 | 2 |
test/inference_correctness/dcn_multi_hot.py | x-y-z/HugeCTR | 130 | 4039 | import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name = "dcn",
max_eval_batches = 1,
batchsize_eval = 16384,
batchsize = 16384,
lr = 0.001,
vvgpu = [[0]],
repeat_dataset = True,
use_mixed_precision = False,
scaler = 1.0,
use_cuda_graph = True,
metrics_spec = {hugectr.MetricsType.AUC: 1.0})
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
source = ["./dcn_data/file_list.txt"],
eval_source = "./dcn_data/file_list_test.txt",
check_type = hugectr.Check_t.Sum,
num_workers = 16)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.0001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 13, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 300,
embedding_vec_size = 16,
combiner = "sum",
sparse_embedding_name = "sparse_embedding1",
bottom_name = "data1",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding1"],
top_names = ["reshape1"],
leading_dim=416))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape1", "dense"], top_names = ["concat1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["concat1"],
top_names = ["slice11", "slice12"],
ranges=[(0,429),(0,429)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,
bottom_names = ["slice11"],
top_names = ["multicross1"],
num_layers=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["slice12"],
top_names = ["fc1"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc1"],
top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu1"],
top_names = ["dropout1"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dropout1"],
top_names = ["fc2"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc2"],
top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu2"],
top_names = ["dropout2"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["dropout2", "multicross1"],
top_names = ["concat2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.graph_to_json(graph_config_file = "/dump_infer/dcn.json")
model.fit(max_iter = 2300, display = 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix = "/dump_infer/dcn")
model.export_predictions("/dump_infer/dcn_pred_" + str(2000), "/dump_infer/dcn_label_" + str(2000))
from hugectr.inference import InferenceParams, CreateInferenceSession
import numpy as np
batch_size = 16384
num_batches = 1
data_source = "./dcn_data/file_list_test.txt"
inference_params = InferenceParams(model_name = "dcn",
max_batchsize = batch_size,
hit_rate_threshold = 1.0,
dense_model_file = "/dump_infer/dcn_dense_2000.model",
sparse_model_files = ["/dump_infer/dcn0_sparse_2000.model"],
device_id = 0,
use_gpu_embedding_cache = False,
cache_size_percentage = 1.0,
i64_input_key = False,
use_mixed_precision = False,
use_cuda_graph = True)
inference_session = CreateInferenceSession("/dump_infer/dcn.json", inference_params)
predictions = inference_session.predict(num_batches = num_batches,
source = data_source,
data_reader_type = hugectr.DataReaderType_t.Norm,
check_type = hugectr.Check_t.Sum)
ground_truth = np.loadtxt("/dump_infer/dcn_pred_2000")
diff = predictions - ground_truth
mse = np.mean(diff * diff)
if mse > 1e-3:
    raise RuntimeError("Too large mse between DCN multi hot inference and training: {}".format(mse))
else:
print("DCN multi hot inference results are consistent with those during training, mse: {}".format(mse)) | 1.429688 | 1 |
src/metarl/envs/dm_control/dm_control_env.py | neurips2020submission11699/metarl | 2 | 4047 | from dm_control import suite
from dm_control.rl.control import flatten_observation
from dm_env import StepType
import gym
import numpy as np
from metarl.envs import Step
from metarl.envs.dm_control.dm_control_viewer import DmControlViewer
class DmControlEnv(gym.Env):
"""
Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_
"""
def __init__(self, env, name=None):
self._name = name or type(env.task).__name__
self._env = env
self._viewer = None
@classmethod
def from_suite(cls, domain_name, task_name):
return cls(suite.load(domain_name, task_name),
name='{}.{}'.format(domain_name, task_name))
def step(self, action):
time_step = self._env.step(action)
return Step(
flatten_observation(time_step.observation)['observations'],
time_step.reward, time_step.step_type == StepType.LAST,
**time_step.observation)
def reset(self):
time_step = self._env.reset()
return flatten_observation(time_step.observation)['observations']
def render(self, mode='human'):
# pylint: disable=inconsistent-return-statements
if mode == 'human':
if not self._viewer:
title = 'dm_control {}'.format(self._name)
self._viewer = DmControlViewer(title=title)
self._viewer.launch(self._env)
self._viewer.render()
return None
elif mode == 'rgb_array':
return self._env.physics.render()
else:
raise NotImplementedError
def close(self):
if self._viewer:
self._viewer.close()
self._env.close()
self._viewer = None
self._env = None
def _flat_shape(self, observation):
        return sum(int(np.prod(v.shape)) for k, v in observation.items())
@property
def action_space(self):
action_spec = self._env.action_spec()
if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or
np.inf in action_spec.maximum):
return gym.spaces.Discrete(np.prod(action_spec.shape))
else:
return gym.spaces.Box(action_spec.minimum,
action_spec.maximum,
dtype=np.float32)
@property
def observation_space(self):
flat_dim = self._flat_shape(self._env.observation_spec())
return gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=[flat_dim],
dtype=np.float32)
def __getstate__(self):
d = self.__dict__.copy()
d['_viewer'] = None
return d
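# Example usage (a sketch; assumes the dm_control suite's 'cartpole' domain and
# 'balance' task are installed locally):
# env = DmControlEnv.from_suite('cartpole', 'balance')
# first_obs = env.reset()
# step = env.step(env.action_space.sample())
# env.close()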
| 1.632813 | 2 |
Numbers/Roman Number Generator/tests.py | fossabot/IdeaBag2-Solutions | 10 | 4095 | #!/usr/bin/env python3
import unittest
from roman_number_generator import arabic_to_roman
class Test(unittest.TestCase):
    def test_arabic_to_roman(self):
self.assertRaises(ValueError, arabic_to_roman, 4000)
self.assertEqual(arabic_to_roman(4), "IV")
self.assertEqual(arabic_to_roman(12), "XII")
self.assertEqual(arabic_to_roman(20), "XX")
if __name__ == "__main__":
unittest.main()
| 1.578125 | 2 |
portfolio/urls.py | ramza007/Ramza.io | 3 | 4119 | from django.conf.urls import url
from django.urls import path, include,re_path
from . import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('', views.index, name='index'),
path('about', views.about, name='about'),
path('projects', views.projects, name='projects'),
path('photos', views.photos, name='photos'),
re_path(r'^api/projects/$', views.ProjectList.as_view()),
re_path(r'^api-token-auth/', obtain_auth_token),
re_path(r'api/project/project-id/(?P<pk>[0-9]+)/$', views.ProjectDescription.as_view()),
]
| 1.171875 | 1 |
frontend/config.py | lcbm/cs-data-ingestion | 0 | 4135 | """Flask App configuration file."""
import logging
import os
import dotenv
import frontend.constants as constants
dotenv.load_dotenv(os.path.join(constants.BASEDIR, "frontend.env"))
class Base:
"""Configuration class used as base for all environments."""
DEBUG = False
TESTING = False
LOGGING_FORMAT = "[%(asctime)s] %(levelname)s in %(message)s"
LOGGING_LOCATION = "frontend.log"
LOGGING_LEVEL = os.environ.get("LOGGING_LEVEL", logging.DEBUG)
class Development(Base):
"""Configuration class for development environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = True
TESTING = False
ENV = "dev"
class Staging(Base):
"""Configuration class for development staging environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = False
TESTING = True
ENV = "staging"
class Production(Base):
"""Configuration class for development production environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = False
TESTING = False
ENV = "prod"
config = {
"development": "frontend.config.Development",
"staging": "frontend.config.Staging",
"production": "frontend.config.Production",
"default": "frontend.config.Development",
}
def configure_app(app):
"""Configures the Flask app according to the FLASK_ENV
    environment variable. If FLASK_ENV is not defined, the
    'default' configuration is used.
Parameters
----------
app: flask.Flask
Flask app Module.
"""
# Configure app
config_name = os.environ.get("FLASK_ENV", "default")
app.config.from_object(config[config_name])
# Configure logging
handler = logging.FileHandler(app.config["LOGGING_LOCATION"])
handler.setLevel(app.config["LOGGING_LEVEL"])
formatter = logging.Formatter(app.config["LOGGING_FORMAT"])
handler.setFormatter(formatter)
app.logger.addHandler(handler)
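# Example usage (a sketch; assumes an application factory elsewhere in the package):
# from flask import Flask
# app = Flask(__name__)
# configure_app(app)
# app.logger.info("running in %s mode", app.config["ENV"])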
| 1.75 | 2 |
BigData/sparkTask/test.py | Rainstyd/rainsty | 1 | 4143 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author: rainsty
@file: test.py
@time: 2020-01-04 18:36:57
@description:
"""
import os
from pyspark.sql import SparkSession
os.environ['JAVA_HOME'] = '/root/jdk'
os.environ['SPARK_HOME'] = '/root/spark'
os.environ['PYTHON_HOME'] = "/root/python"
os.environ['PYSPARK_PYTHON'] = "/usr/bin/python"
os.environ['SPARK_MASTER_IP'] = 'rainsty'
def create_spark_context():
sc = SparkSession.builder \
.appName("TestSparkSession") \
.master("spark://rainsty:7077") \
.config('spark.executor.num', '1')\
.config('spark.executor.memory', '512m')\
.config("spark.executor.cores", '1')\
.config('spark.cores.max', '1')\
.config('spark.driver.memory', '512m') \
.getOrCreate()
return sc
logFile = "/root/spark/README.md"
spark = create_spark_context()
logData = spark.read.text(logFile).cache()
numAs = logData.filter(logData.value.contains('a')).count()
numBs = logData.filter(logData.value.contains('b')).count()
print("Lines with a: %i, lines with b: %i" % (numAs, numBs))
spark.stop()
| 1.601563 | 2 |
copy_block_example.py | MilesCranmer/bifrost_paper | 0 | 4151 | from copy import deepcopy
import bifrost as bf
from bifrost.pipeline import TransformBlock
from bifrost.ndarray import copy_array
class CopyBlock(TransformBlock):# $\tikzmark{block-start}$
"""Copy the input ring to output ring"""
def __init__(self, iring, space):
super(CopyBlock, self).__init__(iring)
self.orings = [self.create_ring(space=space)]
def on_sequence(self, iseq):
return deepcopy(iseq.header)
def on_data(self, ispan, ospan):
copy_array(ospan.data, ispan.data)#$\tikzmark{block-end}$
def copy_block(iring, space):
return CopyBlock(iring, space)
bc = bf.BlockChainer()
bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096)
bc.custom(copy_block)(space='cuda')# $\tikzmark{gpu-start}$
bc.views.split_axis('time', 256, label='fine_time')
bc.blocks.fft(axes='fine_time', axis_labels='freq')
bc.blocks.detect(mode='scalar')
bc.blocks.transpose(['time', 'pol', 'freq'])#$\tikzmark{gpu-end}$
bc.blocks.copy(space='system')
bc.blocks.quantize('i8')
bc.blocks.write_sigproc()
pipeline = bf.get_default_pipeline()# $\tikzmark{pipeline-start}$
pipeline.shutdown_on_signals()
pipeline.run()#$\tikzmark{pipeline-end}$
| 1.742188 | 2 |
asr/dataloaders/am_dataloader.py | Z-yq/audioSamples.github.io | 1 | 4167 | import logging
import random
import numpy as np
import pypinyin
import tensorflow as tf
from augmentations.augments import Augmentation
from utils.speech_featurizers import SpeechFeaturizer
from utils.text_featurizers import TextFeaturizer
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import time
class AM_DataLoader():
def __init__(self, config_dict, training=True):
self.speech_config = config_dict['speech_config']
self.phone_config = config_dict['inp_config']
self.text_config = config_dict['tar_config']
self.running_config=config_dict['running_config']
self.augment_config = config_dict['augments_config']
self.streaming = self.speech_config['streaming']
self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket']
self.batch = config_dict['running_config']['batch_size']
self.speech_featurizer = SpeechFeaturizer(self.speech_config)
self.phone_featurizer = TextFeaturizer(self.phone_config)
self.text_featurizer = TextFeaturizer(self.text_config)
self.make_file_list( training)
self.augment = Augmentation(self.augment_config)
self.init_text_to_vocab()
self.epochs = 1
self.steps = 0
def return_data_types(self):
return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32)
def return_data_shape(self):
return (
tf.TensorShape([self.batch, None, 1]),
tf.TensorShape([self.batch, ]),
tf.TensorShape([self.batch, None]),
tf.TensorShape([self.batch, ]),
tf.TensorShape([self.batch, None]),
)
def get_per_epoch_steps(self):
return len(self.train_list) // self.batch
def eval_per_epoch_steps(self):
return len(self.test_list) // self.batch
def init_text_to_vocab(self):
pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']],
'调小': [['tiáo'], ['xiǎo']],
'调亮': [['tiáo'], ['liàng']],
'调暗': [['tiáo'], ['àn']],
'肖': [['xiāo']],
'英雄传': [['yīng'], ['xióng'], ['zhuàn']],
'新传': [['xīn'], ['zhuàn']],
'外传': [['wài'], ['zhuàn']],
'正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]
})
def text_to_vocab_func(txt):
pins = pypinyin.pinyin(txt)
pins = [i[0] for i in pins]
phones = []
for pin in pins:
if pin in self.phone_featurizer.vocab_array:
phones += [pin]
else:
phones += list(pin)
# print(phones)
return phones
self.text_to_vocab = text_to_vocab_func
def make_file_list(self, training=True):
train_list=self.speech_config['train_list']
test_list=self.speech_config['eval_list']
if training:
with open(train_list, encoding='utf-8') as f:
train_list = f.readlines()
train_list = [i.strip() for i in train_list if i != '']
self.train_list = train_list
np.random.shuffle(self.train_list)
with open(test_list, encoding='utf-8') as f:
data = f.readlines()
data = [i.strip() for i in data if i != '']
self.test_list = data
self.train_offset = 0
self.test_offset = 0
logging.info('load train list {} test list {}'.format(len(self.train_list), len(self.test_list)))
else:
with open(test_list, encoding='utf-8') as f:
data = f.readlines()
data = [i.strip() for i in data if i != '']
self.test_list = data
self.test_offset = 0
def only_chinese(self, word):
txt = ''
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
txt += ch
else:
continue
return txt
def eval_data_generator(self):
sample = []
speech_features = []
input_length = []
phones = []
phones_length = []
txts = []
max_input = 0
batch = self.batch
for i in range(batch * 10):
line = self.test_list[self.test_offset]
self.test_offset += 1
if self.test_offset > len(self.test_list) - 1:
self.test_offset = 0
wp, txt = line.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info(
'{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
sample.append(line)
if len(sample) == batch:
break
if self.streaming:
max_input = max_input // self.chunk * self.chunk + self.chunk
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
if self.streaming:
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
max_input = max_input // self.chunk * self.chunk + self.chunk
max_in_len = max_input // self.chunk
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
max_in_len *= chunk_times
input_length = np.clip(input_length, 0, max_in_len)
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]),
padding='post', value=self.phone_featurizer.pad)
txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]), padding='post',
value=self.text_featurizer.pad)
x = np.array(speech_features, 'float32')
phones = np.array(phones, 'int32')
txts = np.array(txts, 'int32')
input_length = np.array(input_length, 'int32')
phones_length = np.array(phones_length, 'int32')
return x, input_length, phones, phones_length, txts
def check_valid(self, txt, vocab_list):
if len(txt) == 0:
return False
for n in txt:
if n in vocab_list:
pass
else:
return n
return True
def generate(self, train=True):
sample = []
speech_features = []
input_length = []
phones = []
phones_length = []
txts = []
max_input = 0
if train:
batch = self.batch * 3 // 4 if self.augment.available() else self.batch
else:
batch = self.batch
for i in range(batch * 10):
if train:
line = self.train_list[self.train_offset]
self.train_offset += 1
if self.train_offset > len(self.train_list) - 1:
self.train_offset = 0
np.random.shuffle(self.train_list)
self.epochs += 1
else:
line = self.test_list[self.test_offset]
self.test_offset += 1
if self.test_offset > len(self.test_list) - 1:
self.test_offset = 0
wp, txt = line.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
logging.info('{} load data failed,skip'.format(wp))
continue
if len(data) < 400:
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
logging.info(
'{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))
continue
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
sample.append(line)
if len(sample) == batch:
break
if train and self.augment.available():
sample = random.sample(sample, self.batch // 4)
for i in sample:
wp, txt = i.strip().split('\t')
try:
data = self.speech_featurizer.load_wav(wp)
except:
continue
if len(data) < 400:
logging.info('{} wav too short < 25ms,skip'.format(wp))
continue
elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:
continue
data = self.augment.process(data)
if self.speech_config['only_chinese']:
txt = self.only_chinese(txt)
if not self.streaming:
speech_feature = data / np.abs(data).max()
speech_feature = np.expand_dims(speech_feature, -1)
in_len = len(speech_feature) // (
self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *
self.speech_config['stride_ms'])
else:
speech_feature = data
speech_feature = np.expand_dims(speech_feature, -1)
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
in_len = len(speech_feature) // self.chunk
if len(speech_feature) % self.chunk != 0:
in_len += 1
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
in_len *= chunk_times
py = self.text_to_vocab(txt)
if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:
logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,
self.phone_featurizer.vocab_array)))
continue
if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:
logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt,
self.text_featurizer.vocab_array)))
continue
txt = list(txt)
phone_feature = self.phone_featurizer.extract(py)
text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]
if in_len < len(phone_feature):
logging.info('{} feature length < phone length,continue'.format(wp))
continue
max_input = max(max_input, len(speech_feature))
speech_features.append(speech_feature)
input_length.append(in_len)
phones.append(np.array(phone_feature))
txts.append(np.array(text_feature))
phones_length.append(len(phone_feature))
if self.streaming:
reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \
self.speech_config['stride_ms']
max_input = max_input // self.chunk * self.chunk + self.chunk
max_in_len = max_input // self.chunk
chunk_times = self.chunk // reduce
if self.chunk % reduce != 0:
chunk_times += 1
max_in_len *= chunk_times
input_length = np.clip(input_length, 0, max_in_len)
speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)
phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad)
txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad)
x = np.array(speech_features, 'float32')
phones = np.array(phones, 'int32')
txts = np.array(txts, 'int32')
input_length = np.array(input_length, 'int32')
phones_length = np.array(phones_length, 'int32')
return x, input_length, phones, phones_length,txts
def generator(self, train=True):
while 1:
s=time.time()
x, input_length, phones, phones_length,txts = self.generate(train)
e=time.time()
logging.info('load data cost time: {}'.format(e-s))
if x.shape[0] == 0:
logging.info('load data length zero,continue')
continue
yield x, input_length, phones, phones_length,txts
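# Example usage (a sketch; 'am_data.yml' is a hypothetical config file laid out
# with the keys this class expects):
# import yaml
# with open('am_data.yml', encoding='utf-8') as f:
#     config = yaml.safe_load(f)
# loader = AM_DataLoader(config, training=True)
# dataset = tf.data.Dataset.from_generator(
#     lambda: loader.generator(train=True),
#     output_types=loader.return_data_types(),
#     output_shapes=loader.return_data_shape(),
# )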
| 1.734375 | 2 |
projects/scocen/cmd_components_simple.py | mikeireland/chronostar | 4 | 4199 | """
Plot CMDs for each component.
"""
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.ion()
# Pretty plots
from fig_settings import *
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
comps_filename = lib.comps_filename
compnames = lib.compnames
colors = lib.colors
############################################
# Minimal probability required for membership
pmin_membership = 0.5
############################################
# how to split subplots
grid = [5, 5]
# CMD limits
xlim = [-1, 5]
ylim = [17, -3]
############################################
# Read data
try:
tab = tab0
comps = comps0
except:
tab0 = Table.read(data_filename)
Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in micro arcsec
tab0['Gmag'] = Gmag
comps0 = Table.read(comps_filename)
tab = tab0
comps = comps0
# Main sequence parametrization
# fitpar for pmag, rpmag
fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507]
poly = np.poly1d(fitpar)
x = np.linspace(1, 4, 100)
y = poly(x)
m = y > 4
yms = y[m]
xms = x[m]
def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim):
ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1)
ax.plot(xms, yms - 1, c='brown', label='1 mag above the median', linewidth=1, linestyle='--')
ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--')
ax.axvline(x=0.369, linewidth=0.5, color='k') # F
ax.axvline(x=0.767, linewidth=0.5, color='k') # G
ax.axvline(x=0.979, linewidth=0.5, color='k') # K
ax.axvline(x=1.848, linewidth=0.5, color='k') # M
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
return ax
print('Plotting %d components.'%len(comps))
fig=plt.figure()
for i, c in enumerate(comps):
ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if needed
comp_ID = c['comp_ID']
col=tab['membership%s'%comp_ID]
mask = col > pmin_membership
t=tab[mask]
if len(t)>100:
alpha=0.5
else:
alpha=1
t.sort('membership%s'%comp_ID)
#~ t.reverse()
#~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha)
ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet)
ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim)
age=c['Age']
ax.set_title('%s (%.2f$\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t)))
#~ plt.tight_layout()
plt.show()
| 2.046875 | 2 |
tests/test_handler.py | CJSoldier/webssh | 13 | 4247 | import unittest
import paramiko
from tornado.httputil import HTTPServerRequest
from tests.utils import read_file, make_tests_data_path
from webssh.handler import MixinHandler, IndexHandler, InvalidValueError
class TestMixinHandler(unittest.TestCase):
def test_get_real_client_addr(self):
handler = MixinHandler()
handler.request = HTTPServerRequest(uri='/')
self.assertIsNone(handler.get_real_client_addr())
ip = '127.0.0.1'
handler.request.headers.add('X-Real-Ip', ip)
self.assertEqual(handler.get_real_client_addr(), False)
handler.request.headers.add('X-Real-Port', '12345x')
self.assertEqual(handler.get_real_client_addr(), False)
handler.request.headers.update({'X-Real-Port': '12345'})
self.assertEqual(handler.get_real_client_addr(), (ip, 12345))
handler.request.headers.update({'X-Real-ip': None})
self.assertEqual(handler.get_real_client_addr(), False)
handler.request.headers.update({'X-Real-Port': '12345x'})
self.assertEqual(handler.get_real_client_addr(), False)
class TestIndexHandler(unittest.TestCase):
def test_get_specific_pkey_with_plain_key(self):
fname = 'test_rsa.key'
cls = paramiko.RSAKey
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_specific_pkey(cls, key, None)
self.assertIsInstance(pkey, cls)
pkey = IndexHandler.get_specific_pkey(cls, key, 'iginored')
self.assertIsInstance(pkey, cls)
pkey = IndexHandler.get_specific_pkey(cls, 'x'+key, None)
self.assertIsNone(pkey)
def test_get_specific_pkey_with_encrypted_key(self):
fname = 'test_rsa_password.key'
cls = paramiko.RSAKey
password = '<PASSWORD>'
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_specific_pkey(cls, key, password)
self.assertIsInstance(pkey, cls)
pkey = IndexHandler.get_specific_pkey(cls, 'x'+key, None)
self.assertIsNone(pkey)
with self.assertRaises(paramiko.PasswordRequiredException):
pkey = IndexHandler.get_specific_pkey(cls, key, None)
def test_get_pkey_obj_with_plain_key(self):
fname = 'test_ed25519.key'
cls = paramiko.Ed25519Key
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_pkey_obj(key, None, fname)
self.assertIsInstance(pkey, cls)
pkey = IndexHandler.get_pkey_obj(key, 'iginored', fname)
self.assertIsInstance(pkey, cls)
with self.assertRaises(InvalidValueError) as exc:
pkey = IndexHandler.get_pkey_obj('x'+key, None, fname)
self.assertIn('Invalid private key', str(exc))
def test_get_pkey_obj_with_encrypted_key(self):
fname = 'test_ed25519_password.key'
password = '<PASSWORD>'
cls = paramiko.Ed25519Key
key = read_file(make_tests_data_path(fname))
pkey = IndexHandler.get_pkey_obj(key, password, fname)
self.assertIsInstance(pkey, cls)
with self.assertRaises(InvalidValueError) as exc:
pkey = IndexHandler.get_pkey_obj(key, 'wrongpass', fname)
self.assertIn('Wrong password', str(exc))
with self.assertRaises(InvalidValueError) as exc:
pkey = IndexHandler.get_pkey_obj('x'+key, password, fname)
self.assertIn('Invalid private key', str(exc))
with self.assertRaises(paramiko.PasswordRequiredException):
pkey = IndexHandler.get_pkey_obj(key, '', fname)
| 1.632813 | 2 |
fedora_college/modules/content/views.py | fedora-infra/fedora-college | 2 | 4255 | # -*- coding: utf-8 -*-
import re
from unicodedata import normalize
from flask import Blueprint, render_template, current_app
from flask import redirect, url_for, g, abort
from sqlalchemy import desc
from fedora_college.core.database import db
from fedora_college.modules.content.forms import * # noqa
from fedora_college.core.models import * # noqa
from fedora_college.fedmsgshim import publish
from flask_fas_openid import fas_login_required
bundle = Blueprint('content', __name__, template_folder='templates')
from fedora_college.modules.content.media import * # noqa
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
# Verify if user is authenticated
def authenticated():
return hasattr(g, 'fas_user') and g.fas_user
# generate url slug
def slugify(text, delim=u'-'):
"""Generates an slightly worse ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delim.join(result))
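# For example (a sketch): slugify(u'Fedora College 101!') -> u'fedora-college-101'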
# attach tags to a content entry
def attach_tags(tags, content):
rem = TagsMap.query.filter_by(content_id=content.content_id).all()
for r in rem:
db.session.delete(r)
db.session.commit()
for tag in tags:
tag_db = Tags.query.filter_by(tag_text=tag).first()
if tag_db is None:
tag_db = Tags(tag)
db.session.add(tag_db)
db.session.commit()
Map = TagsMap(tag_db.tag_id, content.content_id)
db.session.add(Map)
db.session.commit()
# delete content
@bundle.route('/content/delete/<posturl>', methods=['GET', 'POST'])
@bundle.route('/content/delete/<posturl>/', methods=['GET', 'POST'])
@fas_login_required
def delete_content(posturl=None):
if posturl is not None:
db.session.rollback()
content = Content.query.filter_by(slug=posturl).first_or_404()
rem = TagsMap.query.filter_by(
content_id=content.content_id).all()
        # delete mapped tags
for r in rem:
db.session.delete(r)
comments = Comments.query.filter_by(
content_id=content.content_id).all()
        # delete comments with foreign keys
for r in comments:
db.session.delete(r)
db.session.delete(content)
db.session.commit()
return redirect(url_for('profile.user',
nickname=g.fas_user['username']))
abort(404)
# add / edit more content
@bundle.route('/content/add/', methods=['GET', 'POST'])
@bundle.route('/content/add', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>/', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>', methods=['GET', 'POST'])
@fas_login_required
def addcontent(posturl=None):
if authenticated():
form = CreateContent()
form_action = url_for('content.addcontent')
media = Media.query.order_by(desc(Media.timestamp)).limit(10).all()
if posturl is not None:
content = Content.query.filter_by(slug=posturl).first_or_404()
form = CreateContent(obj=content)
if form.validate_on_submit():
form.populate_obj(content)
tags = str(form.tags.data).split(',')
attach_tags(tags, content)
content.rehtml()
db.session.commit()
'''Publish the message'''
msg = content.getdata()
msg['title'] = content.title
msg['link'] = current_app.config[
'EXTERNAL_URL'] + content.slug
publish(
topic=current_app.config['CONTENT_EDIT_TOPIC'],
msg=msg
)
if content.type_content == "blog":
print url_for('content.blog', slug=posturl)
return redirect(url_for('content.blog', slug=posturl))
return redirect(url_for('home.content', slug=posturl))
else:
if form.validate_on_submit():
url_name = slugify(form.title.data)
content = Content(form.title.data,
url_name,
form.description.data,
form.active.data,
form.tags.data,
g.fas_user['username'],
form.type_content.data
)
tags = str(form.tags.data).split(',')
try:
db.session.add(content)
db.session.commit()
attach_tags(tags, content)
'''Publish the message'''
msg = content.getdata()
msg['title'] = content.title
msg['link'] = current_app.config[
'EXTERNAL_URL'] + url_name
publish(
topic=current_app.config['CONTENT_CREATE_TOPIC'],
msg=msg
)
if content.type_content == "blog":
return redirect(url_for('content.blog', slug=posturl))
return redirect(url_for('home.content', slug=url_name))
# Duplicate entry
                except Exception as e:
                    db.session.rollback()
                    return str(e)
tags = Tags.query.all()
return render_template('content/edit_content.html', form=form,
form_action=form_action, title="Create Content",
media=media[0:5], tags=tags)
abort(404)
# View Blog post
@bundle.route('/blog', methods=['GET', 'POST'])
@bundle.route('/blog/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>', methods=['GET', 'POST'])
@bundle.route('/blog/page/<id>', methods=['GET', 'POST'])
@bundle.route('/blog/page/<id>', methods=['GET', 'POST'])
def blog(slug=None, id=0):
id = int(id)
screen = Content.query. \
filter_by(
type_content="lecture",
active=True
).limit(10).all()
if slug is not None:
try:
posts = Content.query. \
filter_by(slug=slug).all()
except:
posts = "No such posts in database."
else:
try:
posts = Content.query. \
filter_by(type_content="blog").all()
if id > 0:
posts = posts[id - 1:id + 5]
else:
posts = posts[0:5]
except:
posts = []
return render_template('blog/index.html',
title='Blog',
content=posts,
screen=screen,
id=id,
slug=slug
)
| 1.453125 | 1 |
jaxformer/hf/sample.py | salesforce/CodeGen | 105 | 4263 | # Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import os
import re
import time
import random
import argparse
import torch
from transformers import GPT2TokenizerFast
from jaxformer.hf.codegen.modeling_codegen import CodeGenForCausalLM
########################################################################
# util
class print_time:
def __init__(self, desc):
self.desc = desc
def __enter__(self):
print(self.desc)
self.t = time.time()
def __exit__(self, type, value, traceback):
print(f'{self.desc} took {time.time()-self.t:.02f}s')
def set_env():
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def set_seed(seed, deterministic=True):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = deterministic
torch.backends.cudnn.benchmark = not deterministic
# torch.use_deterministic_algorithms(deterministic)
def cast(model, fp16=True):
if fp16:
model.half()
return model
########################################################################
# model
def create_model(ckpt, fp16=True):
if fp16:
return CodeGenForCausalLM.from_pretrained(ckpt, revision='float16', torch_dtype=torch.float16, low_cpu_mem_usage=True)
else:
return CodeGenForCausalLM.from_pretrained(ckpt)
def create_tokenizer():
t = GPT2TokenizerFast.from_pretrained('gpt2')
t.max_model_input_sizes['gpt2'] = 1e20
return t
def include_whitespace(t, n_min=2, n_max=20, as_special_tokens=False):
t.add_tokens([' ' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens)
return t
def include_tabs(t, n_min=2, n_max=20, as_special_tokens=False):
t.add_tokens(['\t' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens)
return t
def create_custom_gpt2_tokenizer():
t = create_tokenizer()
t = include_whitespace(t=t, n_min=2, n_max=32, as_special_tokens=False)
t = include_tabs(t=t, n_min=2, n_max=10, as_special_tokens=False)
return t
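# Note (a sketch of the effect): after the calls above, runs of 2-31 spaces and
# 2-9 tabs are single tokens, so indented source code tokenizes to far fewer
# tokens than with the stock GPT-2 tokenizer.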
########################################################################
# sample
def sample(
device,
model,
tokenizer,
context,
pad_token_id,
num_return_sequences=1,
temp=0.2,
top_p=0.95,
max_length_sample=128,
max_length=2048
):
input_ids = tokenizer(
context,
truncation=True,
padding=True,
max_length=max_length,
return_tensors='pt',
).input_ids
input_ids_len = input_ids.shape[1]
assert input_ids_len < max_length
with torch.no_grad():
input_ids = input_ids.to(device)
tokens = model.generate(
input_ids,
do_sample=True,
num_return_sequences=num_return_sequences,
temperature=temp,
max_length=input_ids_len + max_length_sample,
top_p=top_p,
pad_token_id=pad_token_id,
use_cache=True,
)
text = tokenizer.batch_decode(tokens[:, input_ids_len:, ...])
return text
def truncate(completion):
def find_re(string, pattern, start_pos):
m = pattern.search(string, start_pos)
return m.start() if m else -1
terminals = [
re.compile(r, re.MULTILINE)
for r in
[
'^#',
re.escape('<|endoftext|>'),
"^'''",
'^"""',
'\n\n\n'
]
]
prints = list(re.finditer('^print', completion, re.MULTILINE))
if len(prints) > 1:
completion = completion[:prints[1].start()]
defs = list(re.finditer('^def', completion, re.MULTILINE))
if len(defs) > 1:
completion = completion[:defs[1].start()]
start_pos = 0
terminals_pos = [pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1]
if len(terminals_pos) > 0:
return completion[:min(terminals_pos)]
else:
return completion
def test_truncate():
assert truncate('\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#') == '\nif len_a > len_b:\n result = a\nelse:\n result = b'
########################################################################
# main
def main():
# (0) constants
models_nl = ['codegen-350M-nl', 'codegen-2B-nl', 'codegen-6B-nl', 'codegen-16B-nl']
models_pl = ['codegen-350M-multi', 'codegen-2B-multi', 'codegen-6B-multi', 'codegen-16B-multi', 'codegen-350M-mono', 'codegen-2B-mono', 'codegen-6B-mono', 'codegen-16B-mono']
models = models_nl + models_pl
# (1) params
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, choices=models, default='codegen-350M-mono')
parser.add_argument('--device', type=str, default='cuda:0')
parser.add_argument('--rng-seed', type=int, default=42)
parser.add_argument('--rng-deterministic', type=bool, default=True)
parser.add_argument('--p', type=float, default=0.95)
parser.add_argument('--t', type=float, default=0.2)
parser.add_argument('--max-length', type=int, default=128)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--no-fp16', action="store_false")
parser.add_argument('--pad', type=int, default=50256)
parser.add_argument('--context', type=str, default='def helloworld():')
args = parser.parse_args()
# (2) preamble
set_env()
set_seed(args.rng_seed, deterministic=args.rng_deterministic)
device = torch.device(args.device)
if device.type == "cpu":
args.no_fp16 = False
if args.model.startswith("codegen-16B"):
args.no_fp16 = True
ckpt = f'./checkpoints/{args.model}'
# (3) load
with print_time('loading parameters'):
model = create_model(ckpt=ckpt, fp16=args.no_fp16).to(device)
with print_time('loading tokenizer'):
if args.model in models_pl:
tokenizer = create_custom_gpt2_tokenizer()
else:
tokenizer = create_tokenizer()
tokenizer.padding_side = 'left'
tokenizer.pad_token = args.pad
# (4) sample
with print_time('sampling'):
completion = sample(device=device, model=model, tokenizer=tokenizer, context=args.context, pad_token_id=args.pad, num_return_sequences=args.batch_size, temp=args.t, top_p=args.p, max_length_sample=args.max_length)[0]
truncation = truncate(completion)
print('=' * 100)
print(completion)
print('=' * 100)
print(args.context+truncation)
print('=' * 100)
if __name__ == '__main__':
test_truncate()
main()
print('done.')
| 1.554688 | 2 |
Chibrary/utils.py | chiro2001/chibrary | 0 | 4271 | import json
import re
from flask import request, abort, jsonify
from Chibrary import config
from Chibrary.config import logger
from Chibrary.exceptions import *
from functools import wraps
from urllib import parse
from Chibrary.server import db
def parse_url_query(url: str) -> dict:
if not url.lower().startswith('http://') \
and not url.lower().startswith('https://'):
return {}
query = url[url.rindex('/') + 1:]
if '?' not in query:
return {}
query = query[query.index('?') + 1:]
lines = query.split('&')
result = {}
for line in lines:
if line.count('=') != 1:
continue
key, val = line.split('=')
        # note the type conversion handling here
if val == 'undefined':
val = None
else:
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
pass
if val is not None:
if type(val) is str:
result[key] = parse.unquote(val)
else:
result[key] = val
return result
def form_url_query(url: str, data: dict):
# if not url.lower().startswith('http://') \
# and not url.lower().startswith('https://'):
# logger.warning('Provided wrong url %s !' % url)
# return url
# if len(data) == 0:
# return url
# query = '?'
# for key in data:
    #     # special-case handling (?)
# if type(data[key]) is str and '/' in data[key]:
# query = query + parse.urlencode({key: data[key]}) + '&'
# else:
# query = query + key + '=' + parse.quote(str(data[key])) + '&'
# query = query[:-1]
# return url + query
    # this works around the '+' vs '%20' encoding pitfall
return url + '?' + parse.urlencode(data).replace('+', '%20')
def remove_ids_dfs(data: dict):
if '_id' in data:
del data['_id']
for key in data:
if type(data[key]) is dict:
data[key] = remove_ids_dfs(data[key])
return data
"""
Return value format:
{
code: ...,
message: ...,
data: ...,
}
"""
def make_result(code: int, message=None, data=None):
result = {
'code': code,
}
    # pick the message based on the code
if message is None:
try:
result['message'] = config.code[str(code)]
        except KeyError:
logger.warning('Error code %s not found!' % code)
result['message'] = config.code['0']
else:
result['message'] = message
if data is not None:
        # make sure to strip every '_id' element
data = remove_ids_dfs(data)
result['data'] = data
return result
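# For example (a sketch): make_result(0, data={'user': {'name': 'chiro', '_id': 1}})
# returns {'code': 0, 'message': <message configured for code '0'>,
# 'data': {'user': {'name': 'chiro'}}}, with every '_id' key stripped recursively.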
def make_error_result(error):
return make_result(1, message=str(error))
def dump(data):
return json.dumps(data)
def check_args(args: dict, requirements: list):
for r in requirements:
if r not in args:
return False
return True
def format_file_size(size_by_bytes: int) -> str:
units = ['B', 'KB', 'MB', 'GB', 'TB']
    # the final value should fall between 1 and 999
index = 0
unit = units[index]
while size_by_bytes > 1000:
index = index + 1
unit = units[index]
size_by_bytes = size_by_bytes / 1000
if index == len(units):
break
if size_by_bytes > 20:
return "%.0f%s" % (size_by_bytes, unit)
return "%.2f%s" % (size_by_bytes, unit)
# the client must send an "Authorization: {token}" request header
def login_check(f):
@wraps(f)
def decorated(*args, **kwargs):
headers = dict(request.headers)
if 'Authorization' not in headers:
return make_result(3) # login error
token = headers['Authorization']
if db.token_find_by_token(token) is None:
return make_result(3) # login error
return f(*args, **kwargs)
return decorated
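# Example usage (a sketch; assumes a Flask app or blueprint defined elsewhere):
# @app.route('/api/books')
# @login_check
# def list_books():
#     return jsonify(make_result(0, data={'books': []}))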
# the client must send an "Authorization: {token}" request header
def admin_check(f):
@wraps(f)
def decorated(*args, **kwargs):
headers = dict(request.headers)
if 'Authorization' not in headers:
return make_result(3) # login error
token = headers['Authorization']
token_data = db.token_find_by_token(token)
if token_data is None:
return make_result(3) # login error
        # a user level of 10 or above grants admin privileges
user = db.user_find(username=token_data['username'])
if user is None:
            return make_result(3)  # login error; such a token is not valid
if user['info']['level'] < 10:
return make_result(10) # No permission
return f(*args, **kwargs)
return decorated
# must be called during a request; aborts immediately if the user cannot be resolved
def get_user_from_headers():
headers = dict(request.headers)
if 'Authorization' not in headers:
abort(jsonify(make_result(3))) # login error
token = headers['Authorization']
token_data = db.token_find_by_token(token)
if token_data is None:
abort(jsonify(make_result(3))) # login error
    # a user level of 10 or above grants admin privileges
user = db.user_find(username=token_data['username'])
if user is None:
        abort(jsonify(make_result(3)))  # login error; such a token is not valid
return user
def check_admin_abort():
headers = dict(request.headers)
if 'Authorization' not in headers:
abort(jsonify(make_result(3))) # login error
token = headers['Authorization']
token_data = db.token_find_by_token(token)
if token_data is None:
abort(jsonify(make_result(3))) # login error
    # a user level of 10 or above grants admin privileges
user = db.user_find(username=token_data['username'])
if user is None:
        abort(jsonify(make_result(3)))  # login error; such a token is not valid
if user['info']['level'] < 10:
abort(jsonify(make_result(10))) # No permission
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
# try:
# import unicodedata
# unicodedata.numeric(s)
# return True
# except (TypeError, ValueError):
# pass
return False
# def url_check(url: str):
# url = url.lower()
# reg = "^(https|http|ftp|rtsp|mms)\\://?([a-zA-Z0-9\\.\\-]+(\\:[a-zA-Z0-9\\.&%\\$\\-]+)*@)?((25[0-5]|2" \
# "[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]" \
# "{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|" \
# "2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|([a-zA-Z0-9\\-]+\\.)*[a-zA-Z0-9\\-]+\\.[a-zA-Z]" \
# "{2,4})(\\:[0-9]+)?(/[^/][a-zA-Z0-9\\.\\,\\?\\'\\\\/\\+&%\\$\\=~_\\-@]*)*$"
# print(re.search(url, reg))
if __name__ == '__main__':
print(parse_url_query('http://blog.com/sss/ssss/s?wd=dsfa&a=fdsa&a=1&b=1.1&a=s'))
print(format_file_size(20250000))
# print(url_check('http://www.bilibili.com/'))
| 1.601563 | 2 |
python/csv/csv_dict_writer.py | y2ghost/study | 0 | 4279 | import csv
def csv_dict_writer(path, headers, data):
with open(path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',',
fieldnames=headers)
writer.writeheader()
for record in data:
writer.writerow(record)
if __name__ == '__main__':
data = '''book_title,author,publisher,pub_date,isbn
Python 101,<NAME>, <NAME>,2020,123456789
wxPython Recipes,<NAME>,Apress,2018,978-1-4842-3237-8
Python Interviews,<NAME>,Packt Publishing,2018,9781788399081'''
records = []
for line in data.splitlines():
records.append(line.strip().split(','))
headers = records.pop(0)
list_of_dicts = []
for row in records:
my_dict = dict(zip(headers, row))
list_of_dicts.append(my_dict)
csv_dict_writer('output_dict.csv', headers, list_of_dicts)
| 2.203125 | 2 |
torrents/migrations/0011_auto_20190223_2345.py | 2600box/harvest | 9 | 4287 | # Generated by Django 2.1.7 on 2019-02-23 23:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('torrents', '0010_auto_20190223_0326'),
]
operations = [
migrations.AlterModelOptions(
name='realm',
options={'ordering': ('name',)},
),
]
| 0.488281 | 0 |
eventstreams_sdk/adminrest_v1.py | IBM/eventstreams-python-sdk | 2 | 4295 | # coding: utf-8
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM OpenAPI SDK Code Generator Version: 3.25.0-2b3f843a-20210115-164628
"""
The administration REST API for IBM Event Streams on Cloud.
"""
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class AdminrestV1(BaseService):
"""The adminrest V1 service."""
DEFAULT_SERVICE_URL = 'https://adminrest.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'adminrest'
@classmethod
def new_instance(cls,
service_name: str = DEFAULT_SERVICE_NAME,
) -> 'AdminrestV1':
"""
Return a new client for the adminrest service using the specified
parameters and external configuration.
"""
authenticator = get_authenticator_from_environment(service_name)
service = cls(
authenticator
)
service.configure_service(service_name)
return service
def __init__(self,
authenticator: Authenticator = None,
) -> None:
"""
Construct a new client for the adminrest service.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
#########################
# default
#########################
def create_topic(self,
*,
name: str = None,
partitions: int = None,
partition_count: int = None,
configs: List['ConfigCreate'] = None,
**kwargs
) -> DetailedResponse:
"""
Create a new topic.
Create a new topic.
:param str name: (optional) The name of topic to be created.
:param int partitions: (optional) The number of partitions.
        :param int partition_count: (optional) The number of partitions; this field
takes precedence over 'partitions'. Default value is 1 if not specified.
:param List[ConfigCreate] configs: (optional) The config properties to be
set for the new topic.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if configs is not None:
configs = [convert_model(x) for x in configs]
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_topic')
headers.update(sdk_headers)
data = {
'name': name,
'partitions': partitions,
'partition_count': partition_count,
'configs': configs
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/topics'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def list_topics(self,
*,
topic_filter: str = None,
per_page: int = None,
page: int = None,
**kwargs
) -> DetailedResponse:
"""
Get a list of topics.
Returns a list containing information about all of the Kafka topics that are
defined for an instance of the Event Streams service. If there are currently no
topics defined then an empty list is returned.
:param str topic_filter: (optional) A filter to be applied to the topic
names. A simple filter can be specified as a string with asterisk (`*`)
wildcards representing 0 or more characters, e.g. `topic-name*` will filter
all topic names that begin with the string `topic-name` followed by any
character sequence. A more complex filter pattern can be used by
surrounding a regular expression in forward slash (`/`) delimiters, e.g.
`/topic-name.* /`.
        :param int per_page: (optional) The number of topic names to be returned.
:param int page: (optional) The page number to be returned. The number 1
represents the first page. The default value is 1.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `List[TopicDetail]` result
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_topics')
headers.update(sdk_headers)
params = {
'topic_filter': topic_filter,
'per_page': per_page,
'page': page
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/topics'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_topic(self,
topic_name: str,
**kwargs
) -> DetailedResponse:
"""
Get detailed information on a topic.
Get detailed information on a topic.
:param str topic_name: The topic name for the topic to be listed.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `TopicDetail` object
"""
if topic_name is None:
raise ValueError('topic_name must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_topic')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['topic_name']
path_param_values = self.encode_path_vars(topic_name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/admin/topics/{topic_name}'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def delete_topic(self,
topic_name: str,
**kwargs
) -> DetailedResponse:
"""
Delete a topic.
Delete a topic.
:param str topic_name: The topic name for the topic to be listed.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if topic_name is None:
raise ValueError('topic_name must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_topic')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['topic_name']
path_param_values = self.encode_path_vars(topic_name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/admin/topics/{topic_name}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def update_topic(self,
topic_name: str,
*,
new_total_partition_count: int = None,
configs: List['ConfigUpdate'] = None,
**kwargs
) -> DetailedResponse:
"""
Increase the number of partitions and/or update one or more topic configuration parameters.
Increase the number of partitions and/or update one or more topic configuration
parameters.
:param str topic_name: The topic name for the topic to be listed.
:param int new_total_partition_count: (optional) The new partition number
to be increased.
:param List[ConfigUpdate] configs: (optional) The config properties to be
updated for the topic. Valid config keys are 'cleanup.policy',
'retention.ms', 'retention.bytes', 'segment.bytes', 'segment.ms',
'segment.index.bytes'.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if topic_name is None:
raise ValueError('topic_name must be provided')
if configs is not None:
configs = [convert_model(x) for x in configs]
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_topic')
headers.update(sdk_headers)
data = {
'new_total_partition_count': new_total_partition_count,
'configs': configs
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['topic_name']
path_param_values = self.encode_path_vars(topic_name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/admin/topics/{topic_name}'.format(**path_param_dict)
request = self.prepare_request(method='PATCH',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def get_mirroring_topic_selection(self,
**kwargs
) -> DetailedResponse:
"""
Get current topic selection for mirroring.
Get current topic selection for mirroring.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_mirroring_topic_selection')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/mirroring/topic-selection'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def replace_mirroring_topic_selection(self,
*,
includes: List[str] = None,
**kwargs
) -> DetailedResponse:
"""
Replace topic selection for mirroring.
Replace topic selection for mirroring. This operation replaces the complete set of
mirroring topic selections.
:param List[str] includes: (optional)
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='replace_mirroring_topic_selection')
headers.update(sdk_headers)
data = {
'includes': includes
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/mirroring/topic-selection'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def get_mirroring_active_topics(self,
**kwargs
) -> DetailedResponse:
"""
Get topics that are being actively mirrored.
Get topics that are being actively mirrored.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `MirroringActiveTopics` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_mirroring_active_topics')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/mirroring/active-topics'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
##############################################################################
# Models
##############################################################################
class ReplicaAssignmentBrokers():
"""
ReplicaAssignmentBrokers.
:attr List[int] replicas: (optional)
"""
def __init__(self,
*,
replicas: List[int] = None) -> None:
"""
Initialize a ReplicaAssignmentBrokers object.
:param List[int] replicas: (optional)
"""
self.replicas = replicas
@classmethod
def from_dict(cls, _dict: Dict) -> 'ReplicaAssignmentBrokers':
"""Initialize a ReplicaAssignmentBrokers object from a json dictionary."""
args = {}
if 'replicas' in _dict:
args['replicas'] = _dict.get('replicas')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ReplicaAssignmentBrokers object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'replicas') and self.replicas is not None:
_dict['replicas'] = self.replicas
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ReplicaAssignmentBrokers object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ReplicaAssignmentBrokers') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ReplicaAssignmentBrokers') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfigCreate():
"""
ConfigCreate.
:attr str name: (optional) The name of the config property.
:attr str value: (optional) The value for a config property.
"""
def __init__(self,
*,
name: str = None,
value: str = None) -> None:
"""
Initialize a ConfigCreate object.
:param str name: (optional) The name of the config property.
:param str value: (optional) The value for a config property.
"""
self.name = name
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigCreate':
"""Initialize a ConfigCreate object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'value' in _dict:
args['value'] = _dict.get('value')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ConfigCreate object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConfigCreate object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigCreate') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ConfigCreate') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfigUpdate():
"""
ConfigUpdate.
:attr str name: (optional) The name of the config property.
:attr str value: (optional) The value for a config property.
:attr bool reset_to_default: (optional) When true, the value of the config
property is reset to its default value.
"""
def __init__(self,
*,
name: str = None,
value: str = None,
reset_to_default: bool = None) -> None:
"""
Initialize a ConfigUpdate object.
:param str name: (optional) The name of the config property.
:param str value: (optional) The value for a config property.
:param bool reset_to_default: (optional) When true, the value of the config
property is reset to its default value.
"""
self.name = name
self.value = value
self.reset_to_default = reset_to_default
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigUpdate':
"""Initialize a ConfigUpdate object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'value' in _dict:
args['value'] = _dict.get('value')
if 'reset_to_default' in _dict:
args['reset_to_default'] = _dict.get('reset_to_default')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ConfigUpdate object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
if hasattr(self, 'reset_to_default') and self.reset_to_default is not None:
_dict['reset_to_default'] = self.reset_to_default
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConfigUpdate object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigUpdate') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ConfigUpdate') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class MirroringActiveTopics():
"""
Topics that are being actively mirrored.
:attr List[str] active_topics: (optional)
"""
def __init__(self,
*,
active_topics: List[str] = None) -> None:
"""
Initialize a MirroringActiveTopics object.
:param List[str] active_topics: (optional)
"""
self.active_topics = active_topics
@classmethod
def from_dict(cls, _dict: Dict) -> 'MirroringActiveTopics':
"""Initialize a MirroringActiveTopics object from a json dictionary."""
args = {}
if 'active_topics' in _dict:
args['active_topics'] = _dict.get('active_topics')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a MirroringActiveTopics object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'active_topics') and self.active_topics is not None:
_dict['active_topics'] = self.active_topics
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this MirroringActiveTopics object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'MirroringActiveTopics') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'MirroringActiveTopics') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class MirroringTopicSelection():
"""
Mirroring topic selection payload.
:attr List[str] includes: (optional)
"""
def __init__(self,
*,
includes: List[str] = None) -> None:
"""
Initialize a MirroringTopicSelection object.
:param List[str] includes: (optional)
"""
self.includes = includes
@classmethod
def from_dict(cls, _dict: Dict) -> 'MirroringTopicSelection':
"""Initialize a MirroringTopicSelection object from a json dictionary."""
args = {}
if 'includes' in _dict:
args['includes'] = _dict.get('includes')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a MirroringTopicSelection object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'includes') and self.includes is not None:
_dict['includes'] = self.includes
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this MirroringTopicSelection object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'MirroringTopicSelection') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'MirroringTopicSelection') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ReplicaAssignment():
"""
ReplicaAssignment.
:attr int id: (optional) The ID of the partition.
:attr ReplicaAssignmentBrokers brokers: (optional)
"""
def __init__(self,
*,
id: int = None,
brokers: 'ReplicaAssignmentBrokers' = None) -> None:
"""
Initialize a ReplicaAssignment object.
:param int id: (optional) The ID of the partition.
:param ReplicaAssignmentBrokers brokers: (optional)
"""
self.id = id
self.brokers = brokers
@classmethod
def from_dict(cls, _dict: Dict) -> 'ReplicaAssignment':
"""Initialize a ReplicaAssignment object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
if 'brokers' in _dict:
args['brokers'] = ReplicaAssignmentBrokers.from_dict(_dict.get('brokers'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ReplicaAssignment object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'brokers') and self.brokers is not None:
_dict['brokers'] = self.brokers.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ReplicaAssignment object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ReplicaAssignment') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ReplicaAssignment') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TopicConfigs():
"""
TopicConfigs.
:attr str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:attr str min_insync_replicas: (optional) The value of config property
'min.insync.replicas'.
:attr str retention_bytes: (optional) The value of config property
'retention.bytes'.
:attr str retention_ms: (optional) The value of config property 'retention.ms'.
:attr str segment_bytes: (optional) The value of config property
'segment.bytes'.
:attr str segment_index_bytes: (optional) The value of config property
'segment.index.bytes'.
:attr str segment_ms: (optional) The value of config property 'segment.ms'.
"""
def __init__(self,
*,
cleanup_policy: str = None,
min_insync_replicas: str = None,
retention_bytes: str = None,
retention_ms: str = None,
segment_bytes: str = None,
segment_index_bytes: str = None,
segment_ms: str = None) -> None:
"""
Initialize a TopicConfigs object.
:param str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:param str min_insync_replicas: (optional) The value of config property
'min.insync.replicas'.
:param str retention_bytes: (optional) The value of config property
'retention.bytes'.
:param str retention_ms: (optional) The value of config property
'retention.ms'.
:param str segment_bytes: (optional) The value of config property
'segment.bytes'.
:param str segment_index_bytes: (optional) The value of config property
'segment.index.bytes'.
:param str segment_ms: (optional) The value of config property
'segment.ms'.
"""
self.cleanup_policy = cleanup_policy
self.min_insync_replicas = min_insync_replicas
self.retention_bytes = retention_bytes
self.retention_ms = retention_ms
self.segment_bytes = segment_bytes
self.segment_index_bytes = segment_index_bytes
self.segment_ms = segment_ms
@classmethod
def from_dict(cls, _dict: Dict) -> 'TopicConfigs':
"""Initialize a TopicConfigs object from a json dictionary."""
args = {}
if 'cleanup.policy' in _dict:
args['cleanup_policy'] = _dict.get('cleanup.policy')
if 'min.insync.replicas' in _dict:
args['min_insync_replicas'] = _dict.get('min.insync.replicas')
if 'retention.bytes' in _dict:
args['retention_bytes'] = _dict.get('retention.bytes')
if 'retention.ms' in _dict:
args['retention_ms'] = _dict.get('retention.ms')
if 'segment.bytes' in _dict:
args['segment_bytes'] = _dict.get('segment.bytes')
if 'segment.index.bytes' in _dict:
args['segment_index_bytes'] = _dict.get('segment.index.bytes')
if 'segment.ms' in _dict:
args['segment_ms'] = _dict.get('segment.ms')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TopicConfigs object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cleanup_policy') and self.cleanup_policy is not None:
_dict['cleanup.policy'] = self.cleanup_policy
if hasattr(self, 'min_insync_replicas') and self.min_insync_replicas is not None:
_dict['min.insync.replicas'] = self.min_insync_replicas
if hasattr(self, 'retention_bytes') and self.retention_bytes is not None:
_dict['retention.bytes'] = self.retention_bytes
if hasattr(self, 'retention_ms') and self.retention_ms is not None:
_dict['retention.ms'] = self.retention_ms
if hasattr(self, 'segment_bytes') and self.segment_bytes is not None:
_dict['segment.bytes'] = self.segment_bytes
if hasattr(self, 'segment_index_bytes') and self.segment_index_bytes is not None:
_dict['segment.index.bytes'] = self.segment_index_bytes
if hasattr(self, 'segment_ms') and self.segment_ms is not None:
_dict['segment.ms'] = self.segment_ms
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TopicConfigs object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'TopicConfigs') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TopicConfigs') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TopicDetail():
"""
TopicDetail.
:attr str name: (optional) The name of the topic.
:attr int partitions: (optional) The number of partitions.
:attr int replication_factor: (optional) The number of replication factor.
:attr int retention_ms: (optional) The value of config property 'retention.ms'.
:attr str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:attr TopicConfigs configs: (optional)
    :attr List[ReplicaAssignment] replica_assignments: (optional) The replica
assignment of the topic.
"""
def __init__(self,
*,
name: str = None,
partitions: int = None,
replication_factor: int = None,
retention_ms: int = None,
cleanup_policy: str = None,
configs: 'TopicConfigs' = None,
replica_assignments: List['ReplicaAssignment'] = None) -> None:
"""
Initialize a TopicDetail object.
:param str name: (optional) The name of the topic.
:param int partitions: (optional) The number of partitions.
:param int replication_factor: (optional) The number of replication factor.
:param int retention_ms: (optional) The value of config property
'retention.ms'.
:param str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:param TopicConfigs configs: (optional)
        :param List[ReplicaAssignment] replica_assignments: (optional) The replica
assignment of the topic.
"""
self.name = name
self.partitions = partitions
self.replication_factor = replication_factor
self.retention_ms = retention_ms
self.cleanup_policy = cleanup_policy
self.configs = configs
self.replica_assignments = replica_assignments
@classmethod
def from_dict(cls, _dict: Dict) -> 'TopicDetail':
"""Initialize a TopicDetail object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'partitions' in _dict:
args['partitions'] = _dict.get('partitions')
if 'replicationFactor' in _dict:
args['replication_factor'] = _dict.get('replicationFactor')
if 'retentionMs' in _dict:
args['retention_ms'] = _dict.get('retentionMs')
if 'cleanupPolicy' in _dict:
args['cleanup_policy'] = _dict.get('cleanupPolicy')
if 'configs' in _dict:
args['configs'] = TopicConfigs.from_dict(_dict.get('configs'))
if 'replicaAssignments' in _dict:
args['replica_assignments'] = [ReplicaAssignment.from_dict(x) for x in _dict.get('replicaAssignments')]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TopicDetail object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'partitions') and self.partitions is not None:
_dict['partitions'] = self.partitions
if hasattr(self, 'replication_factor') and self.replication_factor is not None:
_dict['replicationFactor'] = self.replication_factor
if hasattr(self, 'retention_ms') and self.retention_ms is not None:
_dict['retentionMs'] = self.retention_ms
if hasattr(self, 'cleanup_policy') and self.cleanup_policy is not None:
_dict['cleanupPolicy'] = self.cleanup_policy
if hasattr(self, 'configs') and self.configs is not None:
_dict['configs'] = self.configs.to_dict()
if hasattr(self, 'replica_assignments') and self.replica_assignments is not None:
_dict['replicaAssignments'] = [x.to_dict() for x in self.replica_assignments]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TopicDetail object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'TopicDetail') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TopicDetail') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
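# Hedged usage sketch (added for illustration, not part of the generated SDK): construct a
# client and list topics matching a wildcard filter. The bearer token and service URL are
# placeholders; production code normally uses AdminrestV1.new_instance() with credentials
# configured in the environment.
def _example_list_topics():
    from ibm_cloud_sdk_core.authenticators import BearerTokenAuthenticator
    authenticator = BearerTokenAuthenticator('<access token>')  # placeholder credential
    client = AdminrestV1(authenticator=authenticator)
    client.set_service_url('https://adminrest.example.cloud.ibm.com')  # placeholder URL
    # '*' acts as a wildcard; a regular expression can be passed between '/' delimiters.
    response = client.list_topics(topic_filter='orders-*', per_page=10, page=1)
    return response.get_result()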
| 0.902344 | 1 |
comcenterproject/project/helpers.py | tongpa/bantak_program | 0 | 4303 | # -*- coding: utf-8 -*-
"""WebHelpers used in project."""
#from webhelpers import date, feedgenerator, html, number, misc, text
from markupsafe import Markup
def bold(text):
return Markup('<strong>%s</strong>' % text) | 1.34375 | 1 |
tools/archive/create_loadable_configs.py | madelinemccombe/iron-skillet | 0 | 4311 | # Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: <NAME> <<EMAIL>>
'''
Palo Alto Networks create_loadable_configs.py
Provides rendering of configuration templates with user defined values
Output is a set of loadable full configurations and set commands for Panos and Panorama
Edit the config_variables.yaml values and then run the script
This software is provided without support, warranty, or guarantee.
Use at your own risk.
'''
import datetime
import os
import shutil
import sys
import time
import getpass
import oyaml
from jinja2 import Environment, FileSystemLoader
from passlib.hash import des_crypt
from passlib.hash import md5_crypt
from passlib.hash import sha256_crypt
from passlib.hash import sha512_crypt
defined_filters = ['md5_hash', 'des_hash', 'sha512_hash']
def myconfig_newdir(myconfigdir_name, foldertime):
'''
create a new main loadable_configs folder if required then new subdirectories for configs
:param myconfigdir_name: prefix folder name from the my_variables.py file
:param foldertime: datetime when script run; to be used as suffix of folder name
:return: the myconfigdir full path name
'''
# get the full path to the config directory we want (panos / panorama)
myconfigpath = os.path.abspath(os.path.join('..', 'loadable_configs'))
if os.path.isdir(myconfigpath) is False:
os.mkdir(myconfigpath, mode=0o755)
print('created new loadable config directory')
# check that configs folder exists and if not create a new one
# then create snippets and full sub-directories
myconfigdir = '{0}/{1}-{2}'.format(myconfigpath, myconfigdir_name, foldertime)
if os.path.isdir(myconfigdir) is False:
os.mkdir(myconfigdir, mode=0o755)
print('\ncreated new archive folder {0}-{1}'.format(myconfigdir_name, foldertime))
if os.path.isdir('{0}/{1}'.format(myconfigdir, config_type)) is False:
os.mkdir('{0}/{1}'.format(myconfigdir, config_type))
print('created new subdirectories for {0}'.format(config_type))
return myconfigdir
def create_context(config_var_file):
# read the metafile to get variables and values
try:
with open(config_var_file, 'r') as var_metadata:
variables = oyaml.safe_load(var_metadata.read())
except IOError as ioe:
print(f'Could not open metadata file {config_var_file}')
print(ioe)
sys.exit()
# grab the metadata values and convert to key-based dictionary
jinja_context = dict()
for snippet_var in variables['variables']:
jinja_context[snippet_var['name']] = snippet_var['value']
return jinja_context
def template_render(filename, template_path, render_type, context):
'''
render the jinja template using the context value from config_variables.yaml
:param filename: name of the template file
:param template_path: path for the template file
:param render_type: type if full or set commands; aligns with folder name
:param context: dict of variables to render
:return: return the rendered xml file and set conf file
'''
print('..creating template for {0}'.format(filename))
env = Environment(loader=FileSystemLoader('{0}/{1}'.format(template_path, render_type)))
# load our custom jinja filters here, see the function defs below for reference
env.filters['md5_hash'] = md5_hash
env.filters['des_hash'] = des_hash
env.filters['sha512_hash'] = sha512_hash
template = env.get_template(filename)
rendered_template = template.render(context)
return rendered_template
def template_save(snippet_name, myconfigdir, config_type, element):
'''
after rendering the template save to the myconfig directory
each run saves with a unique prefix name + datetime
:param snippet_name: name of the output file
:param myconfigdir: path to the my_config directory
:param config_type: based on initial run list; eg. panos or panorama
:param element: xml element rendered based on input variables; used as folder name
:param render_type: type eg. if full or snippets; aligns with folder name
:return: no value returned (future could be success code)
'''
print('..saving template for {0}'.format(snippet_name))
filename = snippet_name
with open('{0}/{1}/{2}'.format(myconfigdir, config_type, filename), 'w') as configfile:
configfile.write(element)
# copy the variables file used for the render into the my_template folder
var_file = 'loadable_config_vars/config_variables.yaml'
if os.path.isfile('{0}/{1}'.format(myconfigdir, var_file)) is False:
vfilesrc = var_file
vfiledst = '{0}/{1}'.format(myconfigdir, var_file)
shutil.copy(vfilesrc, vfiledst)
return
# define functions for custom jinja filters
def md5_hash(txt):
'''
Returns the MD5 Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the phash field
in the configurations
'''
return md5_crypt.hash(txt)
def des_hash(txt):
'''
Returns the DES Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the phash field
in the configurations
'''
return des_crypt.hash(txt)
def sha256_hash(txt):
'''
Returns the SHA256 Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the
phash field in the configurations
'''
return sha256_crypt.hash(txt)
def sha512_hash(txt):
'''
Returns the SHA512 Hashed secret for use as a password hash in the PanOS configuration
:param txt: text to be hashed
:return: password hash of the string with salt and configuration information. Suitable to place in the
phash field in the configurations
'''
return sha512_crypt.hash(txt)
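# Hedged illustration (added, not part of the original tool): how the password-hash filters
# above are exposed to a Jinja2 template. The inline template string is an assumption for
# demonstration only; the real templates are loaded from ../templates/<config_type>/.
def _example_phash_render():
    env = Environment(loader=FileSystemLoader('.'))
    env.filters['md5_hash'] = md5_hash
    env.filters['des_hash'] = des_hash
    env.filters['sha512_hash'] = sha512_hash
    template = env.from_string('<phash>{{ ADMINISTRATOR_PASSWORD | sha512_hash }}</phash>')
    return template.render(ADMINISTRATOR_PASSWORD='example-only-password')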
def replace_variables(config_type, render_type, input_var):
'''
get the input variables and render the output configs with jinja2
inputs are read from the template directory and output to my_config
:param config_type: panos or panorama to read/write to the respective directories
:param archivetime: datetimestamp used for the output my_config folder naming
'''
config_variables = 'config_variables.yaml'
# create dict of values for the jinja template render
context = create_context(config_variables)
# update context dict with variables from user input
for snippet_var in input_var:
context[snippet_var] = input_var[snippet_var]
# get the full path to the output directory we want (panos / panorama)
template_path = os.path.abspath(os.path.join('..',
'templates', config_type))
# append to the sys path for module lookup
sys.path.append(template_path)
# output subdir located in loadable_configs dir
myconfig_path = myconfig_newdir(input_var['output_dir'], input_var['archive_time'])
# render full and set conf files
print('\nworking with {0} config template'.format(render_type))
if render_type == 'full':
filename = 'iron_skillet_{0}_full.xml'.format(config_type)
if render_type == 'set_commands':
filename = 'iron_skillet_{0}_full.conf'.format(config_type)
element = template_render(filename, template_path, render_type, context)
template_save(filename, myconfig_path, config_type, element)
print('\nconfigs have been created and can be found in {0}'.format(myconfig_path))
print('along with the metadata values used to render the configs\n')
return
if __name__ == '__main__':
# Use the timestamp to create a unique folder name
print('=' * 80)
print(' ')
print('Welcome to Iron-Skillet'.center(80))
print(' ')
print('=' * 80)
input_var = {}
# archive_time used as part of the my_config directory name
input_var['archive_time'] = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
print('\ndatetime used for folder creation: {0}\n'.format(input_var['archive_time']))
# this prompts for the prefix name of the output directory
input_var['output_dir'] = input('Enter the name of the output directory: ')
# this prompts for the superuser username to be added into the configuration; no default admin/admin used
input_var['ADMINISTRATOR_USERNAME'] = input('Enter the superuser administrator account username: ')
print('\na phash will be created for superuser {0} and added to the config file\n'.format(
input_var['ADMINISTRATOR_USERNAME']))
passwordmatch = False
# prompt for the superuser password to create a phash and store in the my_config files; no default admin/admin
while passwordmatch is False:
password1 = getpass.getpass("Enter the superuser administrator account password: ")
password2 = getpass.getpass("Enter password again to verify: ")
if password1 == password2:
            input_var['ADMINISTRATOR_PASSWORD'] = password1
            passwordmatch = True
else:
print('\nPasswords do not match. Please try again.\n')
# loop through all config types that have their respective template folders
for config_type in ['panos', 'panorama']:
for render_type in ['full', 'set_commands']:
replace_variables(config_type, render_type, input_var) | 1.367188 | 1 |
examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py | kagrze/ignite | 0 | 4319 | from typing import Callable, Optional, Tuple, Union
import numpy as np
from torch.utils.data import DataLoader, Sampler
from torch.utils.data.dataset import Subset, ConcatDataset
import torch.utils.data.distributed as data_dist
from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset
def get_train_val_loaders(root_path: str,
train_transforms: Callable,
val_transforms: Callable,
batch_size: int = 16,
num_workers: int = 8,
val_batch_size: Optional[int] = None,
pin_memory: bool = True,
random_seed: Optional[int] = None,
train_sampler: Optional[Union[Sampler, str]] = None,
val_sampler: Optional[Union[Sampler, str]] = None,
with_sbd: Optional[str] = None,
limit_train_num_samples: Optional[int] = None,
limit_val_num_samples: Optional[int] = None) -> Tuple[DataLoader, DataLoader, DataLoader]:
train_ds = get_train_dataset(root_path)
val_ds = get_val_dataset(root_path)
if with_sbd is not None:
sbd_train_ds = get_train_noval_sbdataset(with_sbd)
train_ds = ConcatDataset([train_ds, sbd_train_ds])
if random_seed is not None:
np.random.seed(random_seed)
if limit_train_num_samples is not None:
train_indices = np.random.permutation(len(train_ds))[:limit_train_num_samples]
train_ds = Subset(train_ds, train_indices)
if limit_val_num_samples is not None:
val_indices = np.random.permutation(len(val_ds))[:limit_val_num_samples]
val_ds = Subset(val_ds, val_indices)
# random samples for evaluation on training dataset
if len(val_ds) < len(train_ds):
train_eval_indices = np.random.permutation(len(train_ds))[:len(val_ds)]
train_eval_ds = Subset(train_ds, train_eval_indices)
else:
train_eval_ds = train_ds
train_ds = TransformedDataset(train_ds, transform_fn=train_transforms)
val_ds = TransformedDataset(val_ds, transform_fn=val_transforms)
train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms)
if isinstance(train_sampler, str):
assert train_sampler == 'distributed'
train_sampler = data_dist.DistributedSampler(train_ds)
if isinstance(val_sampler, str):
assert val_sampler == 'distributed'
val_sampler = data_dist.DistributedSampler(val_ds, shuffle=False)
train_loader = DataLoader(train_ds, shuffle=train_sampler is None,
batch_size=batch_size, num_workers=num_workers,
sampler=train_sampler,
pin_memory=pin_memory, drop_last=True)
val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
val_loader = DataLoader(val_ds, shuffle=False, sampler=val_sampler,
batch_size=val_batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
train_eval_loader = DataLoader(train_eval_ds, shuffle=False, sampler=val_sampler,
batch_size=val_batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
return train_loader, val_loader, train_eval_loader
def get_inference_dataloader(root_path: str,
mode: str,
transforms: Callable,
batch_size: int = 16,
num_workers: int = 8,
pin_memory: bool = True,
limit_num_samples: Optional[int] = None) -> DataLoader:
assert mode in ('train', 'test'), "Mode should be 'train' or 'test'"
get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset
dataset = get_dataset_fn(root_path, return_meta=True)
if limit_num_samples is not None:
indices = np.random.permutation(len(dataset))[:limit_num_samples]
dataset = Subset(dataset, indices)
dataset = TransformedDataset(dataset, transform_fn=transforms)
loader = DataLoader(dataset, shuffle=False,
batch_size=batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
return loader
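# Hedged usage sketch (added for illustration): building the loaders above from a training
# script. The dataset root and identity transforms are placeholders; the original example
# supplies real augmentation callables from its experiment configuration.
def _example_build_loaders():
    def identity(datapoint):
        return datapoint  # placeholder transform
    return get_train_val_loaders(root_path='/data/pascal_voc2012',  # assumed dataset location
                                 train_transforms=identity,
                                 val_transforms=identity,
                                 batch_size=8,
                                 num_workers=4,
                                 random_seed=42,
                                 limit_train_num_samples=64,  # keep the sketch small
                                 limit_val_num_samples=32)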
| 1.757813 | 2 |
src/dsrlib/ui/utils.py | fraca7/dsremap | 8 | 4335 | #!/usr/bin/env python3
import os
import contextlib
from PyQt5 import QtCore, QtWidgets
from dsrlib.settings import Settings
class LayoutBuilder:
def __init__(self, target):
self.target = target
self._stack = []
@contextlib.contextmanager
def _layout(self, cls, *args, **kwargs):
layout = cls()
self._stack.append(layout)
try:
yield layout
finally:
self._pop(*args, **kwargs)
def _pop(self, *args, **kwargs):
layout = self._stack.pop()
if self._stack:
parent = self._stack[-1]
if isinstance(layout, QtWidgets.QSplitter):
parent.addWidget(layout)
else:
if isinstance(parent, QtWidgets.QSplitter):
container = QtWidgets.QWidget(parent)
container.setLayout(layout)
parent.addWidget(container)
else:
parent.addLayout(layout, *args, **kwargs)
elif isinstance(self.target, QtWidgets.QMainWindow):
if isinstance(layout, QtWidgets.QSplitter):
self.target.setCentralWidget(layout)
else:
container = QtWidgets.QWidget(self.target)
container.setLayout(layout)
self.target.setCentralWidget(container)
else:
if isinstance(layout, QtWidgets.QSplitter):
layout2 = QtWidgets.QHBoxLayout()
layout2.setContentsMargins(0, 0, 0, 0)
layout2.addWidget(layout)
self.target.setLayout(layout2)
else:
self.target.setLayout(layout)
@contextlib.contextmanager
def hbox(self, *args, **kwargs): # pragma: no cover
with self._layout(QtWidgets.QHBoxLayout, *args, **kwargs) as layout:
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(1)
yield layout
@contextlib.contextmanager
def vbox(self, *args, **kwargs): # pragma: no cover
with self._layout(QtWidgets.QVBoxLayout, *args, **kwargs) as layout:
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(1)
yield layout
def stack(self, *args, **kwargs): # pragma: no cover
return self._layout(QtWidgets.QStackedLayout, *args, **kwargs)
def form(self, *args, **kwargs):
class _FormLayout(QtWidgets.QFormLayout):
def addLayout(self, layout):
self.addRow(layout)
def addRow(self, label, widget=None): # pylint: disable=C0111
if isinstance(label, str):
label = QtWidgets.QLabel(label)
label.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
label.setAlignment(QtCore.Qt.AlignVCenter)
if widget is None:
super().addRow(label)
else:
super().addRow(label, widget)
return self._layout(_FormLayout, *args, **kwargs)
def split(self, *args, **kwargs): # pragma: no cover
return self._layout(QtWidgets.QSplitter, *args, **kwargs)
def getSaveFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'save_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
while True:
name, dummy = QtWidgets.QFileDialog.getSaveFileName(parent, _('Save'), path, '*.%s' % extension, options=QtWidgets.QFileDialog.DontConfirmOverwrite)
if not name:
return None
if not name.endswith('.%s' % extension):
name = '%s.%s' % (name, extension)
if os.path.exists(name):
resp = QtWidgets.QMessageBox.question(parent,
_('Overwrite file?'),
_('This file already exists. Overwrite?'),
QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No|QtWidgets.QMessageBox.Cancel)
if resp == QtWidgets.QMessageBox.Yes:
settings.setValue(sname, os.path.dirname(name))
return name
if resp == QtWidgets.QMessageBox.No:
continue
return None
settings.setValue(sname, os.path.dirname(name))
return name
def getOpenFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'open_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
name, dummy = QtWidgets.QFileDialog.getOpenFileName(parent, _('Open file'), path, '*.%s' % extension if extension else '')
if name:
settings.setValue(sname, os.path.dirname(name))
return name
return None
class EnumComboBox(QtWidgets.QComboBox):
valueChanged = QtCore.pyqtSignal(object)
def __init__(self, *args, enum, value=None, **kwargs):
super().__init__(*args, **kwargs)
self._enum = enum
for item in enum:
self.addItem(enum.label(item), item)
if value is not None:
self.setValue(value)
self.currentIndexChanged.connect(self._emit)
def setValue(self, value):
for index, item in enumerate(self._enum):
if value == item:
self.setCurrentIndex(index)
break
else:
raise ValueError('Value "%s" not found in enum' % str(value))
def _emit(self, _):
self.valueChanged.emit(self.currentData())
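# Hedged usage sketch (added for illustration, not part of the original application): using
# LayoutBuilder to lay out a small dialog. The widgets are placeholders.
def _example_build_layout(dialog):
    builder = LayoutBuilder(dialog)
    with builder.vbox() as outer:
        with builder.hbox() as row:
            row.addWidget(QtWidgets.QLabel('Profile name'))
            row.addWidget(QtWidgets.QLineEdit())
        outer.addWidget(QtWidgets.QPushButton('Save'))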
| 1.851563 | 2 |
satt/trace/logger/panic.py | jnippula/satt | 54 | 4343 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
// Copyright (c) 2015 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'''
""" PanicLogger RAM-tracing
"""
import sys
import time
from logger import Logger
class PanicLogger(Logger):
""" Panic logger
"""
def __init__(self, control):
# Base class init call
Logger.__init__(self, control)
# Add default kernel module parameter for RAM-tracing
self._kernel_module_parameters += " trace_method=1 sideband_log_method=1"
# Add more option to command line input
self._parser.add_argument('-p', '--panic', action='store', help='Panic tracing mode: 1=Normal, 2=Hooked(default)',
required=False, default=2)
        self._parser.add_argument('-s', '--sideband', action='store', help='Panic sideband logging: 0=Off, 1=On(default)',
required=False, default=1)
self._parser.add_argument('-g', '--gbuffer', action='store', help='Dump trace data to gbuffer: 0=Off, 1=On(default)',
required=False, default=1)
self._parser.add_argument('-u', '--userspace', action='store', help='Exclude user space: 0=Off, 1=On(default)',
required=False, default=1)
self._parser.add_argument('-k', '--kernel', action='store', help='Exclude kernel: 0=Off(default), 1=On',
required=False, default=0)
self._parser.add_argument('-d', '--dump', action='store',
                                  help='Dump kernel and kernel modules for processing: 0=Off(default), 1=On',
required=False, default=0)
self.args = self._parser.parse_args()
self._kernel_module_parameters += " panic_tracer=" + str(self.args.panic)
self._kernel_module_parameters += " panic_sideband=" + str(self.args.sideband)
self._kernel_module_parameters += " panic_gbuffer=" + str(self.args.gbuffer)
self._kernel_module_parameters += " exclude_userspace=" + str(self.args.userspace)
self._kernel_module_parameters += " exclude_kernel=" + str(self.args.kernel)
def initialize(self):
self._debug_print("PanicLogger::initialize")
# Initialize Logger base class
Logger.initialize(self)
# Call start_tracing earlier to stop execution earlier
self.start_tracing()
def start_tracing(self):
self._debug_print("start_tracing")
trace_name, trace_path = self.get_trace_name("Enter <<trace name>> to start panic tracing? :")
if trace_name:
self.set_trace_path(trace_path, trace_name)
self.get_build_info()
# TODO Problem, there is no Sideband.bin info yet
# Quick Fix
# Start tracing, wait 100ms, Stop tracing, fetch sideband info
Logger.start_tracing(self)
time.sleep(0.2)
Logger.stop_tracing(self)
time.sleep(0.2)
Logger.get_sideband_data(self)
self.dump_kernel()
self.dump_linux_gate()
self.dump_kernel_modules()
Logger.start_tracing(self)
print ""
print "Panic tracing activated"
print "If panic happens, wait 10s and reboot device."
print ""
print "When device boot up run following command:"
print "sat-panic-fetch " + self.trace_name
sys.exit(0)
else:
print "Panic Tracer did not get started"
def stop_tracing(self):
return
def get_data(self):
return
def get_trace_data(self):
return
| 1.445313 | 1 |
fuzzers/011-cle-ffconfig/generate.py | tmichalak/prjuray | 39 | 4359 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
FDCE Primitive: D Flip-Flop with Clock Enable and Asynchronous Clear
FDPE Primitive: D Flip-Flop with Clock Enable and Asynchronous Preset
FDRE Primitive: D Flip-Flop with Clock Enable and Synchronous Reset
FDSE Primitive: D Flip-Flop with Clock Enable and Synchronous Set
LDCE Primitive: Transparent Data Latch with Asynchronous Clear and Gate Enable
LDPE Primitive: Transparent Data Latch with Asynchronous Preset and Gate Enable
'''
from prims import isff, isl
from utils.segmaker import Segmaker
segmk = Segmaker("design.bits", bits_per_word=16)
def loadtop():
'''
i,prim,loc,bel
0,FDPE,SLICE_X12Y100,C5FF
1,FDPE,SLICE_X15Y100,A5FF
2,FDPE_1,SLICE_X16Y100,B5FF
3,LDCE_1,SLICE_X17Y100,BFF
'''
f = open('top.txt', 'r')
f.readline()
ret = {}
for l in f:
i, prim, loc, bel, init = l.split(",")
i = int(i)
init = int(init)
ret[loc] = (i, prim, loc, bel, init)
return ret
top = loadtop()
def vs2i(s):
return {"1'b0": 0, "1'b1": 1}[s]
print("Loading tags from design.txt")
with open("design.txt", "r") as f:
for line in f:
'''
puts $fp "$type $tile $grid_x $grid_y $ff $bel_type $used $usedstr"
CLEM CLEM_X10Y137 30 13 SLICE_X13Y137/AFF REG_INIT 1 FDRE
CLEM CLEM_X10Y137 30 13 SLICE_X12Y137/D2FF FF_INIT 0
'''
line = line.split()
tile_type = line[0]
tile_name = line[1]
grid_x = line[2]
grid_y = line[3]
# Other code uses BEL name
# SLICE_X12Y137/D2FF
site_ff_name = line[4]
site, ff_name = site_ff_name.split('/')
ff_type = line[5]
used = int(line[6])
cel_prim = None
cel_name = None
if used:
cel_name = line[7]
cel_prim = line[8]
cinv = int(line[9])
init = vs2i(line[10])
# A B C D E F G H
which = ff_name[0]
# LUT6 vs LUT5 FF
is2 = '2' in ff_name
if used:
segmk.add_site_tag(site, "%s.ZINI" % ff_name, 1 ^ init)
'''
On name:
The primitives you listed have a control input to set the FF value to zero (clear/reset),
the other three primitives have a control input that sets the FF value to one.
Z => inversion
'''
segmk.add_site_tag(site, "%s.ZRST" % ff_name,
cel_prim in ('FDRE', 'FDCE', 'LDCE'))
segmk.compile()
segmk.write()
| 1.570313 | 2 |
BizPy/openpyxl/20200513/horizontal_chart.py | t2y/python-study | 18 | 4367 | import pandas as pd
from openpyxl import Workbook
from openpyxl.chart import BarChart, Reference
wb = Workbook()
ws = wb.active
df = pd.read_csv('population.csv')
ws.append(df.columns.tolist())
for row in df.values:
ws.append(list(row))
row_length = 1 + len(df.values)
values = Reference(ws, min_col=2, max_col=2, min_row=1, max_row=row_length)
categories = Reference(ws, min_col=1, min_row=2, max_row=row_length)
chart = BarChart()
chart.type = 'bar'
chart.style = 11
chart.shape = 4
chart.title = '都道府県別の人口'
chart.x_axis.title = '都道府県'
chart.y_axis.title = '人口'
chart.add_data(values, titles_from_data=True)
chart.set_categories(categories)
ws.add_chart(chart, 'A9')
wb.save('population_horizontal.xlsx')
| 2.34375 | 2 |
fast_lemon_api_test.py | a6502/fast_lemon_api | 0 | 4375 | #!/usr/bin/env pytest-3
from fastapi.testclient import TestClient
from fast_lemon_api import app
client = TestClient(app)
def test_get_root():
response = client.get("/")
assert response.status_code == 200
assert response.text == "Welcome to the fast-lemon-api!\n"
neworder = {
"isin": "blablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996943663,
"status": "open"
}
order_id = None
def test_post_orders1():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996943663,
})
assert response.status_code == 201
j = response.json()
#print(repr(j))
order_id = j.pop('uuid')
assert j == neworder
#assert 0
def test_post_orders2():
response = client.post('/orders/',
json={
"isin": "blablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'isin'],
'msg': 'ensure this value has at least 12 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 12
}
}]
}
def test_post_orders3():
response = client.post('/orders/',
json={
"isin": "blablablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 12
},
'loc': ['body', 'isin'],
'msg': 'ensure this value has at most 12 characters',
'type': 'value_error.any_str.max_length'
}]
}
def test_post_orders4():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": -1,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 0
},
'loc': ['body', 'limit_price'],
'msg': 'ensure this value is greater than 0',
'type': 'value_error.number.not_gt'
}]
}
def test_post_orders5():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "BUY!",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'enum_values': ['buy', 'sell']
},
'loc': ['body', 'side'],
'msg':
"value is not a valid enumeration member; permitted: 'buy', 'sell'",
'type': 'type_error.enum'
}]
}
def test_post_orders6():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.33333,
"side": "SELL",
"quantity": 0,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 0
},
'loc': ['body', 'quantity'],
'msg': 'ensure this value is greater than 0',
'type': 'value_error.number.not_gt'
}]
}
def test_post_orders8():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "SELL",
"quantity": 1.1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'quantity'],
'msg': 'value is not a valid integer',
'type': 'type_error.integer'
}]
}
def test_post_orders7():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "SELL",
"quantity": 2,
"valid_until": 1996
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'valid_until'],
'msg': 'valid_until cannot be in the past',
'type': 'value_error'
}]
}
| 1.765625 | 2 |
back2back/httpmulticlient.py | excentis/ByteBlower_python_examples | 2 | 4391 | """
HTTP MultiServer/MultiClient for the ByteBlower Python API.
All examples are guaranteed to work with Python 2.7 and above
Copyright 2018, Ex<NAME>.
"""
# Needed for python2 / python3 print function compatibility
from __future__ import print_function
# import the ByteBlower module
import byteblowerll.byteblower as byteblower
import time
configuration = {
# Address (IP or FQDN) of the ByteBlower server to use
'server_address': 'byteblower-tp-1300.lab.byteblower.excentis.com',
# Configuration for the first ByteBlower port.
# Will be used as HTTP server.
'port_1_config': {
'interface': 'trunk-1-13',
'mac': '00:bb:01:00:00:01',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
# 'ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', '64'],
# TCP port number to be used by the HTTP connection.
# On the HTTP server, this will be the port on which the server
# listens.
'tcp_port': 4096
},
# Configuration for the second ByteBlower port.
# Will be used as HTTP client.
'port_2_config': {
'interface': 'trunk-1-25',
'mac': '00:bb:01:00:00:02',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
# ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', '64'],
# TCP port range the HTTP Clients will use to connect with
# the HTTP server
'tcp_port_min': 32000,
'tcp_port_max': 50000
},
# HTTP Method
# HTTP Method can be GET or PUT
# - GET: Standard HTTP download, we retrieve data from the web server
# - PUT: Standard HTTP upload, the wireless endpoint will push data to the
# webserver
'http_method': 'GET',
# 'http_method': 'PUT',
# total duration, in nanoseconds.
# This is the duration of the flow. When this duration expires,
# all sessions will be stopped.
'duration': 10000000000,
# session duration, in nanoseconds
# Duration of the individual sessions
# 'session_duration': 1500000000,
'session_duration': None,
# session size, in bytes
# The number of bytes transmitted by a session
'session_size': 1 * 1000 * 1000,
# 'session_size': None,
# max concurrent sessions
# Maximum number of sessions that will be running simultaneously
'max_concurrent_sessions': 100,
# maximum number of sessions
# No more than this number of sessions will be created
# 0 means no limit
'max_total_sessions': 0,
# TOS value to use on the HTTP client (and server)
'tos': 0
}
class Example:
def __init__(self, **kwargs):
self.server_address = kwargs['server_address']
self.port_1_config = kwargs['port_1_config']
self.port_2_config = kwargs['port_2_config']
# Helper function, we can use this to parse the HTTP Method to the
# enumeration used by the API
from byteblowerll.byteblower import ParseHTTPRequestMethodFromString
http_method_arg = kwargs['http_method']
self.http_method = ParseHTTPRequestMethodFromString(http_method_arg)
self.duration = kwargs['duration']
self.session_duration = kwargs['session_duration']
self.session_size = kwargs['session_size']
self.max_concurrent_sessions = kwargs['max_concurrent_sessions']
self.max_total_sessions = kwargs['max_total_sessions']
self.tos = kwargs['tos']
self.server = None
self.port_1 = None
self.port_2 = None
def cleanup(self):
"""Clean up the created objects"""
byteblower_instance = byteblower.ByteBlower.InstanceGet()
if self.port_1:
self.server.PortDestroy(self.port_1)
self.port_1 = None
if self.port_2:
self.server.PortDestroy(self.port_2)
self.port_2 = None
if self.server is not None:
byteblower_instance.ServerRemove(self.server)
self.server = None
def run(self):
byteblower_instance = byteblower.ByteBlower.InstanceGet()
print("Connecting to ByteBlower server %s..." % self.server_address)
self.server = byteblower_instance.ServerAdd(self.server_address)
# Create the port which will be the HTTP server (port_1)
print("Creating HTTP Server port")
self.port_1 = self.provision_port(self.port_1_config)
print("Creating HTTP Client port")
# Create the port which will be the HTTP client (port_2)
self.port_2 = self.provision_port(self.port_2_config)
http_server_ip_address = self.port_1_config['ip_address']
# create a HTTP server
http_server = self.port_1.ProtocolHttpMultiServerAdd()
server_tcp_port = self.port_1_config['tcp_port']
if server_tcp_port is not None:
http_server.PortSet(server_tcp_port)
else:
server_tcp_port = http_server.PortGet()
# create a HTTP Client
http_client = self.port_2.ProtocolHttpMultiClientAdd()
# - remote endpoint
http_client.RemoteAddressSet(http_server_ip_address)
http_client.RemotePortSet(server_tcp_port)
# - local endpoint
http_client.LocalPortRangeSet(self.port_2_config['tcp_port_min'],
self.port_2_config['tcp_port_max'])
# Configure the direction.
# If the HTTP Method is GET,
# traffic will flow from the HTTP server to the HTTP client
# If the HTTP Method is PUT,
# traffic will flow from the HTTP client to the HTTP server
http_client.HttpMethodSet(self.http_method)
print("Server port:", self.port_1.DescriptionGet())
print("Client port:", self.port_2.DescriptionGet())
# let the HTTP server listen for requests
http_server.Start()
# - total duration of all sessions
http_client.DurationSet(self.duration)
# - how many connections can be created?
http_client.CumulativeConnectionLimitSet(self.max_total_sessions)
# - how many connections can be running at the same time
http_client.MaximumConcurrentRequestsSet(self.max_concurrent_sessions)
# - individual duration, can be size-based or time-based
if self.session_duration is not None:
# let the HTTP Client request a page of a specific duration
# to download...
http_client.SessionDurationSet(self.session_duration)
elif self.session_size is not None:
# let the HTTP Client request a page of a specific size...
http_client.SessionSizeSet(self.session_size)
else:
raise ValueError("Either duration or request_size must be configured")
print("Starting the HTTP client")
http_client.Start()
http_client_result = http_client.ResultGet()
for iteration in range(10):
time.sleep(1)
http_client_result.Refresh()
print("-" * 10)
print("Iteration", iteration+1)
print(" connections attempted", http_client_result.ConnectionsAttemptedGet())
print(" connections established", http_client_result.ConnectionsEstablishedGet())
print(" connections aborted", http_client_result.ConnectionsAbortedGet())
print(" connections refused", http_client_result.ConnectionsRefusedGet())
print("-" * 10)
http_client.Stop()
http_server.Stop()
print("Stopped the HTTP client")
request_status_value = http_client.StatusGet()
request_status_string = byteblower.ConvertHTTPMultiClientStatusToString(request_status_value)
http_client_result.Refresh()
tx_bytes = http_client_result.TcpTxByteCountGet()
tx_speed = http_client_result.TcpTxSpeedGet()
rx_bytes = http_client_result.TcpRxByteCountGet()
rx_speed = http_client_result.TcpRxSpeedGet()
http_server_result = http_server.ResultGet()
http_server_result.Refresh()
print("Requested Duration : {} nanoseconds".format(self.duration))
print("Status : {}".format(request_status_string))
print("Client Result data : {}".format(http_client_result.DescriptionGet()))
print("Server Result data : {}".format(http_server_result.DescriptionGet()))
return [
self.duration,
self.session_duration,
self.session_size,
self.max_total_sessions,
self.max_concurrent_sessions,
tx_bytes, rx_bytes,
tx_speed, rx_speed,
request_status_value
]
def provision_port(self, config):
port = self.server.PortCreate(config['interface'])
port_l2 = port.Layer2EthIISet()
port_l2.MacSet(config['mac'])
ip_config = config['ip']
if not isinstance(ip_config, list):
# Config is not static, DHCP or slaac
if ip_config.lower() == "dhcpv4":
port_l3 = port.Layer3IPv4Set()
port_l3.ProtocolDhcpGet().Perform()
config['ip_address'] = port_l3.IpGet()
elif ip_config.lower() == "dhcpv6":
port_l3 = port.Layer3IPv6Set()
port_l3.ProtocolDhcpGet().Perform()
config['ip_address'] = port_l3.IpDhcpGet()
elif ip_config.lower() == "slaac":
port_l3 = port.Layer3IPv6Set()
port_l3.StatelessAutoconfiguration()
config['ip_address'] = port_l3.IpStatelessGet()
else:
# Static configuration
if len(ip_config) == 3:
# IPv4
port_l3 = port.Layer3IPv4Set()
port_l3.IpSet(ip_config[0])
port_l3.NetmaskSet(ip_config[1])
port_l3.GatewaySet(ip_config[2])
config['ip_address'] = port_l3.IpGet()
elif len(ip_config) == 2:
port_l3 = port.Layer3IPv6Set()
# IPv6
address = ip_config[0]
prefix_length = ip_config[1]
ip = "{}/{}".format(address, prefix_length)
port_l3.IpManualAdd(ip)
config['ip_address'] = ip_config[0]
if not isinstance(config['ip_address'], str):
ip = config['ip_address'][0]
if '/' in ip:
config['ip_address'] = ip.split('/')[0]
print("Created port", port.DescriptionGet())
return port
# When this python module is called stand-alone, the run-function must be
# called. This approach makes it possible to include it in a series of
# examples.
if __name__ == "__main__":
example = Example(**configuration)
try:
example.run()
finally:
example.cleanup()
| 1.75 | 2 |
BST.py | boristown/leetcode | 1 | 4407 | class BST:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
@staticmethod
def array2BST(array):
'''
array:sorted array
'''
n = len(array)
if n == 0: return None
m = n//2
left,root,right = array[:m],array[m],array[m+1:]
return BST(root,BST.array2BST(left),BST.array2BST(right))
@staticmethod
def BST2array(node):
'''
node:BST node
'''
if not node: return []
return BST.BST2array(node.left)+[node.val]+BST.BST2array(node.right) | 2.59375 | 3 |
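

# Hypothetical usage sketch (not part of the original file): round-trip a sorted list
# through the balanced tree built by array2BST and flatten it back with the in-order
# traversal in BST2array. The sample values are illustrative only.
if __name__ == '__main__':
    data = [1, 3, 4, 7, 9, 12]
    root = BST.array2BST(data)          # middle element (7) becomes the root
    print(root.val)                     # 7
    print(BST.BST2array(root) == data)  # True: in-order traversal restores the input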
train.py | lck1201/simple-effective-3Dpose-baseline | 20 | 4439 | import pprint
import mxnet as mx
from mxnet import gluon
from mxnet import init
from lib.core.get_optimizer import *
from lib.core.metric import MPJPEMetric
from lib.core.loss import MeanSquareLoss
from lib.core.loader import JointsDataIter
from lib.network import get_net
from lib.net_module import *
from lib.utils import *
from lib.dataset.hm36 import hm36
from config import config, gen_config, update_config_from_args, s_args
config = update_config_from_args(config, s_args)
def main():
# Parse config and mkdir output
logger, final_Model_path = create_logger(config)
config.final_Model_path = final_Model_path
gen_config(os.path.join(final_Model_path, 'hyperParams.yaml'))
logger.info('Training config:{}\n'.format(pprint.pformat(config)))
# define context
if config.useGPU:
ctx = [mx.gpu(int(i)) for i in config.gpu.split(',')]
else:
ctx = mx.cpu()
logger.info("Using context:", ctx)
# dataset, generate trainset/ validation set
train_imdbs = []
valid_imdbs = []
for i in range(len(config.DATASET.train_image_set)):
logger.info("Construct Dataset:", config.DATASET.dbname[i], ", Dataset Path:", config.DATASET.dataset_path[i])
train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i],
config.DATASET.root_path[i],
config.DATASET.dataset_path[i]))
valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i],
config.DATASET.root_path[i],
config.DATASET.dataset_path[i],
config.final_Model_path))
data_names = ['hm36data']
label_names = ['hm36label']
train_data_iter = JointsDataIter(train_imdbs[0], runmode=0,
data_names = data_names, label_names=label_names,
shuffle=config.TRAIN.SHUFFLE, batch_size=len(ctx)*config.TRAIN.batchsize, logger=logger)
valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1,
data_names = data_names, label_names=label_names,
shuffle=False, batch_size=len(ctx)*config.TEST.batchsize, logger=logger)
assert train_data_iter.get_meanstd()['mean3d'].all() == valid_data_iter.get_meanstd()['mean3d'].all()
# network
net = get_net(config)
if config.resume:
ckp_path = os.path.join(config.resumeckp)
net.collect_params().load(ckp_path, ctx=ctx)
else:
net.initialize(init=init.MSRAPrelu(), ctx=ctx)
if config.NETWORK.hybrid:
net.hybridize()
logger.info(net)
# define loss and metric
mean3d = train_data_iter.get_meanstd()['mean3d']
std3d = train_data_iter.get_meanstd()['std3d']
train_metric = MPJPEMetric('train_metric', mean3d, std3d)
eval_metric = MPJPEMetric('valid_metric', mean3d, std3d)
loss = MeanSquareLoss()
# optimizer
optimizer, optimizer_params = get_optimizer(config, ctx)
# train and valid
TrainDBsize = train_data_iter.get_size()
ValidDBsize = valid_data_iter.get_size()
logger.info("Train DB size:", TrainDBsize, "Valid DB size:",ValidDBsize)
if not isinstance(train_data_iter, mx.io.PrefetchingIter):
train_data_iter = mx.io.PrefetchingIter(train_data_iter)
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
for epoch in range(config.TRAIN.begin_epoch, config.TRAIN.end_epoch):
trainNet(net, trainer, train_data_iter, loss, train_metric, epoch, config, logger=logger, ctx=ctx)
validNet(net, valid_data_iter, loss, eval_metric, epoch, config, logger=logger, ctx=ctx)
logger.kill()
if __name__ == '__main__':
main() | 1.53125 | 2 |
feast/DetectionModules/ldar_program.py | GeoSensorWebLab/FEAST_PtE | 10 | 4447 | """
This module defines the LDARProgram class.
"""
import numpy as np
import copy
from .repair import Repair
from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous
class LDARProgram:
"""
An LDAR program contains one or more detection methods and one or more repair methods. Each LDAR program records
the find and repair costs associated with all detection and repair methods in the program. The LDAR program
    deploys and runs the action methods of each detection and repair method contained in the program. The detection and
repair methods determine their own behavior at each time step.
"""
def __init__(self, gas_field, tech_dict):
"""
:param gas_field: a GasField object
:param tech_dict: a dict containing all of the detection methods to be employed by the LDAR program. The dict
must have the form {"name": DetectionMethod}. All of the relationships between detection methods and between
detection methods and repair methods must be defined by the dispatch_objects specified for each method.
"""
self.emissions = copy.deepcopy(gas_field.emissions)
self.emissions_timeseries = []
self.vents_timeseries = []
#self.emissions_results = ResultContinuous(units='g/s')
#self.vents_results = ResultContinuous(units='g/s')
self.tech_dict = tech_dict
self.repair = {}
self.repair_cost = ResultDiscrete(units='USD')
for tech_name, tech in tech_dict.items():
if type(tech.dispatch_object) is Repair:
self.repair[tech_name + ' ' + tech.dispatch_object.name] = tech.dispatch_object
def action(self, time, gas_field):
"""
Runs the detect method for every tech in tech_dict and runs the repair method
:param time: the simulation time object
:param gas_field: the simulation gas_field object
:return:
"""
for i, tech in enumerate(self.tech_dict.values()):
if hasattr(tech, 'survey_interval') and tech.survey_interval \
and np.mod(time.current_time, tech.survey_interval) < time.delta_t:
tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int)))
tech.detect(time, gas_field, self.emissions.get_current_emissions(time))
for rep in self.repair.values():
rep.repair(time, self.emissions)
def calc_rep_costs(self, time):
"""
Calculates the total repair costs up to time.current_time, assuming that all reparable emissions that have a
max end_time less than time.current_time have been repaired.
:param time: a FEAST time object
:return: None
"""
for em in self.emissions.emissions.index.unique():
empdf_temp = self.emissions.emissions.loc[[em]]
max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0]
if max_row.reparable & (max_row.end_time < time.current_time):
self.repair_cost.append_entry([max_row.end_time, max_row.repair_cost])
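

# Hypothetical wiring sketch (not part of the module): per the class docstring, tech_dict
# maps names to DetectionMethod objects, and each detection method's dispatch_object
# decides what happens with detected emissions (e.g. a Repair). The variable names below
# are assumptions; constructing the detection method itself is omitted.
#
#     ogi_survey.dispatch_object = follow_up_repair          # a Repair instance
#     program = LDARProgram(gas_field, {'ogi_survey': ogi_survey})
#     program.action(time, gas_field)                        # run one simulation step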
| 2.328125 | 2 |
src/robusta/core/model/events.py | kandahk/robusta | 0 | 4455 | import logging
import uuid
from enum import Enum
from typing import List, Optional, Dict, Any
from dataclasses import dataclass, field
from pydantic import BaseModel
from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler
from ..reporting.base import Finding, BaseBlock
class EventType(Enum):
KUBERNETES_TOPOLOGY_CHANGE = 1
PROMETHEUS = 2
MANUAL_TRIGGER = 3
SCHEDULED_TRIGGER = 4
class ExecutionEventBaseParams(BaseModel):
named_sinks: Optional[List[str]] = None
# Right now:
# 1. this is a dataclass but we need to make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/
# 2. this can't be a pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557)
# once the pydantic PR that addresses those issues is merged, this should be a pydantic class
# (note that we need to integrate with dataclasses because of hikaru)
@dataclass
class ExecutionBaseEvent:
findings: Dict[str, Finding] = field(default_factory=lambda: {})
named_sinks: Optional[List[str]] = None
    response: Dict[str, Any] = None  # Response returned to caller. For admission or manual triggers for example
stop_processing: bool = False
_scheduler: Optional[PlaybooksScheduler] = None
def set_scheduler(self, scheduler: PlaybooksScheduler):
self._scheduler = scheduler
def get_scheduler(self) -> PlaybooksScheduler:
return self._scheduler
def create_default_finding(self) -> Finding:
"""Create finding default fields according to the event type"""
return Finding(title="Generic Finding", aggregation_key="Generic finding key")
def add_enrichment(
self,
enrichment_blocks: List[BaseBlock],
annotations=None,
finding_key: str = "DEFAULT",
):
finding = self.findings.get(finding_key)
if not finding:
finding = self.create_default_finding()
self.findings[finding_key] = finding
finding.add_enrichment(enrichment_blocks, annotations)
def add_finding(self, finding: Finding, finding_key: str = None):
if (
not finding_key
): # user didn't specify a key, so this finding shouldn't be accessed by key. Randomise it
finding_key = str(uuid.uuid4())
existing_finding = self.findings.get(finding_key)
if existing_finding:
logging.warning(
f"Overriding existing finding. finding_key: {finding_key} new finding: {finding}"
)
self.findings[finding_key] = finding
@staticmethod
def from_params(params: ExecutionEventBaseParams) -> Optional["ExecutionBaseEvent"]:
return ExecutionBaseEvent(named_sinks=params.named_sinks)
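

# Hypothetical usage sketch (not part of the module): how an action might attach a finding
# to the event. The title, aggregation key and finding key are made-up values; Finding is
# the class imported at the top of this module.
#
#     event = ExecutionBaseEvent(named_sinks=["slack"])
#     event.add_finding(
#         Finding(title="Pod is crash-looping", aggregation_key="crash_loop"),
#         finding_key="crash_loop",
#     )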
| 1.609375 | 2 |
mmdet/ops/dcn/__init__.py | TJUsym/TJU_Advanced_CV_Homework | 1,158 | 4471 | from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
DeformConvPack, ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
ModulatedDeformRoIPoolingPack)
__all__ = [
'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling'
]
| 0.396484 | 0 |
StorageSystem.py | aaronFritz2302/ZoomAuto | 0 | 4519 | import sqlite3
from pandas import DataFrame
conn = sqlite3.connect('./data.db',check_same_thread=False)
class DataBase():
cursor = conn.cursor()
def __init__(self):
self.createTable()
def createTable(self):
'''
Creates A Table If it Doesnt Exist
'''
conn.execute("""CREATE TABLE IF NOT EXISTS MeetingData (Name text,ID text,Password text, DateTime text,Audio text,Video Text)""")
def enterData(self,meetingData):
'''
Enters Data From The UI Table To The DataBase
'''
meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index = False)
def readData(self):
'''
Reads Data From The SQL DataBase
'''
self.cursor.execute('''SELECT * FROM MeetingData''')
retVal = DataFrame(self.cursor.fetchall(),columns=['Name','ID','Password','DateTime','Audio','Video'])
return retVal | 2.203125 | 2 |
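

# Hypothetical usage sketch (not part of the original file): the column names follow the
# MeetingData schema created above; the meeting values themselves are made up.
if __name__ == '__main__':
    db = DataBase()
    meetings = DataFrame(
        [['Standup', '123456789', 'pass123', '2021-01-01 09:00', 'on', 'off']],
        columns=['Name', 'ID', 'Password', 'DateTime', 'Audio', 'Video'])
    db.enterData(meetings)   # replaces the MeetingData table with the DataFrame contents
    print(db.readData())     # reads it back as a DataFrame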
colab/__init__.py | caseywstark/colab | 1 | 4527 | # -*- coding: utf-8 -*-
__about__ = """
This project demonstrates a social networking site. It provides profiles,
friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps,
locations and user-to-user messaging.
In 0.5 this was called "complete_project".
"""
| 0.423828 | 0 |
tests/factories.py | luzik/waliki | 324 | 4535 | import factory
from django.contrib.auth.models import User, Group, Permission
from waliki.models import ACLRule, Page, Redirect
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: u'user{0}'.format(n))
password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>')
    email = factory.LazyAttribute(lambda o: '<EMAIL>' % o.username)
class Meta:
model = User
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class GroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = Group
name = factory.Sequence(lambda n: "Group #%s" % n)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
            # A list of users was passed in, use them
for user in extracted:
self.user_set.add(user)
class ACLRuleFactory(factory.django.DjangoModelFactory):
class Meta:
model = ACLRule
name = factory.Sequence(lambda n: u'Rule {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def permissions(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
            # A list of permissions was passed in, use them
for perm in extracted:
if not isinstance(perm, Permission):
perm = Permission.objects.get(content_type__app_label='waliki', codename=perm)
self.permissions.add(perm)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
            # A list of users was passed in, use them
for user in extracted:
self.users.add(user)
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class PageFactory(factory.django.DjangoModelFactory):
title = factory.Sequence(lambda n: u'Page {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def raw(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
self.raw = extracted
class Meta:
model = Page
class RedirectFactory(factory.django.DjangoModelFactory):
old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n))
new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n))
class Meta:
model = Redirect
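

# Hypothetical usage sketch (not part of the module): the post_generation hooks above let
# tests hand related objects (or permission codenames) straight to the factory call. The
# group name and permission codename below are made up.
#
#     group = GroupFactory(name='editors')
#     user = UserFactory(groups=[group])
#     rule = ACLRuleFactory(permissions=['view_page'], users=[user])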
| 1.351563 | 1 |
gautools/submit_gaussian.py | thompcinnamon/QM-calc-scripts | 0 | 4575 | #! /usr/bin/env python3
########################################################################
# #
# This script was written by <NAME> in 2015. #
# <EMAIL> <EMAIL> #
# #
# Copyright 2015 <NAME> IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
# This is written to work with python 3 because it should be good to
# be working on the newest version of python.
from __future__ import print_function
import argparse # For parsing commandline arguments
import datetime
import glob # Allows referencing file system/file names
import os
import re
import readline # Allows easier file input (with tab completion?)
import subprocess # Allows for submitting commands to the shell
from warnings import warn
from thtools import cd, make_obj_dir, save_obj, resolve_path
yes = ['y', 'yes', '1']
# An input function that can prefill in the text entry
# Not sure if this works in 3.5+ because raw_input is gone
def rlinput(prompt, prefill=''):
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt)
finally:
readline.set_startup_hook()
def _dir_and_file(path):
warn('_dir_and_file is deprecated. Use os.path.split instead',
DeprecationWarning)
if '/' in path:
rel_dir, f_name = path.rsplit('/', 1)
rel_dir = rel_dir + '/'
else:
rel_dir = ''
f_name = path
return rel_dir, f_name
def create_gau_input(coord_name, template, verbose=True):
"""
make gaussian input file by combining header and coordinates files
This function takes as input a file with a set of molecular
coordinates (the form should not matter, it will just be copied
into the next file) and a template file that should be the header
for the desired calculation (including charge and multiplicity),
returns the name of the file, and creates a Gaussian input file ending
with '.com'
:param str coord_name: name of file with coordinates in a format
Gaussian can read
:param str template: name of file with header for Gaussian calculation
(up to and including the charge and multiplicity)
:param bool verbose: If True, some status messages will be printed
(including file names)
:return: name of the written file
:rtype: str
"""
if verbose:
print('Creating Gaussian input file...')
_out_name = coord_name.rsplit('.', 1)[0] + '.com'
with open(_out_name, 'w') as out_file:
with open(template, 'r') as templ_file:
if verbose:
print('opened {}'.format(template))
for line in templ_file:
out_file.write(line)
if '\n' not in line:
out_file.write('\n')
with open(coord_name, 'r') as in_file:
if verbose:
print('opened {}'.format(coord_name))
for i, line in enumerate(in_file):
if i < 2:
# ignore first two lines
# number of atoms and the title/comment
continue
# if line.strip().isdigit():
# # the first line is the number of atoms
# continue
# # XYZ files created by mathematica have a comment
# # as the second line saying something like:
# # "Created by mathematica". Obv. want to ignore that
# if line.strip().startswith('Create') or
# line.strip().startswith('generated'):
# continue
# else:
out_file.write(line)
out_file.write('\n\n\n')
if verbose:
print('created Gaussian input file {}'.format(_out_name))
return _out_name
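

# Hypothetical usage sketch (not part of the original script): the file names are made up.
# 'water.xyz' holds coordinates and 'b3lyp_header.txt' holds the route section, charge and
# multiplicity that create_gau_input copies verbatim above the coordinates.
#
#     com_file = create_gau_input('water.xyz', 'b3lyp_header.txt', verbose=True)
#     # -> writes and returns 'water.com'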
def get_input_files(base_name, batch):
_in_name_list = glob.glob(base_name + '*')
_in_name_list.sort() # sort files alphanumerically
    _in_name_list.sort(key=len)  # then by length, otherwise 1,10,11,... would come
    # before 2,...,9. If the numbers are zero-padded (01, 02, ...) all names have the
    # same length and this second sort is a no-op.
if not batch:
num_files = len(_in_name_list)
if num_files > 1:
print('Multiple files starting with {}'.format(base_name))
if input('Did you mean to execute a batch job? ') in yes:
batch = True
else:
print('What file name shall I use?')
_in_name_list = [rlinput('file name: ', base_name)]
return _in_name_list, batch
def use_template(template, in_names, verbose):
made_name_list = []
for in_name in in_names:
out_name = create_gau_input(in_name, template, verbose=verbose)
made_name_list.append(out_name)
if verbose:
print('Added {} to files to possibly submit.'.format(out_name))
_in_name_list = made_name_list
_in_name_list.sort()
_in_name_list.sort(key=len)
return _in_name_list
def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False,
mem='125', executable='g09',
chk_file=None, copy_chk=False,
ln_running=None,
hold_jid=None, xyz=None, make_xyz=None, make_input=False,
ugt_dict=None):
"""
Write submission script for (Gaussian) jobs for submission to queue
If make_xyz is not None, the file make_xyz will be checked to exist
first to make sure to not waste time when missing a necessary input file.
:param str input_name: Name of the file to use as input
:param int num_cores: Number of cores to request
:param str time: Amount of time to request in the format 'hh:mm:ss'
:param bool verbose: If True, print out some status messages and such
:type mem: int or str
:param mem: Minimum amount of memory to request
:param str executable: Executable file to use for the job
Example, 'g09', 'g16'
:param str chk_file: If not None, this file will be copied back after the
job has completed. If this is not None and make_input is True,
this will also be passed to use_gen_template.
:param bool copy_chk: If this is True, the script will attempt to copy
what should be an existing checkpoint file to the scratch directory
before running the job. `chk_file` must be not None as well.
:param str ln_running: If not None, this will be the base name for
linking the output file to the current directory. If chk_file is not
None, it will also be linked with the same base name.
:param str hold_jid: Job on which this job should depend.
This should be the name of another job in the queuing system.
:param str xyz: Name of an xyz file to use as input to use_gen_template
(if make_input is True).
:param str make_xyz: The name of a file to pass to obabel to be used to
create an xyz file to pass to use_gen_template.
:param bool make_input: If True, use_gen_template will be used to create
input for the Gaussian calculation.
:param dict ugt_dict: dict of arguments to pass to use_gen_template.
This should not include out_file, xyz, nproc, mem, or checkpoint
because those will all be used from other arguments to this function.
out_file will be input_name; xyz will be xyz or a time-based name if
make_xyz is not None; nproc will be $NSLOTS (useful if this gets
changed after job submission); mem will be mem; and checkpoint will
be chk_file.
:return: The name of the script file
:rtype: str
"""
rel_dir, file_name = os.path.split(input_name)
if file_name.endswith('.com'):
short_name = os.path.splitext(file_name)[0]
if not short_name + '.com' == file_name:
raise SyntaxError('problem interpreting file name. ' +
'Period in file name?')
out_name = short_name + '.out'
elif '.' in file_name:
        short_name, input_extension = os.path.splitext(file_name)
        # os.path.splitext keeps the leading dot in the extension, so rejoin without adding one
        if not short_name + input_extension == file_name:
raise SyntaxError('problem interpreting file name. ' +
'Period in file name?')
out_name = short_name + '.out'
else:
short_name = file_name
file_name = short_name + '.com'
print('Assuming input file is {}'.format(file_name))
out_name = short_name + '.out'
job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1)
if len(job_name) == 0:
job_name = 'default'
_script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh')
temp_xyz = os.path.abspath('.temp' +
datetime.datetime.now().strftime('%H%M%S%f') +
'.xyz')
if xyz is None or make_xyz is not None:
n_xyz = temp_xyz
else:
n_xyz = resolve_path(xyz)
temp_pkl = temp_xyz[:-4]
if ugt_dict is not None:
make_obj_dir()
pkl_path = save_obj(ugt_dict, temp_pkl)
if chk_file is not None:
chk_line = 'checkpoint=\'{}\','.format(chk_file)
else:
chk_line = ''
with open(_script_name, 'w') as script_file:
sfw = script_file.write
sfw('#!/bin/bash -l\n\n')
sfw('#$ -pe omp {}\n'.format(num_cores))
sfw('#$ -M <EMAIL>\n')
sfw('#$ -m eas\n')
sfw('#$ -l h_rt={}\n'.format(time))
sfw('#$ -l mem_total={}G\n'.format(mem))
sfw('#$ -N {}\n'.format(job_name))
sfw('#$ -j y\n')
sfw('#$ -o {}.log\n\n'.format(short_name))
if hold_jid is not None:
sfw('#$ -hold_jid {}\n\n'.format(hold_jid))
if make_xyz is not None:
sfw('if [ ! -f {} ]; then\n'.format(
os.path.abspath(make_xyz)) +
' exit 17\n'
'fi\n\n')
sfw('module load wxwidgets/3.0.2\n')
sfw('module load openbabel/2.4.1\n\n')
sfw('obabel {} -O {}\n\n'.format(os.path.abspath(
make_xyz), os.path.abspath(n_xyz)))
if make_input:
sfw('python -c "from gautools.tools import '
'use_gen_template as ugt;\n'
'from thtools import load_obj, get_node_mem;\n'
'm = get_node_mem();\n'
'd = load_obj(\'{}\');\n'.format(
os.path.abspath(pkl_path)) +
'ugt(\'{}\',\'{}\','.format(
file_name, os.path.abspath(n_xyz)) +
'nproc=$NSLOTS,mem=m,{}'.format(chk_line) +
'**d)"\n\n')
sfw('INPUTFILE={}\n'.format(file_name))
sfw('OUTPUTFILE={}\n'.format(out_name))
if chk_file is not None:
sfw('CHECKFILE={}\n\n'.format(chk_file))
else:
sfw('\n')
if ln_running is not None:
sfw('WORKINGOUT={}.out\n'.format(ln_running))
if chk_file is not None:
sfw('WORKINGCHK={}.chk\n\n'.format(ln_running))
else:
sfw('\n')
sfw('CURRENTDIR=`pwd`\n')
sfw('SCRATCHDIR=/scratch/$USER\n')
sfw('mkdir -p $SCRATCHDIR\n\n')
sfw('cd $SCRATCHDIR\n\n')
sfw('cp $CURRENTDIR/$INPUTFILE .\n')
if chk_file is not None:
sfw('# ') if not copy_chk else None
sfw('cp $CURRENTDIR/$CHECKFILE .\n\n')
else:
sfw('\n')
if ln_running is not None:
sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE '
'$CURRENTDIR/$WORKINGOUT\n')
if chk_file is not None:
sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE '
'$CURRENTDIR/$WORKINGCHK\n\n')
else:
sfw('\n')
sfw('echo About to run {} in /net/`'.format(executable) +
'hostname -s`$SCRATCHDIR\n\n')
sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable))
sfw('\n\n')
if ln_running is not None:
sfw('rm $CURRENTDIR/$WORKINGOUT')
if chk_file is not None:
sfw(' $CURRENTDIR/$WORKINGCHK\n\n')
else:
sfw('\n\n')
sfw('cp $OUTPUTFILE $CURRENTDIR/.\n')
if chk_file is not None:
sfw('cp $CHECKFILE $CURRENTDIR/.\n\n')
else:
sfw('\n')
sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\n')
sfw('echo output was copied to $CURRENTDIR\n\n')
if verbose:
print('script written to {}'.format(_script_name))
return _script_name
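

# Hypothetical usage sketch (not part of the original script): the file names, walltime and
# memory are made up; this mirrors how the __main__ block below drives the function.
#
#     script = write_sub_script('water.com', num_cores=16, time='12:00:00',
#                               mem='125', executable='g09', chk_file='water.chk')
#     job_info = submit_scripts([script], batch=False, submit=False, verbose=True)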
def submit_scripts(scripts, batch=False, submit=False, verbose=False):
outputs = []
if batch:
if submit or input('submit all jobs? ') in yes:
for script in scripts:
rd, f = _dir_and_file(script)
with cd(rd, ignore_blank=True):
cl = ['qsub', f]
# Don't really know how this works. Copied from
# http://stackoverflow.com/questions/4256107/
# running-bash-commands-in-python
process = subprocess.Popen(cl,
stdout=subprocess.PIPE,
universal_newlines=True)
output = process.communicate()[0]
if verbose:
print(output)
outputs.append(output)
else:
if verbose:
print('No jobs submitted, but scripts created')
else:
if submit or input('submit job {}? '.format(scripts[0])) in yes:
rd, f = _dir_and_file(scripts[0])
with cd(rd, ignore_blank=True):
cl = ['qsub', f]
# Don't really know how this works. Copied from
# http://stackoverflow.com/questions/4256107/
# running-bash-commands-in-python
process = subprocess.Popen(cl,
stdout=subprocess.PIPE,
universal_newlines=True)
output = process.communicate()[0]
if verbose:
print(output)
outputs.append(output)
else:
if verbose:
print('{} not submitted'.format(scripts))
_job_info = [' '.join(output.split(' ')[2:4]) for output in outputs]
return _job_info
if __name__ == '__main__':
description = 'Create and submit a script to run a Gaussian job on SCC'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('in_name',
help='Name of Gaussian input file')
parser.add_argument('-c', '--numcores', type=int, default=16,
help='Number of cores for job')
# I should probably check validity of this time request
# Maybe it doesn't matter so much because it just won't
# submit the job and it will give quick feedback about that?
parser.add_argument('-t', '--time',
help='Time required as "hh:mm:ss"',
default='12:00:00')
parser.add_argument('-e', '--executable', type=str, default='g09',
help='name of executable to run')
parser.add_argument('-b', '--batch', action='store_true',
help='create multiple scripts (batch job)')
parser.add_argument('-x', '--template', default=None,
help='template file for creating input from coords')
parser.add_argument('-s', '--submit', action='store_true',
help='Automatically submit jobs?')
parser.add_argument('-v', '--verbose', action='store_true',
help='make program more verbose')
parser.add_argument('-j', '--nojobinfo', action='store_false',
help='Do not return the submitted job information')
parser.add_argument('-k', '--chk_file', default=None,
help='checkpoint file to be written and copied back')
parser.add_argument('--copy_chk', action='store_true',
help='Copy check file to the scratch directory')
parser.add_argument('-l', '--ln_running', type=str, default=None,
help='base name for linking output to cwd while '
'running')
parser.add_argument('-d', '--hold_jid', default=None,
help='job on which this job should depend')
args = parser.parse_args()
in_name_list, args.batch = get_input_files(args.in_name, args.batch)
if args.template:
in_name_list = use_template(args.template, in_name_list, args.verbose)
script_list = []
for in_name in in_name_list:
script_name = write_sub_script(input_name=in_name,
num_cores=args.numcores,
time=args.time,
verbose=args.verbose,
executable=args.executable,
chk_file=args.chk_file,
copy_chk=args.copy_chk,
ln_running=args.ln_running,
hold_jid=args.hold_jid)
script_list.append(script_name)
if not len(script_list) == len(in_name_list):
# This should never be the case as far as I know, but I would
# like to make sure everything input gets a script and all the
# script names are there to be submitted.
raise IOError('num scripts dif. from num names given')
job_info = submit_scripts(script_list, args.batch, args.submit,
args.verbose)
if job_info and args.nojobinfo:
for job in job_info:
print(job)
if args.verbose:
print('Done. Completed normally.')
| 1.382813 | 1 |
libpermian/issueanalyzer/test_baseissue.py | velezd/permian | 0 | 4607 | import unittest
import logging
import contextlib
from libpermian.settings import Settings
from .proxy import IssueAnalyzerProxy
from .base import BaseAnalyzer, BaseIssue
from .issueset import IssueSet
LOGGER = logging.getLogger('test')
class NewIssue(BaseIssue):
def submit(self):
LOGGER.info('submit was called')
return super().submit()
def make(self):
LOGGER.info('make was called')
return 'http://issuetracker.example.com/new_issue'
def update(self):
LOGGER.info('update was called')
def _lookup(self):
LOGGER.info('lookup was called')
return None
@property
def resolved(self):
return False
@property
def report_url(self):
return 'http://issuetracker.example.com/new/foo'
class TrackedUnresolvedIssue(NewIssue):
def _lookup(self):
LOGGER.info('lookup was called')
return 'http://issuetracker.example.com/123'
@property
def resolved(self):
return False
@property
def report_url(self):
return 'http://issuetracker.example.com/new/bar'
class TrackedResolvedIssue(TrackedUnresolvedIssue):
@property
def resolved(self):
return True
class TestNewIssue(unittest.TestCase):
def setUp(self):
self.settings = Settings({}, {}, [])
self.issue = NewIssue(self.settings)
def test_properties(self):
self.assertTrue(self.issue.new)
self.assertFalse(self.issue.tracked)
self.assertEqual(self.issue.uri, None)
def test_sync(self):
# test lookup was called
with self.assertLogs('test', level='INFO') as cm:
self.issue.sync()
self.assertEqual(cm.output, ['INFO:test:lookup was called'])
self.test_properties()
def test_str(self):
self.assertEqual(str(self.issue), self.issue.report_url)
class TestTrackedUnresolvedIssue(TestNewIssue):
def setUp(self):
self.settings = Settings({}, {}, [])
self.issue = TrackedUnresolvedIssue(self.settings)
def test_properties(self):
self.assertFalse(self.issue.new)
self.assertTrue(self.issue.tracked)
self.assertEqual(self.issue.uri, 'http://issuetracker.example.com/123')
def test_str(self):
self.assertEqual(str(self.issue), self.issue.uri)
# TrackedResolvedIssue should behave the same way as TrackedUnresolvedIssue
# so just inherit the whole test case to run the very same test
class TestTrackedResolvedIssue(TestTrackedUnresolvedIssue):
def setUp(self):
self.settings = Settings({}, {}, [])
self.issue = TrackedResolvedIssue(self.settings)
class TestSubmitDisabled(unittest.TestCase):
settings = Settings(
{
'issueAnalyzer' : {
'create_issues': False,
'update_issues': False,
'create_issues_instead_of_update': False,
}
},
{},
[]
)
def setUp(self):
self.new = NewIssue(self.settings)
self.unresolved = TrackedUnresolvedIssue(self.settings)
self.resolved = TrackedResolvedIssue(self.settings)
# sync the issues so that lookup is not called => logged during submit
self.new.sync()
self.unresolved.sync()
self.resolved.sync()
@contextlib.contextmanager
def assertUnchanged(self, issue):
old_uri = issue.uri
old_new = issue.new
old_tracked = issue.tracked
yield issue
self.assertEqual(issue.uri, old_uri)
self.assertEqual(issue.new, old_new)
self.assertEqual(issue.tracked, old_tracked)
def assertSubmitNoop(self, issue):
with self.assertUnchanged(issue):
with self.assertLogs('test', level='INFO') as cm:
issue.submit()
issue.submit()
self.assertEqual(cm.output, [
"INFO:test:submit was called",
"INFO:test:submit was called",
])
def assertSubmitCreate(self, issue):
with self.assertLogs('test', level='INFO') as cm:
result1 = issue.submit()
result2 = issue.submit()
self.assertEqual(cm.output, [
"INFO:test:submit was called",
"INFO:test:make was called",
"INFO:test:submit was called",
])
self.assertEqual(result1, result2)
return result1
def assertSubmitUpdate(self, issue):
with self.assertUnchanged(issue):
with self.assertLogs('test', level='INFO') as cm:
result1 = issue.submit()
result2 = issue.submit()
self.assertEqual(cm.output, [
"INFO:test:submit was called",
"INFO:test:update was called",
"INFO:test:submit was called",
])
self.assertEqual(result1, result2)
return result1
def testNew(self):
self.assertSubmitNoop(self.new)
def testUnresolved(self):
self.assertSubmitNoop(self.unresolved)
def testResolved(self):
self.assertSubmitNoop(self.resolved)
class TestSubmitCreateUpdate(TestSubmitDisabled):
settings = Settings(
{
'issueAnalyzer' : {
'create_issues': True,
'update_issues': True,
'create_issues_instead_of_update': False,
}
},
{},
[]
)
def testNew(self):
result = self.assertSubmitCreate(self.new)
self.assertTrue(self.new.new)
self.assertTrue(self.new.tracked)
self.assertEqual(result, 'http://issuetracker.example.com/new_issue')
self.assertEqual(result, self.new.uri)
# repeated submit doesn't do anything
with self.assertUnchanged(self.new):
with self.assertLogs('test', level='INFO') as cm:
result = self.new.submit()
self.assertEqual(cm.output, [
"INFO:test:submit was called",
])
def testUnresolved(self):
self.assertSubmitUpdate(self.unresolved)
def testResolved(self):
self.assertSubmitUpdate(self.resolved)
class TestSubmitCreateOnlyNew(TestSubmitCreateUpdate):
settings = Settings(
{
'issueAnalyzer' : {
'create_issues': True,
'update_issues': False,
'create_issues_instead_of_update': False,
}
},
{},
[]
)
def testUnresolved(self):
self.assertSubmitNoop(self.unresolved)
def testResolved(self):
self.assertSubmitNoop(self.resolved)
class TestSubmitUpdateOnlyTracked(TestSubmitCreateUpdate):
settings = Settings(
{
'issueAnalyzer' : {
'create_issues': False,
'update_issues': True,
'create_issues_instead_of_update': False,
}
},
{},
[]
)
def testNew(self):
self.assertSubmitNoop(self.new)
class TestSubmitCreateAlwaysWithUpdateOff(TestSubmitCreateUpdate):
settings = Settings(
{
'issueAnalyzer' : {
'create_issues': True,
'update_issues': False, # This should have no effect
'create_issues_instead_of_update': True,
}
},
{},
[]
)
def testUnresolved(self):
old_uri = self.unresolved.uri
result = self.assertSubmitCreate(self.unresolved)
self.assertEqual(result, 'http://issuetracker.example.com/new_issue')
self.assertEqual(self.unresolved.uri, result)
self.assertNotEqual(result, old_uri)
def testResolved(self):
old_uri = self.resolved.uri
result = self.assertSubmitCreate(self.resolved)
self.assertEqual(result, 'http://issuetracker.example.com/new_issue')
self.assertEqual(self.resolved.uri, result)
self.assertNotEqual(result, old_uri)
# The update_issue should have no effect when create_issues_instead_of_update
# is set to True.
class TestSubmitCreateAlwaysWithUpdateOn(TestSubmitCreateAlwaysWithUpdateOff):
settings = Settings(
{
'issueAnalyzer' : {
'create_issues': True,
'update_issues': True, # This should have no effect
'create_issues_instead_of_update': True,
}
},
{},
[]
)
| 1.59375 | 2 |
NoiseFiltersPy/Injector.py | TVect/NoiseFiltersPy | 6 | 4631 | import numpy as np
import pandas as pd
from abc import ABC
class Injector(ABC):
"""Base class for the injectors of artificial noise.
    Attributes
    ----------
    labels : :obj:`pandas.DataFrame`
        Target attributes (y) of the dataset; artificial label noise is written here.
    noise_indx : :obj:`List`
        Indexes (rows) of the examples selected to receive artificial noise.
"""
def __init__(self, attributes, labels, rate: float = 0.1) -> None:
self._new_noise = []
if not isinstance(attributes, pd.DataFrame):
self._attrs = pd.DataFrame(attributes)
else:
self._attrs = attributes
if not isinstance(labels, pd.DataFrame):
self._labels = pd.DataFrame(labels)
else:
self._labels = labels
self._rate = rate
self.verify()
self._num_noise = int(self._rate * self._attrs.shape[0])
self._label_types = set(self.labels[0].unique())
@property
def labels(self):
return self._labels
@property
def noise_indx(self):
return self._new_noise
def verify(self) -> None:
if min(self._labels.value_counts()) < 2:
raise ValueError("Number of examples in the minority class must be >= 2.")
if self._attrs.shape[0] != self.labels.shape[0]:
raise ValueError("Attributes and classes must have the sime size.")
if self._rate < 0 or self._rate > 1:
raise ValueError("")
def _gen_random(self, seed: int = None):
"""[summary]
Args:
seed (int, optional): [description]. Defaults to 123.
"""
rng = np.random.default_rng(seed)
for example in self._new_noise:
self._labels.iloc[example] = rng.choice(list(self._label_types - set(self._labels.iloc[example])))
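

# Hypothetical sketch (not part of the package): a minimal concrete injector that marks
# `_num_noise` random rows as noise and flips their labels via _gen_random. The class name
# and the uniform sampling strategy are assumptions for illustration only.
class RandomInjector(Injector):
    def __call__(self, seed: int = None):
        rng = np.random.default_rng(seed)
        # choose which examples will receive label noise
        self._new_noise = list(rng.choice(self._attrs.shape[0],
                                          size=self._num_noise, replace=False))
        # flip each selected label to a different class
        self._gen_random(seed)
        return self._labels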
| 2.25 | 2 |
bot/cogs/clan.py | johnvictorfs/atlantisbot-rewrite | 0 | 4655 | import rs3clans
import discord
from discord.ext import commands
from bot.bot_client import Bot
from bot.utils.tools import separator
from bot.utils.context import Context
class Clan(commands.Cog):
def __init__(self, bot: Bot):
self.bot = bot
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=['clan'])
async def clan_detail_info(self, ctx: Context, *, clan_name: str):
try:
clan = rs3clans.Clan(name=clan_name, set_exp=True)
except ConnectionError:
return await ctx.send(f"Houve um erro ao tentar conectar a API da Jagex. Tente novamente mais tarde.")
except rs3clans.ClanNotFoundError:
return await ctx.send(f"O clã '{clan_name}' não existe.")
clan_leader = None
for member in clan:
if member.rank == 'Owner':
clan_leader = member.name
clan_url = clan.name.replace(' ', '%20')
clan_embed = discord.Embed(
title=clan.name,
color=discord.Color.green(),
url=f'http://services.runescape.com/m=clan-home/clan/{clan_url}'
)
clan_embed.set_author(name='RuneClan', url=f'https://runeclan.com/clan/{clan_url}')
clan_embed.set_thumbnail(url=f'http://services.runescape.com/m=avatar-rs/{clan_url}/clanmotif.png')
clan_embed.add_field(name="Exp Total", value=f'{clan.exp:,}')
clan_embed.add_field(name="Membros", value=str(clan.count))
clan_embed.add_field(name="Líder", value=clan_leader)
clan_embed.add_field(name="Exp Média por Membro", value=f'{clan.avg_exp:,.0f}')
return await ctx.send(embed=clan_embed)
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=['claninfo', 'clanexp', 'claexp', 'clainfo', 'clãexp', 'clãinfo'])
async def clan_user_info(self, ctx: Context, *, username: str):
try:
player = rs3clans.Player(name=username, runemetrics=True)
except ConnectionError:
return await ctx.send(f"Houve um erro ao tentar conectar a API da Jagex. Tente novamente mais tarde.")
if not player.exists:
return await ctx.send(f"Jogador '{player.name}' não existe.")
if not player.clan:
return await ctx.send(f"Jogador '{player.name}' não está em um clã.")
user_clan = rs3clans.Clan(name=player.clan)
member = user_clan.get_member(username)
user_clan_exp = member.exp
user_rank = member.rank
display_username = player.name
if self.bot.setting.show_titles:
if player.suffix:
display_username = f"{player.name} {player.title}"
else:
display_username = f"{player.title} {player.name}"
user_url_name = player.name.replace(" ", "%20")
user_url_clan = player.clan.replace(" ", "%20")
icon_url = f"https://secure.runescape.com/m=avatar-rs/{user_url_name}/chat.png"
runeclan_url = f"https://runeclan.com/user/{user_url_name}"
clan_banner_url = f"http://services.runescape.com/m=avatar-rs/l=3/a=869/{user_url_clan}/clanmotif.png"
embed_title = "RuneClan"
rank_header = "__Rank__"
clan_header = "__Clã__"
exp_header = "__Exp no Clã__"
total_exp_header = "__Exp Total__"
private_profile_header = "Indisponível - Perfil Privado"
rank_emoji = self.bot.setting.clan_settings[user_rank]['Emoji']
user_rank = self.bot.setting.clan_settings[user_rank]['Translation']
clan_info_embed = discord.Embed(
title=embed_title,
description="",
color=discord.Colour.dark_blue(),
url=runeclan_url,
)
clan_info_embed.set_author(
icon_url=icon_url, name=display_username
)
clan_info_embed.set_thumbnail(
url=clan_banner_url
)
clan_info_embed.add_field(
name=clan_header,
value=player.clan
)
clan_info_embed.add_field(
name=rank_header,
value=f"{user_rank} {rank_emoji}"
)
clan_info_embed.add_field(
name=exp_header,
value=f"{user_clan_exp:,}"
)
if player.private_profile:
clan_info_embed.add_field(
name=total_exp_header,
value=private_profile_header,
inline=False
)
else:
clan_info_embed.add_field(
name=total_exp_header,
value=f"{player.exp:,}"
)
return await ctx.send(content=None, embed=clan_info_embed)
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=['ranksupdate', 'upranks', 'rank'])
async def ranks(self, ctx: Context, *, clan: str = 'Atlantis'):
if clan.lower() == 'atlantis argus':
return await ctx.send('`!rank argus` irmão')
elif clan.lower() == 'atlantis':
exp_general = 2_000_000_000
exp_captain = 1_000_000_000
exp_lieutenant = 500_000_000
exp_seargent = 250_000_000
exp_corporal = 125_000_000
elif clan.lower() == 'argus':
exp_general = 500_000_000
exp_captain = 250_000_000
exp_lieutenant = 125_000_000
exp_seargent = 60_000_000
exp_corporal = 30_000_000
clan = 'Atlantis Argus'
else:
return await ctx.send('Clã não reconhecido.')
rank_emoji = {
'Recruit': self.bot.setting.clan_settings['Recruit']['Emoji'],
'Corporal': self.bot.setting.clan_settings['Corporal']['Emoji'],
'Sergeant': self.bot.setting.clan_settings['Sergeant']['Emoji'],
'Lieutenant': self.bot.setting.clan_settings['Lieutenant']['Emoji'],
'Captain': self.bot.setting.clan_settings['Captain']['Emoji'],
'General': self.bot.setting.clan_settings['General']['Emoji'],
}
ranks_embed = discord.Embed(
title="__Ranks a Atualizar__",
description=" ",
)
found = False
clan = rs3clans.Clan(clan, set_exp=False)
clan_members = reversed([member for member in clan])
member: rs3clans.ClanMember
for member in clan_members:
if len(ranks_embed.fields) >= 20:
await ctx.send('Muitos ranks a serem atualizados, enviando apenas os 20 primeiros.')
break
if member.exp >= exp_corporal and member.rank == 'Recruit':
ranks_embed.add_field(
name=member.name,
value=f"Recruta {rank_emoji['Recruit']} ❯ Cabo {rank_emoji['Corporal']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_general and member.rank == 'Captain':
ranks_embed.add_field(
name=member.name,
value=f"Capitão {rank_emoji['Captain']} ❯ General {rank_emoji['General']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_captain and member.rank == 'Lieutenant':
ranks_embed.add_field(
name=member.name,
value=f"Tenente {rank_emoji['Lieutenant']} ❯ Capitão {rank_emoji['Captain']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_lieutenant and member.rank == 'Sergeant':
ranks_embed.add_field(
name=member.name,
value=f"Sargento {rank_emoji['Sergeant']} ❯ Tenente {rank_emoji['Lieutenant']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
elif member.exp >= exp_seargent and member.rank == 'Corporal':
ranks_embed.add_field(
name=member.name,
value=f"Cabo {rank_emoji['Corporal']} ❯ Sargento {rank_emoji['Sergeant']}\n"
f"**__Exp:__** {member.exp:,}\n{separator}",
inline=False)
found = True
if not found:
ranks_embed.add_field(
name="Nenhum Rank a ser atualizado no momento :)",
value=separator,
inline=False
)
return await ctx.send(embed=ranks_embed)
def setup(bot):
bot.add_cog(Clan(bot))
| 1.640625 | 2 |
nehebn2.py | psifertex/nehebn2 | 0 | 4671 | #!/usr/bin/env python3
from components import ProgramState
import binaryninja as binja
import argparse
import os.path
import curses
# TODO...implement live-refreshing the settings.json during run (add the keybinding and check for it here in the global input loop)
# TODO...support multi-key presses? Not sure if this already works or not
# TODO...make sure to support small terminals (I think it does right now, but I should add some more checks so nothing goes out of bounds)
def main(stdscr):
# Setup
parser = argparse.ArgumentParser(description='Nearly Headless BinaryNinja.')
parser.add_argument('filename', nargs='?', default="")
args = parser.parse_args()
program = ''
if not args.filename == "":
if os.path.isfile(args.filename):
bv = binja.BinaryViewType.get_view_of_file(''.join(args.filename), False)
bv.update_analysis()
while not str(bv.analysis_progress) == "Idle":
prog = bv.analysis_progress
stdscr.erase()
stdscr.border()
state = ''
if prog.state == binja.AnalysisState.DisassembleState:
state = "Disassembling"
else:
state = "Analyzing"
loadingText = "Loading File: "
prog = int((prog.count/(prog.total+1))*34.0)
stdscr.addstr(2, 4, loadingText)
stdscr.addstr(2, 4 + len(loadingText), state)
stdscr.addstr(4, 4, '[' + '#'*prog + ' '*(34-prog) + ']')
stdscr.refresh()
program = ProgramState(stdscr, bv)
else:
raise IOError("File does not exist.")
else:
program = ProgramState(stdscr)
key = ""
while program.is_running:
# Input Filtering
try:
key = stdscr.getkey()
except curses.error as err:
if not str(err) == "no input":
raise curses.error(str(err))
else:
key = "" # Clear Key Buffer
# Rendering and input
program.parseInput(key)
program.render()
curses.doupdate()
if __name__ == "__main__":
background = "2a2a2a"
text = "e0e0e0"
curses.wrapper(main)
| 1.710938 | 2 |
ls12/demo5.py | cklwblove/python-100-days-source-code | 0 | 4703 | # -*- coding: utf-8 -*-
"""
Put time-consuming tasks on a separate thread for a better user experience.
"""
import time
import tkinter
import tkinter.messagebox
def download():
    # Simulate a download task that takes 10 seconds
time.sleep(10)
tkinter.messagebox.showinfo('提示', '下载完成')
def show_about():
tkinter.messagebox.showinfo('关于', '作者:罗浩')
def main():
top = tkinter.Tk()
top.title('单线程')
top.geometry('200x150')
top.wm_attributes('-topmost', True)
panel = tkinter.Frame(top)
button1 = tkinter.Button(panel, text='下载', command=download)
button1.pack(side='left')
button2 = tkinter.Button(panel, text='关于', command=show_about)
button2.pack(side='right')
panel.pack(side='bottom')
tkinter.mainloop()
if __name__ == '__main__':
main()
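# --- Hedged sketch (not part of the original lesson file) --------------------
# The docstring above describes moving slow work onto a thread, while this demo
# is deliberately the blocking, single-threaded version. A minimal threaded
# variant could wrap the download like this (the name `download_async` is an
# illustrative assumption, not part of the original code):
#
#     import threading
#
#     def download_async():
#         # Run the slow task on a daemon thread so the Tk main loop stays responsive.
#         threading.Thread(target=download, daemon=True).start()
#
#     # ...and wire the button with command=download_async instead of command=download.
#
# In production code, UI calls such as tkinter.messagebox.showinfo should still
# be made on the main thread (e.g. via widget.after), not from the worker thread.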
| 1.929688 | 2 |
tests/test_models/test_backbones/test_encoder_decoders/test_deepfill_encoder.py | Jian137/mmediting-1 | 1,884 | 4711 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models.backbones import ContextualAttentionNeck, DeepFillEncoder
from mmedit.models.common import SimpleGatedConvModule
def test_deepfill_enc():
encoder = DeepFillEncoder()
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention')
x = torch.randn((2, 5, 256, 256))
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
if torch.cuda.is_available():
encoder = DeepFillEncoder().cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.stride == (2, 2)
assert encoder.enc2.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_conv').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 64
encoder = DeepFillEncoder(encoder_type='stage2_attention').cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 128, 64, 64)
assert encoder.enc2.out_channels == 32
assert encoder.enc3.out_channels == 64
assert encoder.enc4.out_channels == 128
encoder = DeepFillEncoder(
conv_type='gated_conv', channel_factor=0.75).cuda()
x = torch.randn((2, 5, 256, 256)).cuda()
outputs = encoder(x)
assert isinstance(outputs, dict)
assert 'out' in outputs
res = outputs['out']
assert res.shape == (2, 96, 64, 64)
assert isinstance(encoder.enc2, SimpleGatedConvModule)
assert encoder.enc2.conv.stride == (2, 2)
assert encoder.enc2.conv.out_channels == 48 * 2
def test_deepfill_contextual_attention_neck():
# TODO: add unittest for contextual attention module
neck = ContextualAttentionNeck(in_channels=128)
x = torch.rand((2, 128, 64, 64))
mask = torch.zeros((2, 1, 64, 64))
mask[..., 20:100, 23:90] = 1.
res, offset = neck(x, mask)
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
if torch.cuda.is_available():
neck.cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
neck = ContextualAttentionNeck(
in_channels=128, conv_type='gated_conv').cuda()
res, offset = neck(x.cuda(), mask.cuda())
assert res.shape == (2, 128, 64, 64)
assert offset.shape == (2, 32, 32, 32, 32)
assert isinstance(neck.conv1, SimpleGatedConvModule)
| 1.460938 | 1 |
bkt/library/powerpoint/elements.py | pyro-team/bkt-toolbox | 12 | 4719 | # -*- coding: utf-8 -*-
'''
Created on 02.11.2017
@author: fstallmann
'''
from __future__ import absolute_import
from collections import deque
import bkt
from bkt import dotnet
Drawing = dotnet.import_drawing()
from . import helpers as pplib
class TextframeSpinnerBox(bkt.ribbon.RoundingSpinnerBox):
### Instance initialization
attr = 'MarginTop'
def __init__(self, **kwargs):
'''
attr examples: MarginTop, MarginBottom, MarginLeft, MarginRight
'''
#self.attr is automatically set through RibbonControl attribute handling
self.fallback_value = 0
my_kwargs = dict(
size_string = '###',
round_cm = True,
convert = 'pt_to_cm',
get_enabled = bkt.apps.ppt_selection_contains_textframe,
)
my_kwargs.update(kwargs)
super(TextframeSpinnerBox, self).__init__(**my_kwargs)
### Spinner Box callbacks ###
def get_text(self, shapes, selection):
value = self.get_attr_from_shapes(shapes, selection)
if value is None: #e.g. no textframe detected
return None
elif int(value) == -2147483648: #replace large negative number (values differ between selected items) with fallback value
return self.fallback_value
else:
return value
def on_change(self, shapes, selection, value):
self.set_attr_for_shapes(shapes, selection, value)
### Getter Methods ###
def get_attr_from_shapes(self, shapes, selection):
'''
Get attr for shapes
'''
for textframe in pplib.iterate_shape_textframes(shapes):
try:
return self.get_attr_from_textframe(textframe)
except:
# produces error for certain chart types, e.g. Treemap
continue
return None
def get_attr_from_textframe(self, textframe):
return getattr(textframe, self.attr)
### Setter methods ###
def set_attr_for_shapes(self, shapes, selection, value):
'''
Set attr for shapes
'''
value = max(0,value)
for textframe in pplib.iterate_shape_textframes(shapes):
self.set_attr_for_textframe(textframe, value)
def set_attr_for_textframe(self, textframe, value):
setattr(textframe, self.attr, value)
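# Illustrative usage sketch (assumption, not part of the original module):
# concrete controls are built by naming the textframe attribute to drive, e.g.
#   TextframeSpinnerBox(id='textframe_margin_top', attr='MarginTop', label="Margin top")
# `attr` is picked up via the RibbonControl attribute handling mentioned in
# __init__; the same pattern applies to ParagraphFormatSpinnerBox below
# (e.g. attr='SpaceBefore').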
class ParagraphFormatSpinnerBox(bkt.ribbon.RoundingSpinnerBox):
### Instance initialization
attr = 'SpaceBefore'
def __init__(self, **kwargs):
'''
attr examples: SpaceBefore, SpaceAfter, LeftIndent, FirstLineIndent, LineSpacing
'''
#self.attr is automatically set through RibbonControl attribute handling
self.fallback_value = 0
my_kwargs = dict(
size_string = '-###',
get_enabled = bkt.apps.ppt_selection_contains_textframe,
)
if self.attr in ["SpaceBefore", "SpaceAfter", "SpaceWithin"]:
my_kwargs["round_pt"] = True
else:
my_kwargs["round_cm"] = True
my_kwargs["convert"] = "pt_to_cm"
if self.attr in ["LeftIndent", "FirstLineIndent"]:
my_kwargs["big_step"] = 0.25
my_kwargs["small_step"] = 0.125
my_kwargs["rounding_factor"] = 0.125
my_kwargs.update(kwargs)
super(ParagraphFormatSpinnerBox, self).__init__(**my_kwargs)
### Spinner Box callbacks ###
def get_text(self, shapes, selection):
value = self.get_attr_from_shapes(shapes, selection)
if value is None: #e.g. no textframe detected
return None
elif int(value) == -2147483648: #replace large negative number (values differ between selected items) with fallback value
return self.fallback_value
else:
return value
def on_change(self, shapes, selection, value):
self.set_attr_for_shapes(shapes, selection, value)
### Getter Methods ###
def get_attr_from_shapes(self, shapes, selection):
if selection.Type == 3:
# text selected
try:
# produces error if no text is selected
return self._get_attr(selection.TextRange2.Paragraphs(1,1).ParagraphFormat)
except:
try:
# produces error if there is no textrange, e.g. selection within a chart
return self._get_attr(selection.TextRange2.ParagraphFormat)
except:
return None
else:
# shapes selected
for textframe in pplib.iterate_shape_textframes(shapes):
try:
value = self.get_attr_from_textrange(textframe.TextRange)
except:
# produces error for certain chart types, e.g. Treemap
continue
try:
if int(value) == -2147483648: #different values for each paragraph, so get value from first paragraph
value = self._get_attr(textframe.TextRange.Paragraphs(1,1).ParagraphFormat)
except:
pass
return value
return None
def get_attr_from_textrange(self, textrange):
return self._get_attr(textrange.ParagraphFormat)
def _get_attr(self, par_format):
if self.attr in ["SpaceBefore", "SpaceAfter", "SpaceWithin"]:
if (self.attr == "SpaceBefore" and par_format.LineRuleBefore == 0) or (self.attr == "SpaceAfter" and par_format.LineRuleAfter == 0) or (self.attr == "SpaceWithin" and par_format.LineRuleWithin == 0):
self.huge_step = 10
self.big_step = 3
self.small_step = 1
self.round_at = 0
else:
self.huge_step = 0.5
self.big_step = 0.2
self.small_step = 0.1
self.round_at = 1
return getattr(par_format, self.attr)
### Setter methods ###
def set_attr_for_shapes(self, shapes, selection, value):
if self.attr != "FirstLineIndent": #FirstLineIndent can be negative!
value = max(0,value)
if selection.Type == 3:
# text selected
self.set_attr_for_textrange(selection.TextRange2, value) #need to use TextRange2 as TextRange does not contain LeftIndent, etc.
else:
for textframe in pplib.iterate_shape_textframes(shapes):
self.set_attr_for_textrange(textframe.TextRange, value)
def set_attr_for_textrange(self, textrange, value): #using textrange instead of textframe!
if self.attr == "SpaceBefore" and textrange.ParagraphFormat.LineRuleBefore == -2: #if values differ, set the same value as in the first paragraph
textrange.ParagraphFormat.LineRuleBefore = textrange.Paragraphs(1,1).ParagraphFormat.LineRuleBefore
if self.attr == "SpaceAfter" and textrange.ParagraphFormat.LineRuleAfter == -2: #if values differ, set the same value as in the first paragraph
textrange.ParagraphFormat.LineRuleAfter = textrange.Paragraphs(1,1).ParagraphFormat.LineRuleAfter
if self.attr == "SpaceWithin" and textrange.ParagraphFormat.LineRuleWithin == -2: #if values differ, set the same value as in the first paragraph
textrange.ParagraphFormat.LineRuleWithin = textrange.Paragraphs(1,1).ParagraphFormat.LineRuleWithin
setattr(textrange.ParagraphFormat, self.attr, value)
class PPTSymbolsSettings(object):
recent_symbols = deque(bkt.settings.get("bkt.symbols.recent_symbols", []), maxlen=3)
convert_into_shape = bkt.settings.get("bkt.symbols.convert_into_shape", True) #always convert newly inserted symbols into shapes
convert_into_bitmap = bkt.settings.get("bkt.symbols.convert_into_bitmap", False) #always convert newly inserted symbols into bitmap picture
unicode_font = bkt.settings.get("bkt.symbols.unicode_font", None) #insert unicode characters as symbol with special font (e.g. Arial Unicode)
@classmethod
def add_to_recent(cls, item):
try:
#try to remove if already exists and add to beginning
cls.recent_symbols.remove(item)
cls.recent_symbols.append(item)
except ValueError:
cls.recent_symbols.append(item)
bkt.settings["bkt.symbols.recent_symbols"] = cls.recent_symbols
@classmethod
def switch_unicode_font(cls, font=None):
cls.unicode_font = font #if font else SymbolsGallery.fallback_font
bkt.settings["bkt.symbols.unicode_font"] = cls.unicode_font
@classmethod
def convert_into_text(cls):
return not (cls.convert_into_shape or cls.convert_into_bitmap)
@classmethod
def switch_convert_into_text(cls, pressed):
cls.convert_into_shape = False
cls.convert_into_bitmap = False
bkt.settings["bkt.symbols.convert_into_shape"] = cls.convert_into_shape
bkt.settings["bkt.symbols.convert_into_bitmap"] = cls.convert_into_bitmap
@classmethod
def switch_convert_into_shape(cls, pressed):
cls.convert_into_shape = pressed
cls.convert_into_bitmap = False
bkt.settings["bkt.symbols.convert_into_shape"] = cls.convert_into_shape
bkt.settings["bkt.symbols.convert_into_bitmap"] = cls.convert_into_bitmap
@classmethod
def get_convert_into_shape(cls):
return (cls.convert_into_shape or bkt.get_key_state(bkt.KeyCodes.SHIFT)) and not bkt.get_key_state(bkt.KeyCodes.CTRL)
@classmethod
def switch_convert_into_bitmap(cls, pressed):
cls.convert_into_shape = False
cls.convert_into_bitmap = pressed
bkt.settings["bkt.symbols.convert_into_shape"] = cls.convert_into_shape
bkt.settings["bkt.symbols.convert_into_bitmap"] = cls.convert_into_bitmap
@classmethod
def get_convert_into_bitmap(cls):
return (cls.convert_into_bitmap or bkt.get_key_state(bkt.KeyCodes.CTRL)) and not bkt.get_key_state(bkt.KeyCodes.SHIFT)
class PPTSymbolsGallery(bkt.ribbon.SymbolsGallery):
@property
def fallback_font(self):
return PPTSymbolsSettings.unicode_font or bkt.ribbon.SymbolsGallery.fallback_font
def on_action_indexed(self, selected_item, index, context, selection, **kwargs):
        ''' insert the clicked symbol according to the current conversion settings '''
item = self.symbols[index]
self._add_to_recent(item)
shift_or_ctrl = bkt.get_key_state(bkt.KeyCodes.CTRL) or bkt.get_key_state(bkt.KeyCodes.SHIFT)
if selection.Type == 3 and not shift_or_ctrl: #text selected
selection.TextRange2.Text = "" #remove selected text first and then insert symbol
self.insert_symbol_into_text(selection.TextRange2, item)
elif PPTSymbolsSettings.convert_into_text() and selection.Type == 2 and not shift_or_ctrl: #shapes selected
self.insert_symbol_into_shapes(pplib.get_shapes_from_selection(selection), item)
else: #convert into shape or bitmap
if PPTSymbolsSettings.get_convert_into_bitmap():
self.create_symbol_bitmap(selection.SlideRange(1), item)
else:
self.create_symbol_shape(selection.SlideRange(1), item)
def _add_to_recent(self, item):
PPTSymbolsSettings.add_to_recent(item)
def insert_symbol_into_text(self, textrange, item):
if item[0] or PPTSymbolsSettings.unicode_font is not None: #font name is given, then insert as symbol
font = item[0] or self.fallback_font
try:
char_number = ord(item[1]) #ord does not work for higher level unicode, e.g. emojis, and throws TypeError
if char_number > 61695: #for higher numbers (f0ff works, f100 doesnt work) InsertSymbol does not work anymore. Also the default ppt symbol-picker only shows unicode chars til f0ff.
raise TypeError("character number to large for InsertSymbol") #fallback to InsertAfter
placeholder_char = textrange.InsertAfter("X") #append placeholder symbol so that InsertSymbol behaves the same as InsertAfter
return placeholder_char.InsertSymbol(font, char_number, -1) #symbol: FontName, CharNumber (decimal), Unicode=True
except TypeError:
char_inserted = textrange.InsertAfter(item[1]) #append symbol text
#so, NameFarEast and NameComplexScript should be writable, but they are not if InsertSymbol is used before (it remains the font of the symbol). only way to replace these values and correctly show icon is setting it to '+mn-..'
char_inserted.Font.NameFarEast = "+mn-ea"
char_inserted.Font.NameComplexScript = "+mn-cs"
char_inserted.Font.Name = font #font name
return char_inserted
else:
return textrange.InsertAfter(item[1]) #append symbol text
# if item[0]:
# char_inserted.Font.Name = item[0] #font name
def insert_symbol_into_shapes(self, shapes, item):
#pplib.iterate_shape_textframes(shapes, lambda textframe: self.insert_symbol_into_text(textframe.TextRange, item))
for textframe in pplib.iterate_shape_textframes(shapes):
self.insert_symbol_into_text(textframe.TextRange, item)
# for shape in shapes:
# if shape.HasTextFrame == -1:
# self.insert_symbol_into_text(shape.TextFrame2.TextRange, item)
def create_symbol_shape(self, slide, item):
shape = slide.shapes.addTextbox(
#office.MsoAutoShapeType.msoShapeRectangle.value__,
1,
100,100,200,200)
shape.TextFrame2.WordWrap = 0
shape.TextFrame2.AutoSize = 1 #ppAutoSizeShapeToFitText
shape.TextFrame2.MarginBottom = 0
shape.TextFrame2.MarginTop = 0
shape.TextFrame2.MarginLeft = 0
shape.TextFrame2.MarginRight = 0
self.insert_symbol_into_text(shape.TextFrame2.TextRange, item)
# if item[0]:
# shape.TextFrame.TextRange.Font.Name = item[0] #font name
# shape.TextFrame.TextRange.Text = item[1] #symbol text
if PPTSymbolsSettings.get_convert_into_shape(): #convert into shape
try:
orig_fontsize = shape.TextFrame2.TextRange.Font.Size
shape.TextFrame2.TextRange.Font.Size = 60
shape.TextFrame2.TextRange.ParagraphFormat.Bullet.Visible = 0
new_shape = pplib.convert_text_into_shape(shape)
new_shape.TextFrame2.TextRange.Font.Size = orig_fontsize
except:
shape.select()
else:
new_shape.select()
else:
shape.select()
def create_symbol_bitmap(self, slide, item):
import tempfile, os
font = item[0] or self.fallback_font
img = bkt.ribbon.SymbolsGallery.create_symbol_image(font, item[1], 400, None)
tmpfile = os.path.join(tempfile.gettempdir(), "bkt-symbol.png")
img.Save(tmpfile, Drawing.Imaging.ImageFormat.Png)
shape = slide.shapes.AddPicture(tmpfile, 0, -1, 200, 200) #FileName, LinkToFile, SaveWithDocument, Left, Top
shape.select()
os.remove(tmpfile)
class PPTSymbolsGalleryRecent(PPTSymbolsGallery):
@property
def symbols(self):
return PPTSymbolsSettings.recent_symbols
@symbols.setter
def symbols(self, value):
pass
def get_item_image(self, index):
try:
return super(PPTSymbolsGalleryRecent, self).get_item_image(index)
except:
return super(PPTSymbolsGalleryRecent, self).create_symbol_image("Arial", "?")
def button_get_label(self, index):
try:
return self.symbols[index][2]
except:
return "Zuletzt verwendet: Undefined"
def button_get_visible(self, index):
try:
return self.symbols[index] is not None
except:
return False
def get_index_as_button(self, index):
return bkt.ribbon.Button(
id="{}_button_{}".format(self.id, index),
get_label=bkt.Callback(lambda: self.button_get_label(index)),
on_action=bkt.Callback(lambda context, selection: self.on_action_indexed(None, index, context, selection)),
get_image=bkt.Callback(lambda: self.get_item_image(index)),
get_visible=bkt.Callback(lambda: self.button_get_visible(index)),
)
class LocpinGallery(bkt.ribbon.Gallery):
def __init__(self, locpin=None, item_supertip="Shape-Fixpunkt bzw. Fixierung bei Änderung {}", **kwargs):
self.locpin = locpin or pplib.GlobalLocPin
self.items = [
("fix_locpin_tl", "Oben-links", item_supertip.format("oben-links")),
("fix_locpin_tm", "Oben-mitte", item_supertip.format("oben-mitte")),
("fix_locpin_tr", "Oben-rechts", item_supertip.format("oben-rechts")),
("fix_locpin_ml", "Mitte-links", item_supertip.format("mitte-links")),
("fix_locpin_mm", "Mitte-mitte", item_supertip.format("mitte-mitte")),
("fix_locpin_mr", "Mitte-rechts", item_supertip.format("mitte-rechts")),
("fix_locpin_bl", "Unten-links", item_supertip.format("unten-links")),
("fix_locpin_bm", "Unten-mitte", item_supertip.format("unten-mitte")),
("fix_locpin_br", "Unten-rechts", item_supertip.format("unten-rechts")),
]
my_kwargs = dict(
# get_enabled=bkt.apps.ppt_shapes_or_text_selected,
columns="3",
item_height="24",
item_width="24",
show_item_label=False,
on_action_indexed = bkt.Callback(self.locpin_on_action_indexed),
get_selected_item_index = bkt.Callback(lambda: self.locpin.index),
get_item_count = bkt.Callback(lambda: len(self.items)),
get_item_label = bkt.Callback(lambda index: self.items[index][1]),
get_item_image = bkt.Callback(self.locpin_get_image, context=True),
get_item_screentip = bkt.Callback(lambda index: self.items[index][1]),
get_item_supertip = bkt.Callback(lambda index: self.items[index][2]),
# children = [
# Item(image=gal_item[0], screentip=gal_item[1], supertip=gal_item[2])
# for gal_item in self.items
# ]
)
if not "image" in kwargs and not "image_mso" in kwargs:
my_kwargs["get_image"] = bkt.Callback(self.locpin_get_image, context=True)
my_kwargs.update(kwargs)
super(LocpinGallery, self).__init__(**my_kwargs)
def locpin_on_action_indexed(self, selected_item, index):
self.locpin.index = index
def locpin_get_image(self, context, index=None):
if index is None:
return context.python_addin.load_image(self.items[self.locpin.index][0])
else:
return context.python_addin.load_image(self.items[index][0])
class PositionGallery(bkt.ribbon.Gallery):
# items: [label, position, reference]
# position: [left, top, width, height]
# values can be absolute or percentage
# reference: CONTENTE / SLIDE / ABS
# values are converted according to reference
items = [
[u"<NAME>", [ 0, 0, 1, 1], 'CONTENT'],
[u"2/3 Links", [ 0, 0, 2./3, 1], 'CONTENT'],
[u"2/3 Rechts", [1./3, 0, 2./3, 1], 'CONTENT'],
[u"1/2 Links", [ 0, 0, .5, 1], 'CONTENT'],
[u"1/2 Mitte", [.25, 0, .5, 1], 'CONTENT'],
[u"1/2 Rechts", [ .5, 0, .5, 1], 'CONTENT'],
[u"1/3 Links", [ 0, 0, 1./3, 1], 'CONTENT'],
[u"1/3 Mitte", [1./3, 0, 1./3, 1], 'CONTENT'],
[u"1/3 Rechts", [2./3, 0, 1./3, 1], 'CONTENT'],
[u"1/6 Oben", [ 0, 0, 1, 1./6], 'CONTENT'],
[u"1/6 Unten", [ 0, 5./6, 1, 1./6], 'CONTENT']
]
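    # Illustrative note (assumption, not from the original code): callers may
    # pass their own positions list via PositionGallery(positions=...). Length
    # values <= 1 are fractions of the reference frame, values > 1 are absolute
    # points, negative values are measured from the right/bottom edge, and a
    # list is summed (see length_from_definition below). Example entry:
    #   ["1cm vom Rand, halbe Breite", [28.35, 0, 0.5, 1], 'CONTENT']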
def __init__(self, positions=None, label="Standardpositionen", columns=3, **kwargs):
self.items = positions or PositionGallery.items
super(PositionGallery, self).__init__(
label = label,
columns = columns,
image_mso='PositionAnchoringGallery',
supertip=u"Positioniere die ausgewählten Shapes auf eine Standardposition.",
children=[
bkt.ribbon.Button(
label="Benutzerdef. Bereich festlegen",
supertip="Der benutzerdefinierte Bereich wird anhand des gewählten Shapes festgelegt. Dieser Bereich ist anschließend über die Gallery wählbar und wird dauerhaft in der aktuellen Prästentation vorgehalten.",
on_action=bkt.Callback(self.set_userdefined_area),
get_enabled = bkt.get_enabled_auto
)
],
**kwargs
)
def on_action_indexed(self, selected_item, index, context, **kwargs):
        ''' reposition shapes according to settings in clicked element '''
item = self.items[index]
position = item[1]
reference = item[2]
#self.change_position(selection, shapes, item[1])
# reference size
if reference == 'CONTENT':
ref_left,ref_top,ref_width,ref_height = pplib.slide_content_size(context.slide)
else: # SLIDE / ABS
page_setup = context.presentation.PageSetup
ref_left,ref_top = 0, 0
ref_width,ref_height = page_setup.SlideWidth, page_setup.SlideHeight
# target size
left,top,width,height = self.rect_from_definition(position, ref_frame=[ref_left,ref_top,ref_width, ref_height])
frame = pplib.BoundingFrame.from_rect(left, top, width, height)
if 'on_position_change' in self._callbacks:
if context:
return context.invoke_callback(self._callbacks['on_position_change'], target_frame=frame, **kwargs)
def get_item_count(self, presentation):
self.init_userdefined_area_item(presentation)
return len(self.items)
# def get_enabled(self, shapes):
# return True
# def get_item_label(self, index):
# item = self.items[index]
# return "%s" % getattr(NumberedShapes, 'label_' + item['label'])[index%self.columns]
def get_item_image(self, index, presentation):
''' creates an item image with target area according to settings in the specified item '''
# retrieve item-settings
item = self.items[index]
return self.create_image(item[1], item[2], presentation)
def get_item_screentip(self, index):
# retrieve item-settings
item = self.items[index]
return 'Positionierung: ' + item[0]
def get_item_supertip(self, index):
return 'Verwende angezeigten Position/Größe.'
def create_image(self, position, reference, presentation):
# create bitmap, define pen/brush
height = 40
width = height*16./9
img = Drawing.Bitmap(width, height)
g = Drawing.Graphics.FromImage(img)
# reference size
if reference == 'CONTENT':
v_offset = height/5
v_ref = (height*4)/5
left,top,fill_width,fill_height = self.rect_from_definition(position, ref_frame=[0,v_offset,width, v_ref])
else: # SLIDE / ABS
ref_width,ref_height = presentation.PageSetup.SlideWidth, presentation.PageSetup.SlideHeight
left,top,fill_width,fill_height = self.rect_from_definition(position, ref_frame=[0,0,ref_width, ref_height])
left = left /ref_width * width
fill_width = fill_width /ref_width * width
top = top /ref_height * height
fill_height = fill_height/ref_height * height
color = Drawing.ColorTranslator.FromHtml('#ffdd0000')
brush = Drawing.SolidBrush(color)
g.FillRectangle(brush, Drawing.Rectangle(round(left),round(top), round(fill_width), round(fill_height)))
color = Drawing.ColorTranslator.FromHtml('#ff999999')
pen = Drawing.Pen(color,1)
g.DrawRectangle(pen, Drawing.Rectangle(0,0, width-1, height/5-1))
g.DrawRectangle(pen, Drawing.Rectangle(0,0, width-1, height-1))
return img
def rect_from_definition(self, pos_definition, ref_frame=[0,0,640,480]):
left = self.length_from_definition(pos_definition[0], ref_frame[2]) + ref_frame[0]
top = self.length_from_definition(pos_definition[1], ref_frame[3]) + ref_frame[1]
width = self.length_from_definition(pos_definition[2], ref_frame[2])
height = self.length_from_definition(pos_definition[3], ref_frame[3])
return left, top, width, height
def length_from_definition(self, length_definition, reference):
if type(length_definition) == list:
# allow [150, 50%]
l = 0
for ldef in length_definition:
l += self.length_from_definition(ldef, reference)
return l
elif type(length_definition) in [int, float, long]:
if length_definition < 0:
# negative values specify distance 'from right'
return reference - self.length_from_definition(-length_definition, reference)
elif length_definition <= 1:
# percentage values
return reference * length_definition
else:
# absolute values
return length_definition
else:
return 10
## userdefined area
def set_userdefined_area(self, presentation, shapes):
if len(shapes) == 1:
pplib.ContentArea.define_contentarea(presentation, shapes[0])
else:
frame = pplib.BoundingFrame.from_shapes(shapes)
pplib.ContentArea.define_contentarea(presentation, frame)
self.init_userdefined_area_item(presentation)
def init_userdefined_area_item(self, presentation):
#due to performance check first if tag exists at all
if pplib.ContentArea.isset_contentarea(presentation):
left, top, width, height = pplib.ContentArea.read_contentarea(presentation)
if len(self.items) == 12:
self.items.pop()
self.items.append([u"Benutzerdef. Bereich", [left, top, width, height], 'ABS'])
| 1.695313 | 2 |
clickhouse_plantuml/column.py | yonesko/clickhouse-plantuml | 0 | 4783 | #!/usr/bin/env python
# License: Apache-2.0
# Copyright (C) 2020 <NAME>
class Column(object):
"""
Represents ClickHouse column
"""
def __init__(
self,
database: str,
table: str,
name: str,
type: str,
default_kind: str,
default_expression: str,
comment: str,
compression_codec: str,
is_in_partition_key: bool,
is_in_sorting_key: bool,
is_in_primary_key: bool,
is_in_sampling_key: bool,
):
self.database = database
self.table = table
self.name = name
self.type = type
self.default_kind = default_kind
self.default_expression = default_expression
self.comment = comment
self.compression_codec = compression_codec
self.is_in_partition_key = is_in_partition_key
self.is_in_sorting_key = is_in_sorting_key
self.is_in_primary_key = is_in_primary_key
self.is_in_sampling_key = is_in_sampling_key
@property
def db_table(self):
return "{}.{}".format(self.database, self.table)
def __str__(self):
return self.name
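# Illustrative usage sketch (not part of the original module): a Column is
# normally populated from one row of ClickHouse's `system.columns` table, e.g.
#
#     col = Column(
#         database="default", table="visits", name="user_id", type="UInt64",
#         default_kind="", default_expression="", comment="", compression_codec="",
#         is_in_partition_key=False, is_in_sorting_key=True,
#         is_in_primary_key=True, is_in_sampling_key=False,
#     )
#     assert str(col) == "user_id"
#     assert col.db_table == "default.visits"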
| 1.421875 | 1 |
program/eggUI.py | otills/embryocv | 1 | 4791 | from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from scipy.spatial import distance as dist
import glob
import re
import os
from PyQt5 import QtGui
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
import cv2
import pandas as pd
from PyQt5.Qt import *
import pyqtgraph as pg
#from PyQt4.Qt import *
#%%
class eggUI(QDialog):
'''
createOpenCVEggROI : take eggID defined ROIs and visualise
'''
sliderUpdate = QtCore.pyqtSignal()
embryoUpdate = QtCore.pyqtSignal()
keyPressed = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(eggUI, self).__init__(parent)
# Make QDialog
self.diag = QtGui.QDialog()
global parentPath, vidTime
self.diag.setWindowTitle('Identify eggs')
self.diag.imv = pg.ImageView()
self.btn_save = QPushButton('Save', self)
#==============================================================================
#
#==============================================================================
def showUI(self,ims,eggRotBBox, eggBoxPoints, embryoLabels, eggInt):
self.eggInt = eggInt
self.embryoLabels = embryoLabels
self.diag.setWindowTitle('Identify eggs')
# Make ImageView
self.diag.imv = pg.ImageView()
self.diag.resize(1000,600)
# Make ROI
self.importOpenCVROIs(eggRotBBox, eggBoxPoints)
if (eggRotBBox[0][0][0] != 'nan'):
self.createOpenCVEggROI()
self.diag.imv.addItem(self.roi)
# Remove buttons from ImageView widget
self.diag.imv.ui.roiBtn.hide()
self.diag.imv.ui.menuBtn.hide()
# Make tableview
self.diag.table = QtGui.QTableWidget()
self.diag.table.setShowGrid(True)
self.diag.table.setHorizontalHeaderLabels(['Embryo', 'Sorted'])
# Sets different alignment data just on the first column
self.diag.table.setRowCount(int(len(self.embryoLabels)))
self.diag.table.setColumnCount(2)
# Highlight first row
self.diag.table.selectRow(0)
# Make layout
checkLayout = QGridLayout()
        # Deal with stretching for appropriate formatting.
checkLayout.setColumnStretch(0, 3)
checkLayout.setColumnStretch(1, 1)
checkLayout.setRowStretch(0, 1)
checkLayout.setRowStretch(1, 3)
# Add to layout
checkLayout.addWidget(self.diag.imv,0,0,2,2)
checkLayout.addWidget(self.diag.table,1,5)
# Apply layout
self.diag.setLayout(checkLayout)
# Make buttons
self.cpROI_btn = QtGui.QPushButton('&Copy ROI')
self.cpROI_btn.setMinimumHeight(40);
self.useCpROI_btn = QtGui.QPushButton('&Use Copied ROI')
self.useCpROI_btn.setMinimumHeight(40);
self.noEgg_btn = QtGui.QPushButton('&No Egg')
self.noEgg_btn.setMinimumHeight(40);
self.approveROI_btn = QtGui.QPushButton('&Approve ROIs')
self.approveROI_btn.setMinimumHeight(40);
self.exit_btn = QtGui.QPushButton('Exit')
self.exit_btn.setMinimumHeight(40);
# Make button layout
self.btnLayout = QGridLayout()
self.btnLayout.addWidget(self.cpROI_btn,0,0)
self.btnLayout.addWidget(self.useCpROI_btn,0,1)
self.btnLayout.addWidget(self.noEgg_btn,1,1)
self.btnLayout.addWidget(self.approveROI_btn,1,0)
# Exit button not implemented, just use window x (topRight).
# self.btnLayout.addWidget(self.exit_btn,2,1)
# Add button layout to GridLayout.
checkLayout.addLayout(self.btnLayout,0,5)
# Format images for pyqtgraph and put in ImageView
# self.formatSequence(ims)
self.imImport()
self.diag.imv.setImage(self.compSeq)
# Add the ROI to ImageItem
self.diag.show()
# Call function to add data
self.dataForTable()
# Function for modifying the table when ROI is approved.
self.approveROI_btn.clicked.connect(self.updateTable)
# Copy current ROI
self.cpROI_btn.clicked.connect(self.cpROI)
# Apply copied ROI
self.useCpROI_btn.clicked.connect(self.applyCopiedROI)
# Assign nan to frames not containing egg
self.noEgg_btn.clicked.connect(self.recordNoEgg)
# Exit - prompt user to confirm
#self.exit_btn.clicked.connect(self.closeEvent)
# Connect changes in timeline so correct ROI is created and displayed.
self.diag.imv.timeLine.sigPositionChanged.connect(self.updateOpenCVEggROICurrEmbryo)
#self.diag.keyPressEvent(self.keyPressEvent)
#==============================================================================
# Generate data for populating the embryo/approveROI table.
#==============================================================================
def dataForTable(self):
self.tableData = {'Embryo':list(self.embryoLabels),
'ROI approved':['No'] * len(list(self.embryoLabels))}
self.tableCols = [QtGui.QColor(0,0,100,120)]* len(list(self.embryoLabels))
# Enter data onto Table
horHeaders = []
for n, key in enumerate(sorted(self.tableData.keys())):
horHeaders.append(key)
for m, item in enumerate(self.tableData[key]):
newitem = QtGui.QTableWidgetItem(item)
newitem.setBackground(QtGui.QColor(0,0,100,120))
self.diag.table.setItem(m, n, newitem)
# Add Header
self.diag.table.setHorizontalHeaderLabels(horHeaders)
# Adjust size of Table
self.diag.table.resizeRowsToContents()
# self.diag.table.resizeColumnsToContents()
#==============================================================================
# Update table when approve ROI button clicked.
#==============================================================================
def updateTable(self):
self.tableData['ROI approved'][self.diag.table.currentRow()] = 'Approved'
self.tableCols[self.diag.table.currentRow()] = QtGui.QColor(0,100,0,120)
horHeaders = []
for n, key in enumerate(sorted(self.tableData.keys())):
horHeaders.append(key)
for m, item in enumerate(self.tableData[key]):
newitem = QtGui.QTableWidgetItem(item)
self.diag.table.setItem(m, n, newitem)
newitem.setBackground(self.tableCols[m])
#Add Header
self.diag.table.setHorizontalHeaderLabels(horHeaders)
#Adjust size of Table
self.diag.table.resizeRowsToContents()
#==============================================================================
# Update the user interface
#==============================================================================
def updateUI(self,ims,eggRotBBox, eggBoxPoints):
self.imImport()
self.diag.imv.setImage(self.compSeq)
self.importOpenCVROIs(eggRotBBox, eggBoxPoints)
self.getSeqValsAndCurrROI()
self.updateOpenCVEggROINewEmbryo()
# Add the ROI to ImageItem
#self.diag.imv.addItem(self.roi)
#==============================================================================
# Deal with data from the dataHandling class
#==============================================================================
def formatSequence(self,ims):
# Format seq appropriately for pyqtgraph ROIs
self.tSeqd = np.zeros_like(ims)
for l in range(len(self.tSeqd)):
self.tSeqd[l] = ims[l].T
#==============================================================================
# Get folders for a particular embryo
#==============================================================================
def getEmbryoFolders(self, parentPath, embryo):
self.parentPath = parentPath
self.embryo = embryo
self.embryoFolders = glob.glob(parentPath + "*/" + embryo +"/")
self.embryoFolders.sort(key=os.path.getctime)
#==============================================================================
# Get image
#==============================================================================
def imImport(self):
for f in range(len(self.eggUIimPaths)):
im = cv2.imread(self.eggUIimPaths[f],cv2.IMREAD_ANYDEPTH)
ran = (im.max()-im.min())/255.
out = (im/ran)
out = out-out.min()
self.compSeq[int(f)] = out.astype(np.uint8)
self.compSeq[f] = self.compSeq[f].T
#==============================================================================
# Update image iteratively when slider moved
#==============================================================================
#==============================================================================
# def updateImage(self):
# self.getSeqValsAndCurrROI()
# #self.UI.compSeq[e*len(self.eggIDIms):(e*len(self.eggIDIms)+len(self.eggIDIms))] = self.seq
# #self.UI.comp(self.imImport(self.diag.imv.currentIndex()))
# im = cv2.imread(self.eggUIimPaths[self.diag.imv.currentIndex],cv2.IMREAD_ANYDEPTH)
# ran = (im.max()-im.min())/255.
# out = (im/ran)
# out = out-out.min()
# self.compSeq[self.diag.imv.currentIndex] = out.astype(np.uint8)
# self.diag.imv.setImage(self.compSeq.T)
# self.diag.imv.show()
# #========
#==============================================================================
#==============================================================================
# ROI functions
#==============================================================================
#==============================================================================
# Import OpenCV determined ROIs from dataHandling instance. Called from showUI and updateUI.
#==============================================================================
def importOpenCVROIs(self,eggRotBBox, eggBoxPoints):
self.eggRotBBox = eggRotBBox
self.eggBoxPoints = eggBoxPoints
self.originalEggRotBBox = eggRotBBox.copy()
self.originalEggBoxPoints = eggBoxPoints.copy()
#==============================================================================
# Get index values for ROI data.
#==============================================================================
def getSeqValsAndCurrROI(self):
# Calculate the indices for current frame
if self.eggInt != 1234:
self.divVal = self.diag.imv.currentIndex/float(len(self.eggRotBBox[1]))
self.intDivVal = int(self.divVal)
self.withinSeqVal = int((self.divVal - self.intDivVal)*len(self.eggRotBBox[self.intDivVal]))
self.currROI_eggRotBBox = self.eggRotBBox[self.intDivVal,self.withinSeqVal]
self.currROI_eggBoxPoints = self.eggBoxPoints[self.intDivVal,self.withinSeqVal]
else:
self.divVal = self.diag.imv.currentIndex
self.intDivVal = int(self.divVal)
self.currROI_eggRotBBox = self.eggRotBBox[0,self.intDivVal]
self.currROI_eggBoxPoints = self.eggBoxPoints[0,self.intDivVal]
#==============================================================================
# Generate a pyqtgraph ROI, using data from OpenCV.
#==============================================================================
def createOpenCVEggROI(self):
# Get relevant sequence position and ROI.
self.getSeqValsAndCurrROI()
if (self.currROI_eggRotBBox[0] != 'nan'):
# 0 or 90 degree angles seem very buggy. Shift to 1 and 89 as a bodge fix.
if self.currROI_eggRotBBox[4] == -90:
#self.currROI_eggRotBBox[4] = -89
# Get rotated bounding box points
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
                # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
                # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
elif self.currROI_eggRotBBox[4] == -0:
#self.currROI_eggRotBBox[4] = -1
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
                # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
                # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
elif self.currROI_eggRotBBox[4] == -180:
#self.currROI_eggRotBBox[4] = -179
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
                # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
                # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
else:
# Get rotated bounding box points
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
                # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
                # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
            # Make ROI - note: angles other than 0 or 90 degrees require a negated X size
            # Rectangular ROI used to enable easier handling of corner handles for tracking user changes.
if (self.currROI_eggRotBBox[4] == -90.0) | (self.currROI_eggRotBBox[4] == -0.0)| (self.currROI_eggRotBBox[4] == 0.0):
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
# roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# Debug
# print 'no angle'
else:
# Random angle ROIs
self.roi = pg.ROI([bottomMost[0][0], bottomMost[0][1]], [-self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
self.roi.setAngle(self.currROI_eggRotBBox[4], update=True)
# roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [-eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# Add handles
self.roi.addRotateHandle([1, 0],[0.5,0.5])
self.roi.addRotateHandle([0, 1], [0.5,0.5])
self.roi.addScaleHandle([1, 1], [0, 0])
self.roi.addScaleHandle([0, 0], [1, 1])
self.roi.setPen('y',width=3)
self.roi.removable
self.roi.invertible = 'True'
# Make var for dealing with modifications to roi
self.updatedEggROI=[]
self.roi.sigRegionChangeFinished.connect(self.updateROI)
#else:
#==============================================================================
# Update the ROI for current embryo.
#==============================================================================
def updateOpenCVEggROICurrEmbryo(self):
# Remove previous
if (hasattr(self, 'roi')):
self.diag.imv.removeItem(self.roi)
# Get relevant video position and ROI.
self.getSeqValsAndCurrROI()
# 0 or 90 degree angles seem very buggy. Shift to 1 and 89 as a bodge fix.
if self.currROI_eggRotBBox[4] == -90:
#self.currROI_eggRotBBox[4] = -89
# Get rotated bounding box points
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
elif self.currROI_eggRotBBox[4] == -0:
#self.currROI_eggRotBBox[4] = -1
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
elif self.currROI_eggRotBBox[4] == -180:
#self.currROI_eggRotBBox[4] = -179
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
else:
# Get rotated bounding box points
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
        # Make ROI - note: angles other than 0 or 90 degrees require a negated X size
        # Rectangular ROI used to enable easier handling of corner handles for tracking user changes.
if (self.currROI_eggRotBBox[4] == -90.0) | (self.currROI_eggRotBBox[4] == -0.0)| (self.currROI_eggRotBBox[4] == 0.0):
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
# roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# Debug
# print 'no angle'
else:
# Random angle ROIs
self.roi = pg.ROI([bottomMost[0][0], bottomMost[0][1]], [-self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
self.roi.setAngle(self.currROI_eggRotBBox[4], update=True)
# roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [-eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [-eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# Add handles
self.roi.addRotateHandle([1, 0],[0.5,0.5])
self.roi.addRotateHandle([0, 1], [0.5,0.5])
self.roi.addScaleHandle([1, 1], [0, 0])
self.roi.addScaleHandle([0, 0], [1, 1])
self.roi.setPen('y',width=3)
self.roi.removable
self.roi.invertible = 'True'
# Make var for dealing with modifications to roi
self.updatedEggROI=[]
### Still to do...
self.diag.imv.addItem(self.roi)
self.roi.sigRegionChangeFinished.connect(self.updateROI)
#==============================================================================
# Update ROI for new embryo.
#==============================================================================
def updateOpenCVEggROINewEmbryo(self):
# Remove old ROI
if (hasattr(self, 'roi')):
self.diag.imv.removeItem(self.roi)
# Get relevant video position and ROI
self.getSeqValsAndCurrROI()
# 0 or 90 degree angles seem very buggy. Shift to 1 and 89 as a bodge fix.
if self.currROI_eggRotBBox[4] == -90:
#self.currROI_eggRotBBox[4] = -89
# Get rotated bounding box points
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
elif self.currROI_eggRotBBox[4] == -0:
#self.currROI_eggRotBBox[4] = -1
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
elif self.currROI_eggRotBBox[4] == -180:
#self.currROI_eggRotBBox[4] = -179
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
else:
# Get rotated bounding box points
ySorted = self.currROI_eggBoxPoints[np.argsort(self.currROI_eggBoxPoints[:, 1]), :]
# Get bottom most, and top most sorted corner points
bottomMost = ySorted[:2, :]
topMost = ySorted[2:, :]
# Get bottom most
bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
(bl, br) = bottomMost
            # Use the bottom-left coordinate as an anchor to calculate the Euclidean distance to the two top-most points.
            # The point with the largest distance will be our bottom-right point
D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
(tl, tr) = topMost[np.argsort(D)[::-1], :]
        # Make ROI - note: angles other than 0 or 90 degrees require a negated X size
        # Rectangular ROI used to enable easier handling of corner handles for tracking user changes.
if (self.currROI_eggRotBBox[4] == -90.0) | (self.currROI_eggRotBBox[4] == -0.0)| (self.currROI_eggRotBBox[4] == 0.0):
self.roi = pg.ROI([bl[0], bl[1]], [self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
# roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# Debug
# print 'no angle'
else:
# Random angle ROIs
self.roi = pg.ROI([bottomMost[0][0], bottomMost[0][1]], [-self.currROI_eggRotBBox[2], self.currROI_eggRotBBox[3]])
self.roi.setAngle(self.currROI_eggRotBBox[4], update=True)
# Add handles
self.roi.addRotateHandle([1, 0],[0.5,0.5])
self.roi.addRotateHandle([0, 1], [0.5,0.5])
self.roi.addScaleHandle([1, 1], [0, 0])
self.roi.addScaleHandle([0, 0], [1, 1])
self.roi.setPen('y',width=3)
self.roi.removable
self.roi.invertible = 'True'
# Make var for dealing with modifications to roi
self.updatedEggROI=[]
### Still to do...
self.diag.imv.addItem(self.roi)
self.roi.sigRegionChangeFinished.connect(self.updateROI)
#==============================================================================
# Update ROI.
#==============================================================================
def updateROI(self):
#global vidTime, xyPosHandles, ellipse, changeAngle, roiChanges,updatedEggROI, changeX, changeY, changeScaleX, changeScaleY, changeAngle
# Get changes to ROI scale, angle and position
roiChanges = self.roi.getGlobalTransform()
changeX = -roiChanges.getTranslation()[0]
changeY = roiChanges.getTranslation()[1]
changeScaleX = roiChanges.getScale()[0]
changeScaleY = roiChanges.getScale()[1]
changeAngle = roiChanges.getAngle()
# Update ROI, either updating the previously updated or taking the unaltered ROI from OpenCV as a starting point.
#if len(self.updatedEggROI) == 0:
self.updatedEggROI = (((self.currROI_eggRotBBox[0]-changeX),(self.currROI_eggRotBBox[1]+changeY)),((max((self.currROI_eggRotBBox[3]*changeScaleX),(self.currROI_eggRotBBox[2]*changeScaleY))),(min((self.currROI_eggRotBBox[3]*changeScaleX),(self.currROI_eggRotBBox[2]*changeScaleY)))),self.currROI_eggRotBBox[4]+changeAngle)
#else:
#self.updatedEggROI = (((self.updatedEggROI[0][0]-changeX),(self.updatedEggROI[0][1]+changeY)),((max((self.updatedEggROI[1][0]*changeScaleX),(self.updatedEggROI[1][1]*changeScaleY))),(min((self.updatedEggROI[1][0]*changeScaleX),(self.updatedEggROI[1][1]*changeScaleY)))),self.updatedEggROI[2]+changeAngle)
hh = self.roi.getHandles()
hh = [self.roi.mapToItem(self.diag.imv.getImageItem(), h.pos()) for h in hh]
# Handle on each corner. Get handle positions
self.xyPosHandles =[]
for h in hh:
self.xyPosHandles.append([h.x(),h.y()])
(eggBBX, eggBBY), (eggBBW, eggBBH), eggBBAng = cv2.minAreaRect(np.array(self.xyPosHandles, dtype=np.int32) )
if eggBBAng == -90:
eggBBAng = -89
elif eggBBAng == -180:
eggBBAng = -179
elif eggBBAng == -0:
eggBBAng = -1
# Save updated
# If more than one frame eggID per sequence..
if self.eggInt != 1234:
self.eggRotBBox[self.intDivVal,self.withinSeqVal] = [eggBBX, eggBBY, eggBBW, eggBBH, eggBBAng]
self.eggBoxPoints[self.intDivVal,self.withinSeqVal] = cv2.boxPoints(((eggBBX, eggBBY), (eggBBW, eggBBH), eggBBAng))
# Otherwise just save simply
else:
self.eggRotBBox[0,self.intDivVal] = [eggBBX, eggBBY, eggBBW, eggBBH, eggBBAng]
self.eggBoxPoints[0,self.intDivVal] = cv2.boxPoints(((eggBBX, eggBBY), (eggBBW, eggBBH), eggBBAng))
#==============================================================================
# Copy ROI on button click.
#==============================================================================
def cpROI(self):
self.originalEggRotBBox = self.currROI_eggRotBBox
self.originalEggBoxPoints = self.currROI_eggBoxPoints
#==============================================================================
# Assign nan to current ROI if 'No Egg' button clicked
#==============================================================================
def recordNoEgg(self):
# Remove ROI
self.diag.imv.removeItem(self.roi)
# Store nans in place of ROI
if self.eggInt != 1234:
self.eggRotBBox[self.intDivVal,self.withinSeqVal] = [np.nan, np.nan, np.nan, np.nan, np.nan]
            self.eggBoxPoints[self.intDivVal,self.withinSeqVal] = [np.nan,np.nan,np.nan,np.nan]  # index per sequence/frame, matching eggRotBBox above
else:
self.eggBoxPoints[0,self.intDivVal] = [np.nan,np.nan,np.nan,np.nan]
self.eggRotBBox[0,self.intDivVal] = [np.nan, np.nan, np.nan, np.nan, np.nan]
#==============================================================================
# Copy ROI on button click.
#==============================================================================
def applyCopiedROI(self):
self.getSeqValsAndCurrROI()
# Store copied ROI to embryo sequence ROIs
if self.eggInt != 1234:
self.divVal = self.diag.imv.currentIndex/float(len(self.eggRotBBox[1]))
self.intDivVal = int(self.divVal)
self.withinSeqVal = int((self.divVal - self.intDivVal)*len(self.eggRotBBox[self.intDivVal]))
self.eggRotBBox[self.intDivVal,self.withinSeqVal] = self.originalEggRotBBox
self.eggBoxPoints[self.intDivVal,self.withinSeqVal] = self.originalEggBoxPoints
else:
self.divVal = self.diag.imv.currentIndex
self.intDivVal = int(self.divVal)
self.eggRotBBox[0,self.intDivVal] = self.originalEggRotBBox
self.eggBoxPoints[0,self.intDivVal] = self.originalEggBoxPoints
self.updateOpenCVEggROICurrEmbryo()
#==============================================================================
#
#==============================================================================
#==============================================================================
# Close button - not implemented (hidden)
#==============================================================================
#==============================================================================
# def closeEvent(self, event):
#
# quit_msg = "Are you sure you want to exit the program?"
# reply = QtGui.QMessageBox.question(self, 'Message',
# quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
#
# if reply == QtGui.QMessageBox.Yes:
# #event.accept()
# app.quit()
# else:
# event.ignore()
#
#==============================================================================
#==============================================================================
# #self.originalEggRotBBox = eggRotBBox.copy()
# #self.originalEggBoxPoints = eggBoxPoints.copy()
# #self.currROI_eggRotBBox = self.eggRotBBox[self.intDivVal,self.withinSeqVal]
# #self.currROI_eggBoxPoints = self.eggBoxPoints[self.intDivVal,self.withinSeqVal]
#
# # Modified version of updateOpenCVEggROICurrEmbryo
# # Remove previous
# self.diag.imv.removeItem(self.roi)
# # Get relevant video position and ROI.
# self.getSeqValsAndCurrROI()
# # Get rotated bounding box points
# ySorted = self.originalEggBoxPoints[np.argsort(self.originalEggBoxPoints[:, 1]), :]
# # Get bottom most, and top most sorted corner points
# bottomMost = ySorted[:2, :]
# topMost = ySorted[2:, :]
# # Get bottom most
# bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
# (bl, br) = bottomMost
# # Use bottom-left coordinate as anchor to calculate the Euclidean distance between the
# # The point with the largest distance will be our bottom-right point
# D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
# (tl, tr) = topMost[np.argsort(D)[::-1], :]
# # Make ROI - note non 0,or 90 degree angles, require different of the X size
# # Rectangular ROI used to enable more easy handling of corner handles for tracking user chagnges.
# if (self.originalEggRotBBox[4] == -90.0) | (self.originalEggRotBBox[4] == -0.0)| (self.originalEggRotBBox[4] == 0.0):
# self.roi = pg.ROI([bottomMost[0][0], bottomMost[0][1]], [self.originalEggRotBBox[2], self.originalEggRotBBox[3]])
# # roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# else:
# # Random angle ROIs
# self.roi = pg.ROI([bottomMost[0][0], bottomMost[0][1]], [-self.originalEggRotBBox[2], self.originalEggRotBBox[3]])
# self.roi.setAngle(self.originalEggRotBBox[4], update=True)
# # roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [-eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# # Add handles
# self.roi.addRotateHandle([1, 0],[0.5,0.5])
# self.roi.addRotateHandle([0, 1], [0.5,0.5])
# self.roi.addScaleHandle([1, 1], [0, 0])
# self.roi.addScaleHandle([0, 0], [1, 1])
# self.roi.setPen('y',width=3)
# self.roi.removable
# self.roi.invertible = 'True'
# # Make var for dealing with modifications to roi
# self.updatedEggROI=[]
# ### Still to do...
# self.diag.imv.addItem(self.roi)
# self.roi.sigRegionChangeFinished.connect(self.updateROI)
#==============================================================================
#=============== | 1.921875 | 2 |
items/coins.py | leerichoang/Legend-Of-Peach | 0 | 4807 | import pygame
from pygame.sprite import Sprite
class Coins(Sprite):
"""Coins"""
def __init__(self, hub, x, y, name='coin', state='floating'):
super().__init__()
# Values
self.name = name
self.hub = hub
self.original_pos = [x, y]
self.rest_height = y
self.rest_x = x
self.velY = 0
self.upwards = True
self.state = state
self.scale = (30, 50)
self.scale2 = (14, 50)
self.scale3 = (4, 50)
# Screen Camera
self.screen = self.hub.main_screen
self.screen_rect = self.screen.get_rect()
self.camera = hub.camera
# Images
self.index = 0
self.change_freq = 120
self.player_clock = pygame.time.get_ticks() + self.change_freq
self.frameRate = 30
self.clock = pygame.time.get_ticks() + self.frameRate
self.image_index = [pygame.image.load("imgs/Items/coin1.png"),
pygame.image.load("imgs/Items/coin2.png"),
pygame.image.load("imgs/Items/coin3.png"),
pygame.image.load("imgs/Items/coin2.png")]
self.image_index[0] = pygame.transform.scale(self.image_index[0], self.scale)
self.image_index[1] = pygame.transform.scale(self.image_index[1], self.scale2)
self.image_index[2] = pygame.transform.scale(self.image_index[2], self.scale3)
self.image_index[3] = pygame.transform.scale(self.image_index[3], self.scale2)
self.resting_index = [pygame.image.load("imgs/Items/CoinForBlackBG.png"),
pygame.image.load("imgs/Items/CoinForBlackBG1.png"),
pygame.image.load("imgs/Items/CoinForBlackBG2.png"),
pygame.image.load("imgs/Items/CoinForBlackBG1.png")]
for i in range(len(self.resting_index)):
self.resting_index[i] = pygame.transform.scale(self.resting_index[i], self.scale)
if self.state == "floating":
self.image = self.image_index[self.index]
else:
self.image = self.resting_index[self.index]
self.rect = self.image.get_rect()
self.rect.x = self.original_pos[0]
self.rect.y = self.original_pos[1]
def draw(self):
self.screen.blit(self.image, self.rect)
def update(self):
self.check_state()
def check_state(self):
if self.state == "floating":
self.start_anim()
elif self.state == "resting":
self.resting()
def start_anim(self):
"""Starts coin spin animation"""
self.velY = 5
if self.rect.y == (self.rest_height - 60):
self.upwards = False
if self.upwards:
self.rect.y -= self.velY
else:
self.rect.y += self.velY
        # advance the spin animation frame once the frame timer has elapsed
if pygame.time.get_ticks() > self.player_clock:
self.player_clock = pygame.time.get_ticks() + self.change_freq
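            # The x nudges below (+8, +5, -5, -8) presumably keep the coin visually
            # centred as the spin frames narrow and widen again (widths 30 -> 14 -> 4 -> 14 px);
            # note they adjust original_pos rather than rect.x directly.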
if self.index == 0:
self.original_pos[0] += 8
elif self.index == 1:
self.original_pos[0] += 5
elif self.index == 2:
self.original_pos[0] -= 5
elif self.index == 3:
self.original_pos[0] -= 8
self.index += 1
self.index %= len(self.image_index)
self.image = self.image_index[self.index]
if self.rect.y == self.rest_height:
self.hub.gamemode.coins += 1
self.hub.gamemode.check_coins()
self.hub.gamemode.score += 200
self.kill()
def resting(self):
"""Starts coin rest animation"""
        # advance the resting animation frame once the frame timer has elapsed
if pygame.time.get_ticks() > self.player_clock:
self.player_clock = pygame.time.get_ticks() + self.change_freq
self.index += 1
self.index %= len(self.resting_index)
self.image = self.resting_index[self.index]
| 2.265625 | 2 |
sample_project/exam/exam.py | pcse/gitlab_tools | 0 | 4831 | """
These methods can be called inside WebCAT to determine which tests are loaded
for a given section/exam pair. This allows a common WebCAT submission site to
support different project tests
"""
def section():
# Instructor section (instructor to change before distribution)
#return 8527
#return 8528
return 8529
def exam():
    # A or B exam (instructor to change to match the specific project distribution)
return "A"
#return "B"
| 1.492188 | 1 |
check_perm.py | codecakes/random_games | 0 | 4855 | """
PermCheck
Check whether array A is a permutation.
https://codility.com/demo/results/demoANZ7M2-GFU/
Task description
A non-empty zero-indexed array A consisting of N integers is given.
A permutation is a sequence containing each element from 1 to N once, and only once.
For example, array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
A[3] = 2
is a permutation, but array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
is not a permutation, because value 2 is missing.
The goal is to check whether array A is a permutation.
Write a function:
def solution(A)
that, given a zero-indexed array A, returns 1 if array A is a permutation and 0 if it is not.
For example, given array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
A[3] = 2
the function should return 1.
Given array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
the function should return 0.
Assume that:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [1..1,000,000,000].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
def solution(A):
# write your code in Python 2.7
    # A permutation of 1..N must contain N distinct values, and the smallest
    # possible sum of N distinct positive integers is N*(N+1)/2, which is
    # achieved only by {1, 2, ..., N}. So checking "all values distinct" plus
    # "sum equals N*(N+1)/2" is sufficient.
    s = set(A)
    N_set = len(s)  # O(n)
    N = len(A)
    if N != N_set:
        return 0
    sum_N = N * (N + 1) / 2  # O(1)
    sum_A = sum(A)  # O(n)
return 1 if sum_N == sum_A else 0 | 3.375 | 3 |
cli.py | checktheroads/deenis | 4 | 4879 | #!/usr/bin/env python3
"""
CLI for Accessing Deenis
"""
# Standard Imports
import sys
from pathlib import Path
# Module Imports
import click
# Path Fixes
working_dir = Path(__file__).resolve().parent
sys.path.append(str(working_dir))
# Project Imports
from deenis import Deenis
@click.group(
help=(
"Deenis can be used to group and automate boring DNS tasks. For example, "
"`host` can take a hostname, IPv4 Address, and IPv6 Address, and create "
"forward A & AAAA, and reverse PTR records (4 actions) with a single command."
)
)
def add_records():
"""Click Command Group Definition"""
# pylint: disable=unnecessary-pass
# Dear Pylint: This is how Click likes to do things. Get over it bruh.
pass
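

# Example invocations (hypothetical hostnames/addresses, for illustration only):
#   python cli.py host -f server01.example.com -4 192.0.2.10 -6 2001:db8::10
#   python cli.py tenant -i CUST-0001 -4 192.0.2.0/29 -6 2001:db8:100::/48 \
#       -f4 infra.example.net -f6 infra.example.net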
@add_records.command("host", help="Add a Host Record")
@click.option("-c", "--config-file", "config_file", help="Path to YAML Config File")
@click.option("-4", "--ipv4-address", "ipv4", default=None, help="IPv4 Address")
@click.option("-6", "--ipv6-address", "ipv6", default=None, help="IPv6 Address")
@click.option("-f", "--fqdn", "fqdn", required=True, help="FQDN")
def host(**click_input):
"""Add host records from CLI"""
if not click_input["config_file"]:
config_path = Path.cwd().joinpath("deenis.yaml")
if not config_path.exists():
raise click.UsageError(
click.style(
(
f"Config file not specified and not found at {config_path}. "
"Please specify a config file path."
),
fg="red",
bold=True,
)
)
elif click_input["config_file"]:
        config_path = Path(click_input["config_file"]).resolve()
if not click_input["ipv4"] and not click_input["ipv6"]:
raise click.UsageError(
click.style("At least one IP Address is required", fg="red", bold=True)
)
try:
responses = Deenis(str(config_path)).AddHost(
{
"hostname": click_input["fqdn"],
"ipv4": click_input["ipv4"],
"ipv6": click_input["ipv6"],
}
)
if responses:
for res in responses:
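                # each response tuple is (status, record type, record name, record target, errors)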
status, record_record, record, target, errors = res
if status == "Success":
click.echo(
"Added "
+ click.style(record_record, fg="green", bold=True)
+ " Record for "
+ click.style(record, fg="yellow", bold=True)
+ " Pointing to "
+ click.style(target, fg="blue", bold=True)
)
elif status == "Failure":
click.echo(
"Error Adding "
+ click.style(record_record, fg="magenta", bold=True)
+ " Record for "
+ click.style(record, fg="cyan", bold=True)
+ " Pointing to "
+ click.style(target, fg="red", bold=True)
+ f"\nErrors:\n"
)
for err in errors:
click.secho(err, fg="red")
if not responses:
click.secho("\nNo records were added", fg="magenta", bold=True)
except (RuntimeError, AttributeError) as error_exception:
raise click.UsageError(click.style(str(error_exception), fg="red", bold=True))
@add_records.command("tenant", help="Bulk Add PTR Records for a Tenant/Customer")
@click.option("-c", "--config-file", "config_file", help="Path to YAML Config File")
@click.option(
"-i", "--crm-id", "crm_id", default=None, help="Unique Tenant Indentifier"
)
@click.option(
"-4", "--ipv4-prefix", "prefix4", default=None, help="IPv4 Prefix Assignment"
)
@click.option(
"-6", "--ipv6-prefix", "prefix6", default=None, help="IPv6 Prefix Assignment"
)
@click.option(
"-f4", "--ipv4-fqdn", "host4", default=None, help="FQDN for IPv4 PTR Target"
)
@click.option(
"-f6", "--ipv6-fqdn", "host6", default=None, help="FQDN for IPv6 PTR Target"
)
def tenant_reverse(**click_input):
"""Add Tenant Records from CLI"""
if not click_input["config_file"]:
config_path = Path.cwd().joinpath("deenis.yaml")
if not config_path.exists():
raise click.UsageError(
click.style(
(
f"Config file not specified and not found at {config_path}. "
"Please specify a config file path."
),
fg="red",
bold=True,
)
)
elif click_input["config_file"]:
        config_path = Path(click_input["config_file"]).resolve()
if not click_input["prefix4"] and not click_input["prefix6"]:
raise click.UsageError(
click.style("At least one prefix is required", fg="red", bold=True)
)
try:
responses = Deenis(str(config_path)).TenantReverse(
{
"crm_id": click_input["crm_id"],
"host4": click_input["host4"],
"host6": click_input["host6"],
"prefix4": click_input["prefix4"],
"prefix6": click_input["prefix6"],
}
)
"""
Response format:
[
(
'Success',
'A',
'test011.omnificent.io',
'172.16.31.10',
[]
),
(
'Success',
'PTR',
'250',
'test011.omnificent.io',
[]
)
]
"""
nl = "\n"
tab = " "
_text = {"fg": "white", "bold": True}
_stat_suc = {"fg": "green", "bold": True}
_stat_fail = {"fg": "red", "bold": True}
_rec_type = {"fg": "yellow", "bold": True}
_rec_name = {"fg": "magenta", "bold": True}
_rec_trgt = {"fg": "cyan", "bold": True}
_error = {"fg": "red"}
click.secho(nl + "Records:" + nl, **_text)
for res in responses:
status, rec_type, rec_name, rec_trgt, errors = res
if status == "Success":
_status = ("⚡ " + status, _stat_suc)
elif status == "Failure":
_status = ("☝ " + status, _stat_fail)
click.echo(
tab
+ click.style(_status[0], **_status[1])
+ nl
+ tab * 4
+ click.style(rec_type, **_rec_type)
+ click.style(" ⟫ ", **_text)
+ click.style(rec_name, **_rec_name)
+ click.style(" ⟩ ", **_text)
+ click.style(rec_trgt, **_rec_trgt)
)
if errors:
click.echo(tab * 4 + click.style("Errors: ", **_stat_fail))
for err in errors:
if isinstance(err, dict):
for ename in err.keys():
click.echo(
tab * 6
+ click.style(str(ename) + ":", **_error)
+ tab
+ click.style(str(err[ename]), **_error)
)
elif isinstance(err, str):
click.echo(tab * 4 + click.style(err, **_error))
except (AttributeError, RuntimeError) as tenant_error:
raise click.ClickException(tenant_error)
if __name__ == "__main__":
add_records()
| 1.507813 | 2 |
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py | monroid/openvino | 2,406 | 4887 | #
# slice paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import sys
data_type = 'float32'
def slice(name : str, x, axes : list, start : list, end : list):
pdpd.enable_static()
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
outs = exe.run(
feed={'x': x},
fetch_list=[out])
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
return outs[0]
def main():
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type)
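    # The call below should correspond to the numpy-style expression x[:, 0:-1, 1:3]
    # (axes 1 and 2 sliced, axis 0 untouched) -- stated here as a reading of the
    # paddle slice semantics, not verified against the produced output.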
slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3))
x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type)
slice("slice_1d", x, axes=[0], start=[0], end=[1])
if __name__ == "__main__":
main() | 1.65625 | 2 |