| max_stars_repo_path (string, 4 to 197 chars) | max_stars_repo_name (string, 6 to 120 chars) | max_stars_count (int64, 0 to 191k) | id (string, 1 to 8 chars) | content (string, 6 to 964k chars) | score (float64, -0.88 to 3.95) | int_score (int64, 0 to 4) |
---|---|---|---|---|---|---|
hotdog/show_image.py | rparkin1/inceptionV3_hotdog | 0 | 12784555 | <gh_stars>0
#!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from IPython.display import Image, HTML, display
root = "images"
attributions = dict(attributions)  # assumes an iterable of (relative image path, credit string) pairs is defined upstream
def show_image(image_path):
display(Image(image_path))
image_rel = image_path.replace(root,'')
caption = "Image " + ' - '.join(attributions[image_rel].split(' - ')[:-1])
display(HTML("<div>%s</div>" % caption))
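# Illustrative note (not in the original file): `attributions` is assumed to be
# built upstream as a mapping from an image path relative to `root` to a
# "<description> - <credit>" string. A hypothetical setup might look like:
#
#   attributions = {"/hotdog/001.jpg": "hot dog photo - Jane Doe - CC-BY 2.0"}
#   show_image("images/hotdog/001.jpg")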
| 1.929688 | 2 |
server/mlabns/db/nagios_config_wrapper.py | mtlynch/m-lab.ns | 10 | 12784683 | import logging
from google.appengine.api import memcache
from mlabns.db import model
from mlabns.util import constants
def get_nagios_config():
"""Retrieves nagios config info. First checks memcache, then datastore.
Returns:
Nagios model instance
"""
nagios = memcache.get(constants.DEFAULT_NAGIOS_ENTRY)
if not nagios:
nagios = model.Nagios.get_by_key_name(constants.DEFAULT_NAGIOS_ENTRY)
if nagios:
memcache.set(constants.DEFAULT_NAGIOS_ENTRY, nagios)
else:
logging.error('Datastore does not have the Nagios credentials.')
return nagios
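# Usage sketch (illustrative, not part of the original module): callers just ask
# for the config and let this wrapper arbitrate between memcache and the datastore.
#
#   nagios = get_nagios_config()
#   if nagios is None:
#       # credentials were never stored; abort or fall back
#       ...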
| 1.28125 | 1 |
app.py | RuFalcon/password_generator | 0 | 12784811 | <reponame>RuFalcon/password_generator
from PyQt5.QtWidgets import (
QWidget,
QSlider,
QLabel,
QApplication,
QMainWindow,
QCheckBox,
QLineEdit)
from PyQt5 import uic
from PyQt5.QtGui import QIcon
import sys
import random
import string
class Window(QMainWindow):
def __init__(self):
super().__init__()
uic.loadUi('password.ui', self)
self.setGeometry(200, 200, 800, 553)
self.setWindowIcon(QIcon('images/password.svg'))
self.setFixedHeight(553)
self.setFixedWidth(800)
self.lowcase_letters = string.ascii_lowercase
self.upcase_letters = string.ascii_uppercase
self.digits = string.digits
self.punctuation = string.punctuation
self.password_characters = self.lowcase_letters + \
self.upcase_letters + self.digits
self.check_lowcase = self.findChild(QCheckBox, "check_lowcase")
self.state_changed(self.check_lowcase)
self.check_upcase = self.findChild(QCheckBox, "check_upcase")
self.state_changed(self.check_upcase)
self.check_numbers = self.findChild(QCheckBox, "check_numbers")
self.state_changed(self.check_numbers)
self.check_symbols = self.findChild(QCheckBox, "check_symbols")
self.state_changed(self.check_symbols)
self.slider = self.findChild(QSlider, "horizontalSlider")
self.slider.valueChanged.connect(self.changed_slider)
self.password = self.findChild(QLineEdit, "password")
self.password_length = self.findChild(QLabel, "password_length")
self.get_random_password(int(self.password_length.text()))
def state_changed(self, checkbox):
"""Отслеживаем изменения нажатия чекбоксов и вызываем функцию change_password"""
return checkbox.stateChanged.connect(self.change_password)
def changed_slider(self):
"""Отслеживаем значение слайдера и пересобираем пароль с такой же длиной"""
value = self.slider.value()
self.password_length.setText(str(value))
self.get_random_password(value)
def get_random_password(self, length):
"""Собираем пароль с заданной длиной"""
password = ''.join(random.choice(self.password_characters)
for i in range(length))
self.password.setText(str(password))
def change_password(self):
"""Меняем пароль в зависимости от включенных чекбоксов"""
self.password_characters = 'x'
if self.check_lowcase.isChecked():
self.password_characters += self.lowcase_letters
if self.check_upcase.isChecked():
self.password_characters += self.upcase_letters
if self.check_numbers.isChecked():
self.password_characters += self.digits
if self.check_symbols.isChecked():
self.password_characters += self.punctuation
self.get_random_password(int(self.password_length.text()))
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| 1.828125 | 2 |
src/posts/migrations/0003_auto_20170327_1306.py | ChrisMunene/Blog | 0 | 12784939 | <filename>src/posts/migrations/0003_auto_20170327_1306.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-27 10:06
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20170326_1643'),
]
operations = [
migrations.AlterField(
model_name='post',
name='publish',
field=models.DateField(default=datetime.datetime(2017, 3, 27, 10, 6, 44, 221000, tzinfo=utc)),
),
]
| 0.621094 | 1 |
extract_pin_function_from_liberty.py | hrshishym/ExtractPinFunctionFromLibertySource | 0 | 12785067 | <reponame>hrshishym/ExtractPinFunctionFromLibertySource<filename>extract_pin_function_from_liberty.py
#!/usr/bin/env python
### Setting
cell_attributes = ["clock_gating_integrated_cell"]
ff_attributes = []
pin_attributes = ["direction", "clock", "function", "state_function"]
import os
import sys
import re
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'modules'))
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("-d", "--debug", action="store_true")
args = parser.parse_args()
from liberty.parser import parse_liberty
tdata = re.sub("\\\\\n", "", open(args.input).read())  # join lines split by backslash continuations
tlib = parse_liberty(tdata)
if args.debug:
import pprint
pprint.pprint(tlib)
# library (sxlib013) {
print("library ({}) {{".format(tlib.args[0]))
for eachlib in tlib.groups:
if eachlib.group_name != "cell":
continue
cellname = eachlib.args[0]
# cell(a2_x2) { /* 2008-01-10:21h05 */
print("cell({}) {{".format(cellname))
if args.debug:
print("==")
pprint.pprint(eachlib)
print("==")
pprint.pprint(eachlib.attributes)
print("==")
### Print cell attributes
    for eachattr in cell_attributes:
        if eachattr in eachlib.attributes.keys():
            print("  {} : {} ;".format(eachattr, eachlib.attributes[eachattr]))
### Print sub group
for eachgroup in eachlib.groups:
if args.debug:
print("====")
pprint.pprint(eachgroup)
print("====")
if eachgroup.group_name == "ff":
# ff
print(" ff({}) {{ ".format(",".join(eachgroup.args)))
for eachkey in eachgroup.attributes.keys():
print(" {} : {} ;".format(eachkey, eachgroup.attributes[eachkey]))
print(" }")
elif eachgroup.group_name == "pin":
## pin
print(" pin({}) {{".format(eachgroup.args[0]))
for eachattr in pin_attributes:
if eachattr in eachgroup.attributes.keys():
print(" {} : {} ;".format(eachattr, eachgroup.attributes[eachattr]))
print(" }")
elif eachgroup.group_name == "statetable":
## statetable
tarr = []
for i in eachgroup.args:
tarr.append(str(i))
print(" statetable( {} ) {{".format(" , ".join(tarr)))
if "table" in eachgroup.attributes.keys():
print(" {} : {} ;".format("table", re.sub(",", ", \\\n", str(eachgroup.attributes["table"]))))
print(" }")
print("}")
print("}")
| 1.828125 | 2 |
mergeMetadata/modules/utils/exceptions.py | isabella232/ALM-SF-DX-Python-Tools | 5 | 12785195 | <filename>mergeMetadata/modules/utils/exceptions.py<gh_stars>1-10
class NotCreatedDescribeLog( Exception ):
    '''Exception raised when describe.log does not exist in the specified folder'''
    ERROR_CODE = 117
    def __init__( self, pathDescribe ):
        super().__init__( f'Describe log does not exist, please place it at {pathDescribe}' )
class NoFullNameError( Exception ):
def __init__( self, tagName ):
super().__init__( f'No tag found for {tagName}' ) | 1.195313 | 1 |
removeNthFromEnd.py | xiaochuan-cd/leetcode | 0 | 12785323 | <gh_stars>0
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
        # f/fi: fast pointer and its step count; s/si: slow pointer that stops
        # just before the node to remove (or at the head when the head must go).
        f, s, fi, si = head, head, 0, 0
        while f:
            f = f.next
            fi += 1
            if fi - n > si + 1:
                si += 1
                s = s.next
if s == head and n == fi-si:
head = head.next
elif n < fi-si:
s.next = s.next.next
return head
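# Quick self-check (illustrative, not part of the LeetCode submission). A minimal
# ListNode is defined here because the definition above is only a commented-out stub.
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    # Build 1 -> 2 -> 3 -> 4 -> 5, then remove the 2nd node from the end.
    head = ListNode(1)
    node = head
    for v in (2, 3, 4, 5):
        node.next = ListNode(v)
        node = node.next
    head = Solution().removeNthFromEnd(head, 2)
    out = []
    while head:
        out.append(head.val)
        head = head.next
    print(out)  # expected: [1, 2, 3, 5]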
| 2.859375 | 3 |
core/intents/welcome.py | p-panagiotis/venom-virtual-assistant | 0 | 12785451 | from datetime import datetime
from core.modules.output_mod import output
def greet(master):
hour = datetime.now().hour
if 5 <= hour < 12:
output(f"Good morning {master}")
elif 12 <= hour < 18:
output(f"Good afternoon {master}")
else:
output(f"Good evening {master}")
| 1.804688 | 2 |
make/help.py | abhishekgahlot/flexx | 1 | 12785579 | <reponame>abhishekgahlot/flexx<gh_stars>1-10
# License: consider this public domain
"""
Show the list of available commands, or details on a command.
* python make help - show list of commands
* python make help foo - show details on command "foo"
"""
import os
import sys
from make import THIS_DIR, NAME
def help(command=''):
if not command:
# List all commands
fnames = [fname for fname in os.listdir(THIS_DIR) if
os.path.isfile(os.path.join(THIS_DIR, fname)) and
fname.endswith('.py') and
fname.count('.') == 1 and
not fname.startswith('_')]
print('Developer tools for project %s\n' % NAME.capitalize())
print(' python make <command> [arg]\n')
for fname in sorted(fnames):
modname = fname[:-3]
doc = get_doc_for_file(fname)
summary = doc.split('\n', 1)[0] if doc else ''
print(modname.ljust(15) + ' ' + summary)
else:
# Give more detailed info on command
fname = command + '.py'
if not os.path.isfile(os.path.join(THIS_DIR, fname)):
sys.exit('Not a known command: %r' % command)
doc = get_doc_for_file(fname) or ''
print('\n%s - %s\n' % (command, doc))
def get_doc_for_file(fname):
""" Get the module docstring of the given file. Returns string with
quotes and whitespace stripped, and only LF newlines.
"""
# Read code
try:
code = open(os.path.join(THIS_DIR, fname), 'rt').read()
except Exception as err:
return 'Error: could not read %r: %s' % (fname, str(err))
    # Search for the closest (first) multiline string
qsingle, qdouble = "'''", '"""'
ii = [(code.find(needle), needle) for needle in (qsingle, qdouble)]
ii = [(i, needle) for i, needle in ii if i >= 0]
ii.sort(key=lambda x: x[0])
# Find where it ends
if ii:
i1, needle = ii[0]
i2 = code.find(needle, i1+3)
if i2 > 0:
doc = code[i1:i2].strip('"\'').strip()
return doc.replace('\r\n', '\n').replace('\r', '\n')
| 2.3125 | 2 |
cq/views.py | furious-luke/django-cq | 31 | 12785707 | from django.db import transaction
from rest_framework import viewsets
from .serializers import TaskSerializer, CreateTaskSerializer
from .models import Task
class TaskViewSet(viewsets.ModelViewSet):
queryset = Task.objects.all()
serializer_class = TaskSerializer
def get_serializer(self, data=None, *args, **kwargs):
if getattr(self, 'creating', False):
return CreateTaskSerializer(data=data)
return super().get_serializer(data, *args, **kwargs)
def create(self, request, *args, **kwargs):
self.creating = True
with transaction.atomic():
return super().create(request, *args, **kwargs)
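# Hypothetical wiring sketch (assumes the project uses DRF's standard router; this
# snippet is an illustration, not taken from the repository's urls.py):
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'tasks', TaskViewSet)
#   urlpatterns = router.urls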
| 1.109375 | 1 |
lightfield_plane.py | IDLabMedia/blender-lightfield-addon | 1 | 12785835 | <filename>lightfield_plane.py
import math
import random
import bpy
import bmesh
from mathutils import Color
from .lightfield import LightfieldPropertyGroup
from .camera_position import CameraPosition
class LightfieldPlane(LightfieldPropertyGroup):
def construct(self):
visuals = self.default_construct()
self.lf_type = 'PLANE'
self.obj_empty.empty_display_type = 'PLAIN_AXES'
# Update lightfield references
self.obj_visuals.add().obj_visual = visuals[0]
self.obj_visuals.add().obj_visual = visuals[1]
self.obj_visuals.add().obj_visual = visuals[2]
self.obj_grid = visuals[0]
self.set_camera_to_first_view()
def construct_visuals(self, collection):
grid = self.create_grid()
space = self.create_space()
front = self.create_front()
# Add to lightfield collection
collection.objects.link(grid)
collection.objects.link(space)
collection.objects.link(front)
return [grid, space, front]
def create_space(self):
"""
Create visual that represents the space the lightfield is occupying.
:return: Object.
"""
name = self.construct_names()['space']
bpy.ops.mesh.primitive_plane_add(location=(0, 0, 0))
p1 = bpy.context.object
dumped_mesh = p1.data
bpy.ops.mesh.primitive_plane_add(location=(0, 0, 0))
space = bpy.context.object
space.name = name
p1.select_set(True)
bpy.ops.object.join()
space.scale = [0.5] * 3
space.rotation_euler[0] = 0.5 * math.pi
# Remove mesh-data created by p1 which is not necessary anymore
bpy.data.meshes.remove(dumped_mesh)
space.data.name = name
# Unlink the object from its current collection
space.users_collection[0].objects.unlink(space)
space_mat = bpy.data.materials.new(name)
col = Color()
col.hsv = (random.random(), 1.0, 0.8)
space_mat.diffuse_color = col[:] + (0.1,)
space.data.materials.append(space_mat)
space.show_wire = True
space.hide_render = True
return space
@staticmethod
def construct_names():
base = "LFPlane"
return {'lightfield': base,
'camera': "{}_Camera".format(base),
'grid': "{}_Grid".format(base),
'space': "{}_Space".format(base),
'front': "{}_Front".format(base)}
def position_generator(self):
cube = self.cube_camera
for y in range(self.num_cams_y):
for x in range(self.num_cams_x):
# TODO: implement cube_camera in plane lightfield
yield self.get_camera_pos(x, y)
def get_camera_pos(self, x, y):
base_x = 1 / (self.num_cams_x - 1)
base_y = 1 / (self.num_cams_y - 1)
return CameraPosition("view_{:04d}f".format(y * self.num_cams_x + x),
-0.5 + x * base_x,
0.0,
0.5 - y * base_y,
alpha=0.5*math.pi)
| 2.046875 | 2 |
src/illumideskdummyauthenticator/tests/test_authenticator.py | IllumiDesk/illumidesk | 41 | 12785963 | import json
from unittest.mock import Mock
from unittest.mock import patch
import pytest
from illumideskdummyauthenticator.authenticator import IllumiDeskDummyAuthenticator
from illumideskdummyauthenticator.validators import IllumiDeskDummyValidator
from tornado.web import RequestHandler
@pytest.mark.asyncio
async def test_authenticator_returns_auth_state(make_dummy_authentication_request_args):
"""
Ensure we get a valid authentication dictionary.
"""
with patch.object(
IllumiDeskDummyValidator, "validate_login_request", return_value=True
):
authenticator = IllumiDeskDummyAuthenticator()
handler = Mock(
spec=RequestHandler,
get_secure_cookie=Mock(return_value=json.dumps(["key", "secret"])),
request=Mock(
arguments=make_dummy_authentication_request_args(),
headers={},
items=[],
),
)
result = await authenticator.authenticate(handler, None)
expected = {
"name": "foobar",
"auth_state": {
"assignment_name": "lab101",
"course_id": "intro101",
"lms_user_id": "abc123",
"user_role": "Student",
},
}
assert result == expected
| 1.664063 | 2 |
offer/53_03_IntegerIdenticalToIndex.py | DevRoss/python-offer-code | 1 | 12786091 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-24
def solve(array: list):
    """Binary search for an index i with array[i] == i.

    Assumes `array` is sorted ascending with distinct integers; on unsorted
    input (see the second example below) the result is undefined.
    """
if not array:
return None
start = 0
end = len(array) - 1
while start <= end:
mid = (start + end) >> 1
if array[mid] == mid:
return mid
elif array[mid] < mid:
start = mid + 1
else:
end = mid - 1
return None
if __name__ == '__main__':
print(solve([-6, -3, -1, 3, 5, 6, 7]))
print(solve([-6, -3, -1, 88, 5, 6, 7]))
print(solve([0, 2, 5, 7, 83]))
print(solve([-1, 0, 2]))
| 2.375 | 2 |
py/jpy/ci/appveyor/dump-dlls.py | devinrsmith/deephaven-core | 210 | 12786219 | import psutil, os
p = psutil.Process(os.getpid())
for dll in p.memory_maps():
print(dll.path)
| 0.828125 | 1 |
backend/backend/routing.py | Trevor-Mansfield/WalmartReceiptSplitter | 0 | 12786347 | from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter, ChannelNameRouter
import cost_claimer.routing
import cost_claimer.consumers
application = ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(
URLRouter(
cost_claimer.routing.websocket_urlpatterns
)
),
"channel": ChannelNameRouter({
"user_action": cost_claimer.consumers.GroupCostWorker,
}),
})
| 0.980469 | 1 |
accounts.py | qwerith/Weather-Project | 0 | 12786475 | <gh_stars>0
import psycopg2
import os
import re
import string
import random
import logging
from dotenv import load_dotenv, find_dotenv
from flask_bcrypt import Bcrypt
from flask import redirect, session
bcrypt = Bcrypt()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("""%(asctime)s:%(name)s:
%(filename)s:%(funcName)s:
%(levelname)s:%(message)s""")
handler = logging.FileHandler("logs.log")
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
#loading environment variables
try:
load_dotenv(find_dotenv())
    con = psycopg2.connect(host = os.getenv("HOST"), database = os.getenv("DATABASE"),
                           user = os.getenv("USER"), password = os.getenv("db_PASSWORD"),
                           port=5432)
cur = con.cursor()
except psycopg2.OperationalError:
    logger.exception("Database credentials error")
    raise RuntimeError("Database credentials error")
class Accounts():
"""Manages user accounts, queries data for session module, password changes and recovery"""
def __init__(self, email, password):
self.email = email.strip(" ")
self.password = password.strip(" ")
def register(self, username):
try:
con = psycopg2.connect(host = os.getenv("HOST"), database = os.getenv("DATABASE"),
user = os.getenv("USER"), password = os.getenv("db_PASSWORD"),
port=5432)
cur = con.cursor()
        except psycopg2.OperationalError:
            logger.exception("Database credentials error")
            raise RuntimeError("Database credentials error")
cur.execute("SELECT EXISTS(SELECT 1 FROM users WHERE email = %s LIMIT 1)", (self.email, ))
con.commit()
result = cur.fetchall()
if result[0][0] != False:
return ["Account already exists"]
else:
try:
cur.execute("""INSERT INTO users (username, email, password)
VALUES ( %s, %s, %s )""",(username.strip(" "), self.email,
bcrypt.generate_password_hash(self.password).decode("utf-8")))
con.commit()
con.close()
return ["Your account has been successfully created"]
except:
con.close()
return ["Registration failed"]
def user_verification(self):
cur.execute("SELECT id, username, email, password FROM users WHERE email=%s LIMIT 1",
(self.email, ))
con.commit()
user = cur.fetchall()
if user and bcrypt.check_password_hash(user[0][3], self.password):
return(True, user)
else:
return None
def delete(self):
cur.execute("DELETE FROM users WHERE email=%s", (self.email, ))
con.commit()
def change_password(self, new_password):
cur.execute("UPDATE users SET password=%s WHERE email=%s",
(bcrypt.generate_password_hash(new_password).decode("utf-8"), self.email))
con.commit()
def restore_password(self, temp_password_hash, temp_password):
if bcrypt.check_password_hash(temp_password_hash, temp_password):
cur.execute("UPDATE users SET password=%s WHERE email=%s",
(bcrypt.generate_password_hash(self.password).decode("utf-8"), self.email))
con.commit()
return True
return None
# Generates random password for recovery process
def generate_temporary_password(email):
cur.execute("SELECT EXISTS(SELECT 1 FROM users WHERE email = %s LIMIT 1)", (email, ))
con.commit()
result = cur.fetchall()
if result[0][0] != False:
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
size = random.randint(5, 10)
temp_password = ''.join(random.choice(chars) for x in range(size))
password_hash = bcrypt.generate_password_hash(temp_password).decode("utf-8")
return password_hash, temp_password
return None, ""
def input_validation(user_input):
    # Basic shape checks: expect at least an email and a password, all strings.
    try:
        if len(user_input) < 2:
            raise IndexError("expected at least an email and a password")
        for i in user_input:
            if type(i) != str:
                return ["Invalid data type"]
    except IndexError:
        logger.error(IndexError)
        raise
response = []
email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
punctuation = """[!#$%&'()*+, -./:;"<=>?@[\]^_`{|}~:]"""
if not (re.match(email_pattern, user_input[0])):
response.append("Invalid email")
    if (not (5 <= len(user_input[1]) <= 10) or
            re.findall(punctuation, user_input[1]) != []):
        response.append("Password must be 5 to 10 characters long")
if len(user_input) == 3:
if not user_input[1] == user_input[2] or re.findall(punctuation, user_input[2]) != []:
response.append("Passwords do not match")
if len(user_input) == 4:
if not user_input[2] == user_input[3] or re.findall(punctuation, user_input[3]) != []:
response.append("Passwords do not match")
return response
def login_required(func):
    def wrapper(*args, **kwargs):
        if session.get("user_id") is not None:
            return func(*args, **kwargs)
        return redirect("/login")
return wrapper
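# Usage sketch (assumes a Flask `app` object defined elsewhere in the project;
# route and template names are illustrations):
#
#   @app.route("/forecast")
#   @login_required
#   def forecast():
#       return render_template("forecast.html")
#
# Wrapping the inner function with functools.wraps(func) would additionally
# preserve the decorated view's name for Flask's endpoint registry.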
| 1.523438 | 2 |
.ipynb_checkpoints/pyKinectProjectilePrediction-checkpoint.py | PMcGloin/pyKinectProjectilePrediction | 0 | 12786603 | from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import ctypes
import _ctypes
import pygame
import sys
import numpy as np
import cv2
#if sys.hexversion >= 0x03000000:
# import _thread as thread
#else:
# import thread
class DepthRuntime(object):
def __init__(self):
pygame.init()
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Loop until the user clicks the close button.
self._done = False
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Kinect runtime object, we want only color and body frames
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)
# back buffer surface for getting Kinect depth frames, 8bit grey, width and height equal to the Kinect depth frame size
self._frame_surface = pygame.Surface((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), 0, 24)
# here we will store skeleton data
self._bodies = None
# Set the width and height of the screen [width, height]
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect for Windows v2 Depth")
#def background_subtraction(self, current_frame, previous_frame):
# previousFrame = [0] * 217088
# return frame
def draw_depth_frame(self, frame, target_surface):
if frame is None: # some usb hub do not provide the infrared image. it works with Kinect studio though
return
target_surface.lock()
        f8 = np.uint8(frame.clip(1, 4000) / 16.)  # clamp depth to 1-4000 mm and scale into 8 bits
frame8bit=np.dstack((f8,f8,f8))
address = self._kinect.surface_as_array(target_surface.get_buffer())
ctypes.memmove(address, frame8bit.ctypes.data, frame8bit.size)
del address
target_surface.unlock()
def run(self):
# -------- Main Program Loop -----------
frame = [0] * 217088
frames = [frame] * 5
fgbg = cv2.createBackgroundSubtractorKNN()
# fgbg = cv2.createBackgroundSubtractorMOG2()
# print (len(previousFrames))
# print(previousFrames)
while not self._done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
self._done = True # Flag that we are done so we exit this loop
elif event.type == pygame.VIDEORESIZE: # window resized
self._screen = pygame.display.set_mode(event.dict['size'], pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
# --- Getting frames and drawing
if self._kinect.has_new_depth_frame():
frame = self._kinect.get_last_depth_frame()
fgmask = fgbg.apply(frame)
# flattenMask = []
# for item in fgmask:
# flattenMask.append(item)
flattenMask = [value for element in fgmask for value in element]
# print (type(flattenMask[0]))
flattenMask = np.array(flattenMask)
# flattenMask = np.array(fgmask)
# flattenMask = flattenMask / 255
# print ("flattenMask\n",flattenMask)
frameMask = []
# frameMask = np.array(frameMask)
for val in np.nditer(flattenMask):
# i = 0
if val == 255:
frameMask.append(1)
# val = 1
else:
frameMask.append(0)
# val = 0
# i += 1
frameMask = np.array(frameMask)
# np.set_printoptions(threshold=sys.maxsize)
# print("frame\n",frame)
# print ("flattenMask\n",flattenMask)
# print ("frameMask\n",frameMask)
outputFrame = np.multiply(frame, frameMask)
# frames.append(outputFrame)
# frames.pop(0)
# outputFrame2 = []
# cv2.fastNlMeansDenoisingMulti(frames, 4, 4, outputFrame2)
# outputFrame2 = cv2.fastNlMeansDenoising(outputFrame)
# outputFrame = np.multiply(frame, fgmask)
# cv2.imshow('frame',fgmask)
self.draw_depth_frame(outputFrame, self._frame_surface)
# k = cv2.waitKey(30) & 0xff
# if k == 27:
# break
# frames.append(frame)
# frames.pop(0)
# outputFrame = np.subtract(frames[0], frames[1])
# self.draw_depth_frame(outputFrame, self._frame_surface)
#self.draw_depth_frame(frame, self._frame_surface)
#frame = np.average(np.array([frame, previousFrame]), axis=0)
#np.set_printoptions(threshold=sys.maxsize)
#print(outputFrame)
#print(frame.size)
# outputFrame = (np.array(previousFrames[0]) + np.array(previousFrames[1]) + np.array(previousFrames[2]) + np.array(previousFrames[3]) + np.array(previousFrames[4])) / 5
# self.draw_depth_frame(outputFrame.astype(int), self._frame_surface)
# frame2 = cv.fastNlMeansDenoisingMulti(previousFrames, 2 , 3)
frame = None
outputFrame = None
self._screen.blit(self._frame_surface, (0,0))
pygame.display.update()
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
self._clock.tick(60)
# Close our Kinect sensor, close the window and quit.
self._kinect.close()
pygame.quit()
__main__ = "Kinect v2 Depth"
game =DepthRuntime();
game.run(); | 2.203125 | 2 |
conta/main/tests/views/test_InformesView.py | osso73/contabilidad | 0 | 12786731 | from pytest_django.asserts import assertTemplateUsed
from fixtures_views import *
class TestInformesView:
@pytest.fixture
def form_parametros(self, django_app):
resp = django_app.get(reverse('main:informes'), user='username')
return resp.forms['parametros']
@pytest.fixture
def populate_db_informes(self, populate_database):
_, cuentas, _ = populate_database
adicionales = [
[5, '2021-01-28', 'Compra del pan', 2.50, 0, cuentas[0]],
[5, '2021-01-28', 'Compra del pan', 0, 2.50, cuentas[3]],
[6, '2021-02-15', 'Compra de fruta', 10.75, 0, cuentas[0]],
[6, '2021-02-15', 'Compra de fruta', 0, 10.75, cuentas[3]],
[7, '2021-03-18', 'Calcetines y calzoncillos', 15.85, 0, cuentas[1]],
[7, '2021-03-18', 'Calcetines y calzoncillos', 0, 15.85, cuentas[3]],
[8, '2021-04-20', 'Abrigo de invierno', 54, 0, cuentas[1]],
[8, '2021-04-20', 'Abrigo de invierno', 0, 54, cuentas[3]],
]
for num, fecha, descripcion, debe, haber, cuenta in adicionales:
Movimiento.objects.create(num=num, fecha=fecha,
descripcion=descripcion, debe=debe, haber=haber, cuenta=cuenta)
@pytest.mark.parametrize('page', ['/informes/', reverse('main:informes')])
def test_redirect_if_not_logged_in(self, page, django_app):
resp = django_app.get(page)
assert resp.status_code == 302
assert resp.url.startswith('/accounts/login/')
@pytest.mark.parametrize('page', ['/informes/', reverse('main:informes')])
def test_view_url_exists_at_desired_location(self, page, django_app):
resp = django_app.get(page, user='username')
assert resp.status_code == 200
@pytest.mark.parametrize('page', ['/informes/', reverse('main:informes')])
def test_view_uses_correct_template(self, page, django_app):
resp = django_app.get(page, user='username')
assertTemplateUsed(resp, 'main/informes.html')
def test_parametros_form_attributes(self, form_parametros):
form = form_parametros
assert form.id == 'parametros'
assert form.method == 'post'
assert form.action == '/informes/'
assert form.action == reverse('main:informes')
fields = form.fields.keys()
for f in ['f_fecha_inicial', 'f_fecha_final', 'f_tipo', 'f_cuenta', 'f_etiqueta']:
assert f in fields
@pytest.mark.parametrize('tipo, fecha_col', [
('diario', 'Fecha'), ('semanal', 'Semana'), ('mensual', 'Mes'),
('trimestral', 'Trimestre'), ('anual', 'Año')
])
def test_parametros_form_attributes_tipo(self, form_parametros, populate_db_informes, tipo, fecha_col):
populate_db_informes
form = form_parametros
form['f_tipo'].select(text=tipo)
resp = form.submit()
# check title and subtitle
for text in ['Todas las cuentas', f'Informe {tipo}, todas las fechas']:
assert text in resp.text
# check columns of table
for col in [fecha_col, 'Debe', 'Haber', 'Total']:
assert col in resp.text
@pytest.mark.parametrize('fecha_ini, fecha_fin, expected_subtitle', [
('', '2022-01-29', 'Informe diario, desde el principio hasta 2022-01-29'),
('2022-01-29', '', 'Informe diario, desde 2022-01-29 hasta el final'),
('2022-01-01', '2022-01-31', 'Informe diario, desde 2022-01-01 hasta 2022-01-31'),
], ids=['fecha-inicial', 'fecha-final', 'ambas-fechas'])
def test_form_fechas(self, form_parametros, populate_db_informes, fecha_ini, fecha_fin, expected_subtitle):
populate_db_informes
form = form_parametros
form['f_fecha_inicial'] = fecha_ini
form['f_fecha_final'] = fecha_fin
resp = form.submit()
# check title and subtitle
for text in ['Todas las cuentas', expected_subtitle]:
assert text in resp.text
@pytest.mark.parametrize('cuenta, etiqueta, expected_title', [
('100: Caja', '', 'Cuenta 100: Caja'),
('', 'gastos', 'Cuentas del tipo: Gastos corrientes'),
('100: Caja', 'gastos', 'Cuenta 100: Caja'),
], ids=['cuenta-solo', 'etiqueta-solo', 'cuenta-y-etiqueta'])
def test_form_cuentas(self, form_parametros, populate_db_informes, cuenta, etiqueta, expected_title):
populate_db_informes
form = form_parametros
form['f_cuenta'] = cuenta
form['f_etiqueta'] = etiqueta
resp = form.submit()
# check title and subtitle
for text in [expected_title, 'Informe diario, todas las fechas']:
assert text in resp.text
| 1.195313 | 1 |
PU_Bayesian_classifiers/PSTAN.py | chengning-zhang/Bayesian-Classifers-for-PU_learning | 4 | 12786859 | class PSTAN(Bayes_net_PU):
name = "PSTAN"
def __init__(self, alpha = 1,starting_node = 0):
self.starting_node = starting_node
self.alpha = alpha
def Findparent(self, M):
M = M.copy() # to avoid change global M
np.fill_diagonal(M,0)
p = int(M.shape[0])
V = range(p) # set of all nodes
st = self.starting_node
        Vnew = [st]  # vertices whose parent is already known; initiated with the starting node (TAN picks one arbitrarily)
        parent = {st: None}  # dict recording each node's parent
while set(Vnew) != set(V): # when their are still nodes whose parents are unknown.
            index_i = []  # after the loop, same length as Vnew: the closest node not yet in Vnew for each member of Vnew
            max_inf = []  # corresponding edge weights
            for i in range(len(Vnew)):  # could be parallelized
vnew = Vnew[i]
ListToSorted = [e for e in M[:,vnew]] # does not need int(e)
index = sorted(range(len(ListToSorted)),key = lambda k: ListToSorted[k],reverse = True)
index_i.append([ele for ele in index if ele not in Vnew][0])
max_inf.append(M[index_i[-1],vnew])
index1 = sorted(range(len(max_inf)),key = lambda k: max_inf[k],reverse = True)[0] ## relative position, Vnew[v1,v2] index_i[v4,v5] max_inf[s1,s2] index1 is the position in those 3 list
Vnew.append(index_i[index1]) # add in that node
parent[index_i[index1]] = Vnew[index1] # add direction, it has to be that the new added node is child, otherwise some nodes has 2 parents which is wrong.
return parent
    def fit(self, X_L, X_u, pri, M, case_control=True):  # fitting is based on training data
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
n_L,p = X_L.shape
# n_u,p = X_u.shape
if case_control:
X_U_or_UL = X_u
else:
X_U_or_UL = np.concatenate((X_L,X_u),axis = 0)
#
n_U_or_UL = X_U_or_UL.shape[0]
parent = self.Findparent(M)
# part 1: proba that can be estimated from labeled examples. 1 P(xij|1,xkl), 2 p(x_root|1) = N_L(x_root)/N_L, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
# part 2: learn from U, N_U(xij,xkl), and N_U(xkl)
# part 3: p(xij|0,xkl),p(x_root|0) from previous list
#
List_prob_1 = {} # 1 P(xij|1,xkl), 2 p(x_root|1)
List_count_1 = {} # N_L(xij,xpal) and N_L(xij)
#
List_count_U_or_UL = {} # N_U(xij,xkl) and N_U(xij)
#
List_prob_0 = {} # p(xij|0,xkl),p(x_root|0)
K = {}
# for root node
root_i = self.starting_node
x_i_L = X_L[:,root_i]
x_i_L_counter = Counter(x_i_L)
x_i_U_or_UL = X_U_or_UL[:,root_i]
x_i_U_or_UL_counter = Counter(x_i_U_or_UL)
x_i_values = list(set(x_i_L_counter.keys()).union(x_i_U_or_UL_counter.keys()))
K[root_i] = len(list(x_i_values))
# part 1
x_i_L_prob = {key: (x_i_L_counter[key]+self.alpha)/(K[root_i]*self.alpha + n_L ) for key in x_i_values}
List_prob_1[root_i] = x_i_L_prob
List_count_1[root_i] = x_i_L_counter
# part 2
List_count_U_or_UL[root_i] = x_i_U_or_UL_counter
# part 3
        x_i_0_prob = {key: max([0, x_i_U_or_UL_counter[key] - x_i_L_prob[key] * pri * n_U_or_UL]) for key in x_i_values}  # numerator N_U(xi=j) - N_U * p(xi=j, y=1) = N_U(xi=j, y=0); clamp at 0 since it can go negative
        x_i_0_prob = {key: (self.alpha + value) / (K[root_i] * self.alpha + n_U_or_UL * (1 - pri)) for key, value in x_i_0_prob.items()}  # add pseudo-count and divide by the denominator
        x_i_0_prob = {key: value / (sum(np.array(list(x_i_0_prob.values())))) for key, value in x_i_0_prob.items()}  # renormalize so the probabilities sum to 1 despite floating-point error
List_prob_0[root_i] = x_i_0_prob
#
for i in [e for e in range(0,p) if e != root_i]:
x_i_values = list(set(X_L[:,i]).union(X_U_or_UL[:,i]))
x_i_parent_Value = list(set(X_L[:,parent[i]]).union(X_U_or_UL[:,parent[i] ] ) )
K[i] = len(x_i_values)
# part 1, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
List_count_1[i] = {v2: {v1:X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value} # {pva1: {'1': , '2':, '3': }, pval2:{}}
List_prob_1[i] = {v2: {v1:(X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] + self.alpha)/ (X_L[(X_L[:,parent[i]] == v2)].shape[0] + self.alpha*K[i]) for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 2
List_count_U_or_UL[i] = {v2: {v1:X_U_or_UL[(X_U_or_UL[:,i] == v1) & (X_U_or_UL[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 3
x_i_0_prob = {v2: {v1: List_count_U_or_UL[i][v2][v1] - List_prob_1[i][v2][v1]*pri* sum(list(List_count_U_or_UL[i][v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1: max([0,x_i_0_prob[v2][v1] ]) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:(x_i_0_prob[v2][v1] + self.alpha)/(self.alpha*K[i] + (1-pri)*sum(list(List_count_U_or_UL[i][v2].values())) ) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:x_i_0_prob[v2][v1]/sum(list(x_i_0_prob[v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value} # normalize
List_prob_0[i] = x_i_0_prob
self.case_control_ = case_control
self.is_fitted_ = True
self.parent_ = parent
self.n_features_, self.K_, self.List_count_1_,self.List_prob_1_, self.List_count_U_, self.List_prob_0_, self.prevalence_ = p, K, List_count_1,List_prob_1,List_count_U_or_UL,List_prob_0, pri
return self
def predict_proba(self,X):
check_is_fitted(self)
X = check_array(X)
Prob_1 = []
root_i = self.starting_node
for ins in X:
P1 = self.prevalence_
P0 = 1 - P1
# root_i
P1 = P1 * (self.List_prob_1_[root_i][ins[root_i]])
P0 = P0 * (self.List_prob_0_[root_i][ins[root_i]])
for i in [e for e in range(0,self.n_features_) if e != root_i]:
pValue = ins[self.parent_[i]]
P1 = P1 * (self.List_prob_1_[i][pValue][ins[i]])
P0 = P0 * (self.List_prob_0_[i][pValue][ins[i]])
P = P1 + P0
P1 = P1/P; P0 = P0/P
Prob_1.append(P1)
#
Prob_1 = np.array(Prob_1)
return Prob_1
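# Toy usage sketch (illustrative): X_L holds labeled positive rows, X_u unlabeled
# rows, `pri` is the assumed positive-class prior, and M is a feature-feature
# dependence matrix (e.g. conditional mutual information) computed elsewhere.
#
#   X_L = np.array([['a', 'x'], ['b', 'x']])
#   X_u = np.array([['a', 'y'], ['b', 'y']])
#   M = np.array([[0.0, 0.3], [0.3, 0.0]])
#   clf = PSTAN().fit(X_L, X_u, pri=0.5, M=M)
#   print(clf.predict_proba(X_u))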
| 2.90625 | 3 |
src/cake/registry.py | anlongfei/cake | 14 | 12786987 | """Utilities for querying the Windows registry.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 <NAME>, <NAME>.
@license: Licensed under the MIT license.
"""
import _winreg as winreg # Do this so Python 2to3 conversion works.
import sys
import cake.system
_shownWow64Warning = False
# Define locally here since some versions of the winreg module don't have them
KEY_WOW64_64KEY = 0x0100
KEY_WOW64_32KEY = 0x0200
if cake.system.isWindows64():
_readAccessModes = (winreg.KEY_READ | KEY_WOW64_64KEY, winreg.KEY_READ | KEY_WOW64_32KEY)
else:
_readAccessModes = (winreg.KEY_READ,)
def queryString(key, subKey, name):
"""Queries a string value from the Windows registry.
On 64-bit Windows this function will first try to query the value from
the 64-bit registry. If the value doesn't exist there it will then try to
find the value in the 32-bit registry.
@param key: The key to query, eg: winreg.HKEY_LOCAL_MACHINE
@type key: string
@param subKey: The subkey to query, eg: r"SOFTWARE\Microsoft"
@type subKey: string
@param name: The name to query, eg: "InstallDir"
@type name: string
@return: The value queried.
@rtype: string
@raise WindowsError: If the value could not be found/read.
"""
for sam in _readAccessModes:
try:
keyHandle = winreg.OpenKey(key, subKey, 0, sam)
try:
return str(winreg.QueryValueEx(keyHandle, name)[0])
finally:
winreg.CloseKey(keyHandle)
except WindowsError:
if sam is _readAccessModes[-1]:
raise
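# Usage sketch (the registry path below is only an example):
#
#   import _winreg as winreg
#   installDir = queryString(
#       winreg.HKEY_LOCAL_MACHINE,
#       r"SOFTWARE\Microsoft\VisualStudio\9.0",
#       "InstallDir")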
| 1.65625 | 2 |
spock/addons/s3/__init__.py | gbmarc1/spock | 58 | 12787115 | # -*- coding: utf-8 -*-
# Copyright FMR LLC <<EMAIL>>
# SPDX-License-Identifier: Apache-2.0
"""
Spock is a framework that helps manage complex parameter configurations for Python applications
Please refer to the documentation provided in the README.md
"""
from spock.addons.s3.configs import S3Config, S3DownloadConfig, S3UploadConfig
__all__ = ["configs", "utils", "S3Config", "S3DownloadConfig", "S3UploadConfig"]
| 1.21875 | 1 |
src/engine/datastore/models/section.py | thomasmauerhofer/search-engine | 0 | 12787243 | #!/usr/bin/env python3
# encoding: utf-8
import pprint
from enum import Enum
from engine.datastore.models.paper_structure import PaperStructure
from engine.datastore.models.text import Text
from engine.preprocessing.text_processor import TextProcessor
from engine.utils.objects.word_hist import WordHist
class Section(PaperStructure):
def __init__(self, data):
self.heading_raw = data.get('heading_raw')
self.heading_proceed = data.get('heading_proceed') if 'heading_proceed' in data else \
TextProcessor.proceed_string(data.get('heading_raw'))
self.section_type = SectionType[data.get('section_type')]
self.imrad_types = [IMRaDType[imrad_type] for imrad_type in data.get('imrad_types')] if 'imrad_types' in data else []
self.text = [Text(text) for text in data.get('text')] if 'text' in data else []
self.subsections = [Section(subsection) for subsection in data.get('subsections')] if 'subsections' in data else []
self.word_hist = WordHist(data.get('word_hist')) if "word_hist" in data else WordHist()
def __str__(self):
pp = pprint.PrettyPrinter(indent=4)
return pp.pformat(self.to_dict())
def to_dict(self):
data = {'section_type': self.section_type.name, 'heading_raw': self.heading_raw, 'heading_proceed': self.heading_proceed,
'text': [], 'subsections': [], 'imrad_types': [], 'word_hist': self.word_hist}
for text in self.text:
data['text'].append(text.to_dict())
for subsection in self.subsections:
data['subsections'].append(subsection.to_dict())
for imrad_type in self.imrad_types:
data['imrad_types'].append(imrad_type.name)
return data
def get_combined_word_hist(self):
if not self.word_hist:
for word in self.heading_proceed.split():
self.word_hist[word] = self.word_hist[word] + 1 if word in self.word_hist else 1
for text in self.text:
for word in text.text_proceed.split():
self.word_hist[word] = self.word_hist[word] + 1 if word in self.word_hist else 1
ret = WordHist(self.word_hist.copy())
for subsection in self.subsections:
ret.append(subsection.get_combined_word_hist())
return ret
def add_text_object(self, text_type, text_raw):
if len(self.subsections):
self.subsections[-1].add_text_object(text_type, text_raw)
else:
self.text.append(Text({"text_type": text_type.name, "text_raw": text_raw}))
def add_subsection(self, section_type, heading):
self.subsections.append(Section({'section_type': section_type.name, 'heading_raw': heading}))
def add_to_imrad(self, imrad_type):
        if not any(imrad_type is x for x in self.imrad_types) and \
                not (self.heading_raw.isspace() or self.heading_raw == ''):
            self.imrad_types.append(imrad_type)
for subsection in self.subsections:
subsection.add_to_imrad(imrad_type)
def title_exist(self):
return bool(self.heading_proceed)
def text_exist(self):
return any([text for text in self.text if text.text_proceed])
class SectionType(Enum):
ABSTRACT = 1
SECTION = 2
SUBSECTION = 3
SUBSUBSECTION = 4
class IMRaDType(Enum):
ABSTRACT = 0
INTRODUCTION = 1
BACKGROUND = 2
METHODS = 3
RESULTS = 4
DISCUSSION = 5
ACKNOWLEDGE = 6
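# Construction sketch (illustrative): build a section, attach a text block, then
# tag it with an IMRaD type. `TextType` is assumed to be the enum used by
# engine.datastore.models.text; the member name here is an assumption.
#
#   section = Section({'section_type': SectionType.SECTION.name,
#                      'heading_raw': 'Methods'})
#   section.add_text_object(TextType.PARAGRAPH, 'We trained the model ...')
#   section.add_to_imrad(IMRaDType.METHODS)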
| 1.585938 | 2 |
nnet/separate.py | on1262/conv-tasnet | 149 | 12787371 | <gh_stars>100-1000
#!/usr/bin/env python
# wujian@2018
import os
import argparse
import torch as th
import numpy as np
from conv_tas_net import ConvTasNet
from libs.utils import load_json, get_logger
from libs.audio import WaveReader, write_wav
logger = get_logger(__name__)
class NnetComputer(object):
def __init__(self, cpt_dir, gpuid):
self.device = th.device(
"cuda:{}".format(gpuid)) if gpuid >= 0 else th.device("cpu")
nnet = self._load_nnet(cpt_dir)
self.nnet = nnet.to(self.device) if gpuid >= 0 else nnet
# set eval model
self.nnet.eval()
def _load_nnet(self, cpt_dir):
nnet_conf = load_json(cpt_dir, "mdl.json")
nnet = ConvTasNet(**nnet_conf)
cpt_fname = os.path.join(cpt_dir, "best.pt.tar")
cpt = th.load(cpt_fname, map_location="cpu")
nnet.load_state_dict(cpt["model_state_dict"])
logger.info("Load checkpoint from {}, epoch {:d}".format(
cpt_fname, cpt["epoch"]))
return nnet
def compute(self, samps):
with th.no_grad():
raw = th.tensor(samps, dtype=th.float32, device=self.device)
sps = self.nnet(raw)
sp_samps = [np.squeeze(s.detach().cpu().numpy()) for s in sps]
return sp_samps
def run(args):
mix_input = WaveReader(args.input, sample_rate=args.fs)
computer = NnetComputer(args.checkpoint, args.gpu)
for key, mix_samps in mix_input:
logger.info("Compute on utterance {}...".format(key))
spks = computer.compute(mix_samps)
norm = np.linalg.norm(mix_samps, np.inf)
for idx, samps in enumerate(spks):
samps = samps[:mix_samps.size]
# norm
samps = samps * norm / np.max(np.abs(samps))
write_wav(
os.path.join(args.dump_dir, "spk{}/{}.wav".format(
idx + 1, key)),
samps,
fs=args.fs)
logger.info("Compute over {:d} utterances".format(len(mix_input)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"Command to do speech separation in time domain using ConvTasNet",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("checkpoint", type=str, help="Directory of checkpoint")
parser.add_argument(
"--input", type=str, required=True, help="Script for input waveform")
parser.add_argument(
"--gpu",
type=int,
default=-1,
help="GPU device to offload model to, -1 means running on CPU")
parser.add_argument(
"--fs", type=int, default=8000, help="Sample rate for mixture input")
parser.add_argument(
"--dump-dir",
type=str,
default="sps_tas",
help="Directory to dump separated results out")
args = parser.parse_args()
run(args) | 1.820313 | 2 |
py-numpy/numpy_test.py | DeercoderPractice/exp-code | 0 | 12787499 | #!/usr/bin/env python
import numpy as np
def test_concatenate():
x = np.array([11, 22])
y = np.array([18, 7, 6])
z = np.array([1, 3, 5])
print np.concatenate((x,y,z))
def test_concatenate2():
x = np.array(range(24))
x = x.reshape((3,4,2))
y = np.array(range(100, 124))
y = y.reshape((3,4,2))
z0 = np.concatenate((x,y))
z1 = np.concatenate((x,y), axis = 1)
z2 = np.concatenate((x,y), axis = 2)
print z0
print z1
print z2
def test_slicing():
x = np.array([2, 5, 18, 14, 4])
y = x[:, np.newaxis]
print x
print y
'''
This function demonstrates generating ones/zeros matrices.
'''
def test_ones():
print "============="
x = np.ones((2, 3))
print x
    y = np.ones((3, 4), dtype=int)  # specify an integer dtype for this one
print y
z = np.zeros((2, 4))
print z
x = np.array([2,5,8,13,14,4])
print np.ones_like(x)
print np.zeros_like(x)
def main():
test_concatenate()
test_concatenate2()
test_slicing()
test_ones()
if __name__ == '__main__':
main()
| 2.453125 | 2 |
docker-register.py | poporisil/docker-register | 0 | 12787627 | <gh_stars>0
#!/usr/bin/env python
import os
import sys
import argparse
import threading
import subprocess
import time
from urlparse import urlparse
import docker
import etcd
import json
import logging
import logging.handlers
parser = argparse.ArgumentParser(description='Docker Register')
parser.add_argument('-e','--etcd-url', default='http://localhost:4001',
help='etcd url (default: http://localhost:4001)')
parser.add_argument('-d','--docker-url', default='unix://var/run/docker.sock',
help='docker url (default: unix://var/run/docker.sock)')
parser.add_argument('--docker-api-ver', default='1.23',
help='docker api version (default: 1.23)')
parser.add_argument('-t','--ttl', type=int, default=15,
help='register ttl (default: 15)')
parser.add_argument('-l','--log-path', default='.',
help='log path (default: .)')
args = parser.parse_args()
etcdUrl = args.etcd_url
dockerUrl = args.docker_url
dockerApiVer = args.docker_api_ver
ttl = args.ttl
logPath = args.log_path
logger = logging.getLogger('DockerRegister')
handler = logging.handlers.RotatingFileHandler(logPath + '/docker-register.log', backupCount=10)
formatter = logging.Formatter(fmt='%(asctime)s] (%(levelname)s) %(name)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.info('-- Parameters --------------------------')
logger.info('etcdUrl = %s' % etcdUrl)
logger.info('dockerUrl = %s' % dockerUrl)
logger.info('dockerApiVer = %s' % dockerApiVer)
logger.info('ttl = %d' % ttl)
logger.info('logPath = %s' % logPath)
logger.info('-----------------------------------------')
class DockerRegister(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.dc = docker.Client(base_url=dockerUrl, version=dockerApiVer)
self.ec = etcd.Client(host=urlparse(etcdUrl).hostname, port=urlparse(etcdUrl).port, protocol=urlparse(etcdUrl).scheme)
self.internalIp = subprocess.check_output("ip route get 8.8.8.8 | awk '{print $NF; exit}'", shell=True).strip()
def getContainers(self):
logger.debug('get container list...')
bindedContainers = {}
try:
for container in self.dc.containers():
binded = []
for port in container['Ports']:
if 'PublicPort' in port:
binded.append('%s:%d-%d/%s'%(self.internalIp, port['PublicPort'], port['PrivatePort'], port['Type']))
if binded:
key = container['Image'].split(':')[0] + '/' + container['Id']
bindedContainers[key] = ','.join(binded)
except Exception:
logger.exception('get containers fail')
return None
return bindedContainers
def registerContainers(self, containers):
logger.debug('register containers...')
for key, value in containers.iteritems():
logger.debug('register %s' % key)
try:
self.ec.write('/containers/' + key, value, ttl=ttl)
            except Exception:
                logger.exception('etcd write fail')
def run(self):
logger.info('start agent!')
while True:
containers = self.getContainers()
if containers:
self.registerContainers(containers)
            time.sleep(10)
if __name__ == '__main__':
t = DockerRegister()
t.start()
t.join() | 1.375 | 1 |
medium/103-Binary Tree Zigzag Level Order Traversal.py | Davidxswang/leetcode | 2 | 12787755 | """
https://leetcode.com/problems/binary-tree-zigzag-level-order-traversal/
Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then right to left for the next level and alternate between).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# time complexity: O(n), space complexity: O(n)
# Two stacks are used for clarity: one holds the level read left-to-right, the
# other the level read right-to-left; the order in which children are pushed
# sets the reading direction of the next level.
from __future__ import annotations  # defer annotation evaluation; TreeNode/List come from the judge harness
from typing import List

# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
result = [[]]
stack_toright = [root]
stack_toleft = []
toright = True
size = 1
while stack_toright or stack_toleft:
node = stack_toright.pop() if toright else stack_toleft.pop()
size -= 1
if node:
result[-1].append(node.val)
if toright:
if node.left:
stack_toleft.append(node.left)
if node.right:
stack_toleft.append(node.right)
else:
if node.right:
stack_toright.append(node.right)
if node.left:
stack_toright.append(node.left)
if size == 0 and (stack_toright or stack_toleft):
size = len(stack_toright) + len(stack_toleft)
result.append([])
toright = not toright
if result and not result[-1]:
result.pop()
return result
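# Quick self-check (illustrative; a minimal TreeNode is defined because the one
# above is only a commented-out stub). Uses the tree from the docstring example:
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().zigzagLevelOrder(root))  # expected: [[3], [20, 9], [15, 7]]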
| 3.03125 | 3 |
python/dungeon-game/dungeon.py | davejlin/treehouse | 0 | 12787883 | <reponame>davejlin/treehouse<gh_stars>0
import random
CELLS = [(0,0), (0,1), (0,2),
(1,0), (1,1), (1,2),
(2,0), (2,1), (2,2)]
player = (0,0)
door = (0,0)
dragon = (0,0)
def set_initial_positions():
while True:
player = random.choice(CELLS)
door = random.choice(CELLS)
dragon = random.choice(CELLS)
if player != door and door != dragon and player != dragon:
break
return player, door, dragon
def move_player(player, move):
if move == 'UP':
player = (player[0]-1, player[1])
elif move == 'DOWN':
player = (player[0]+1, player[1])
elif move == 'RIGHT':
player = (player[0], player[1]+1)
elif move == 'LEFT':
player = (player[0], player[1]-1)
return player
def get_moves(player):
xPlayer = player[1]
yPlayer = player[0]
moves = []
if yPlayer > 0:
moves.append('UP')
if yPlayer < 2:
moves.append('DOWN')
if xPlayer < 2:
moves.append('RIGHT')
if xPlayer > 0:
moves.append('LEFT')
return moves
def check_win_lose(player, door, dragon):
if player == door:
print("\n*** Congratulations! You escaped!! ***\n")
return True
elif player == dragon:
print("\n*** Sorry, the dragon got you! ***\n")
return True
else:
return False
def draw_map(player):
print(' _ _ _')
tile = '|{}'
for idx, cell in enumerate(CELLS):
if idx in [0, 1, 3, 4, 6, 7]:
if cell == player:
print(tile.format('X'), end = '')
else:
print(tile.format('_'), end = '')
else:
if cell == player:
print(tile.format('X|'))
else:
print(tile.format('_|'))
# main
print("Welcome to the dungeon!")
(player, door, dragon) = set_initial_positions()
while True:
print("You're currently in room {}.".format(player))
draw_map(player)
valid_moves = get_moves(player)
print("You can move {}.".format(valid_moves))
print("Enter QUIT to quit.")
move = input("> ")
move = move.upper()
if move == 'QUIT':
break
if move not in valid_moves:
print("\n*** Sorry, you cannot move {}. Try again!\n".format(move))
continue
player = move_player(player, move)
if check_win_lose(player, door, dragon):
break
| 2.75 | 3 |
hokudai_furima/chat/migrations/0005_auto_20180514_0455.py | TetsuFe/hokuma | 1 | 12788011 | # Generated by Django 2.0.3 on 2018-05-14 04:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0004_auto_20180319_1220'),
]
operations = [
migrations.AddField(
model_name='chat',
name='product_seller',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product_seller', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='chat',
name='product_wanting_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product_wanting_user', to=settings.AUTH_USER_MODEL),
),
]
| 0.894531 | 1 |
github-webhooks/Event.py | srnd/mattermost-code-review | 2 | 12788139 | class Event:
def __init__(self,data,event=None):
assert type(data) is dict, "data is of type {} not dict".format(type(data))
self.data = data
def __getattr__(self, item):
if item not in self.data:
return False
output = self.data[item]
while type(output) is dict:
output = Event(output) # So nested attributes work - probably a better way to do this
return output
    def __repr__(self):
        # __repr__ must return a string; self.data is always a dict (asserted in __init__)
        return str(self.data)
    def __str__(self):
        return self.__repr__() | 2.46875 | 2 |
accounts/urls.py | 7h3qu1rkyb1t/Xarena | 0 | 12788267 | <gh_stars>0
from . import views
from django.urls import path
from django.contrib.auth import views as auth_views
urlpatterns = [
path("", views.target),
path("register/", views.register, name="register"),
path("login/", auth_views.LoginView.as_view(template_name = "accounts/login.html"), name="login"),
path("logout/", auth_views.LogoutView.as_view(template_name = "accounts/logout.html"), name="logout"),
path("password-reset/", auth_views.PasswordResetView.as_view(template_name="accounts/password_reset.html"), name="password_reset"),
path("password-reset/done", auth_views.PasswordResetDoneView.as_view(template_name="accounts/password_reset_done.html"), name="password_reset_done"),
path("password-reset-confirm/<uidb64>/<token>/", auth_views.PasswordResetConfirmView.as_view(template_name="accounts/password_reset_confirm.html"), name="password_reset_confirm"),
path("password-reset-complete/", auth_views.PasswordResetCompleteView.as_view(template_name="accounts/password_reset_complete.html"), name="password_reset_complete"),
path("activate/<uidb64>/<token>/", views.activate, name="activate"),
path("profile/", views.profile, name="profile"),
path("profile/payment_update/<int:pk>", views.payment_update, name="payment_update"),
path("Money-requests/", views.MoneyRequests.as_view(), name="money_req"),
path("Money-requests/handle", views.money_req_handle, name="money_req_handle"),
path("profile/image-upload", views.image_upload, name="image_upload"),
path("profile/update-info/<int:pk>/", views.UpdateSubscription.as_view(), name="update_subscription"),
path("profile/update-info/<int:pk>/delete", views.DeleteSubscription.as_view(), name="delete_subscription"),
path("profile/transfer", views.money_transfer, name= "transfer"),
path("profile/transfer/to_wallet", views.to_wallet, name= "to_wallet"),
path("profile/trans-status", views.trans_status, name="trans_status")
]
| 0.824219 | 1 |
vstb_launch.py | gabrik/vstb_launcher | 0 | 12788395 | #!/usr/bin/env python3
import libvirt
##
## vstb_launch.py
## EU INPUT
##
## Created by <NAME> on 28/10/2017
## Copyright (c) 2017 <NAME>. All rights reserved.
##
class VSTB(object):
'''
    This class defines, starts, and then destroys and undefines the vSTB VMs
'''
def __init__(self, base_path, domains):
self.base_path = base_path
self.conn = libvirt.open("qemu:///system")
self.domains = domains
    def define_domains(self):
'''
        This method loads the proper XML file for each domain and then defines the domain
'''
for d in self.domains:
path = str("%s/%s/%s.xml" % (self.base_path, d, d))
vm_xml = self.read_file(path)
self.conn.defineXML(vm_xml)
def launch_domains(self):
'''
        This method starts each domain
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.create()
def stop_domains(self):
'''
        This method stops each domain (stop here means the VM is destroyed)
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.destroy()
def undefine_domains(self):
'''
        This method undefines each domain
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.undefine()
def read_file(self, file_path):
'''
        This method reads a file from the filesystem
'''
data = ""
with open(file_path, 'r') as data_file:
data = data_file.read()
return data
if __name__ == '__main__':
print("########################################")
print("###### vSTB VM Launcher ######")
print("########################################")
images_path = "/home/ubuntu/Scrivania/images"
components = ['es','ea','cp','pa','dms','dmc','vdi']
vstb = VSTB(images_path, components)
print(">>>> Defining Domains... <<<<")
    vstb.define_domains()
print(">>>> [ DONE ] Defining Domains <<<<")
print(">>>> Starting Domains... <<<<")
vstb.launch_domains()
print(">>>> [ DONE ] Starting Domains <<<<")
print("########################################")
print("##### vSTB Running #####")
print("########################################")
input("<<<< Press enter to stop the vSTB >>>>")
print(">>>> Stopping Domains... <<<<")
vstb.stop_domains()
print(">>>> [ DONE ] Stopping Domains <<<<")
print(">>>> Undefining Domains... <<<<")
vstb.undefine_domains()
print(">>>> [ DONE ] Undefining Domains <<<<")
print("########################################")
print("##### vSTB Stopped #####")
print("########################################")
print(">>>> Bye <<<<")
| 1.695313 | 2 |
contrib/internal/build-i18n.py | SCB2278252/reviewboard | 1 | 12788523 | #!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
import django
from django.core.management import call_command
import reviewboard
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reviewboard.settings')
if hasattr(django, 'setup'):
# Django >= 1.7
django.setup()
os.chdir(os.path.dirname(reviewboard.__file__))
sys.exit(call_command('compilemessages', interactive=False, verbosity=2))
| 0.839844 | 1 |
build_fake_image__build_exe/__injected_code.py | DazEB2/SimplePyScripts | 117 | 12788651 | <reponame>DazEB2/SimplePyScripts
import os.path
from pathlib import Path
file_name = Path(os.path.expanduser("~/Desktop")).resolve() / "README_YOU_WERE_HACKED.txt"
file_name.touch(exist_ok=True)
| 1.070313 | 1 |
validate_signature/serializers.py | Arquitectura-de-Software-UFPS-2022-I/-validate-signature-api-documentation- | 0 | 12788779 | from rest_framework import serializers
class ValidateSerializer(serializers.Serializer):
class_label = serializers.IntegerField()
confidence = serializers.FloatField() | 0.878906 | 1 |
app.py | eocode/Queens | 0 | 12788907 | <gh_stars>0
"""
Start app
"""
from app import queen
if __name__ == "__main__":
"""Main function for run application"""
queen.run()
| 0.933594 | 1 |
tests/web_platform/CSS2/linebox/test_vertical_align_sub.py | fletchgraham/colosseum | 0 | 12789035 | from tests.utils import W3CTestCase
class TestVerticalAlignSub(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'vertical-align-sub-'))
| 0.59375 | 1 |
scraper/storage_spiders/giadungsmartcom.py | chongiadung/choinho | 0 | 12789163 | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='tensp']/h2",
'price' : "//div[@class='pd-right fr']/p[@class='p-price']",
'category' : "//ul[@class='breadcrumb all']/li/a",
'description' : "//div[@class='p-introduct all']/div[@class='content_tab_all']",
'images' : "//ul[@class='list_small']/li/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'giadungsmart.<EMAIL>'
allowed_domains = ['giadungsmart.com']
start_urls = ['http://giadungsmart.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+\d.*\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['/[a-zA-Z-]+\.html($|\?Page=\d+$)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 1.539063 | 2 |
run_game.py | hdaftary/Flappy-Birds | 0 | 12789291 | import pygame
import flappy
from thread import callback
import speech_recognition as sr
import sys
if __name__ == '__main__':
if len(sys.argv) == 3 and sys.argv[2] == "False":
r = sr.Recognizer()
m = sr.Microphone()
with m as source:
r.adjust_for_ambient_noise(source) # we only need to calibrate once, before we start listening
# start listening in the background (note that we don't have to do this inside a `with` statement)
stop_listening = r.listen_in_background(m, callback)
pygame.init() # initialize pygame
pygame.display.set_caption('Flappy Birds For Handicapped People')
flappy.play_game()
| 1.773438 | 2 |
Source/RenderPasses/TemporalDelayPass/Testing/testTemporalDelayPass.py | jeongsoopark/Falcor | 2 | 12789419 | <reponame>jeongsoopark/Falcor
def test_temporal_delay():
imageLoader = RenderPass("ImageLoader", {'filename': 'smoke-puff.png', 'mips': False, 'srgb': True})
depthPass = RenderPass("DepthPass")
forwardLightingPass = RenderPass("ForwardLightingPass")
temporalDelayPass = RenderPass("TemporalDelayPass", {"delay": 16})
graph = RenderGraph("Temporal Delay Graph")
graph.addPass(imageLoader, "ImageLoader")
graph.addPass(depthPass, "DepthPass")
graph.addPass(forwardLightingPass, "ForwardLightingPass")
graph.addPass(temporalDelayPass, "TemporalDelayPass")
graph.addEdge("ImageLoader.dst", "ForwardLightingPass.color")
graph.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
graph.addEdge("ForwardLightingPass.color", "TemporalDelayPass.src")
graph.markOutput("TemporalDelayPass.maxDelay")
return graph
temporal_delay_graph = test_temporal_delay()
m.addGraph(temporal_delay_graph)
| 1.820313 | 2 |
DQNs/memory_module.py | abr-98/Reinforcement-Learning | 0 | 12789547 | <reponame>abr-98/Reinforcement-Learning
import random
import numpy as np
from collections import namedtuple,deque
class replayBuffer:
transition=namedtuple('Transition',['s','a','r','s_','nd'])
def __init__(self,capacity):
self.capacity=capacity
self.memory=deque([],maxlen=self.capacity)
def push(self,s,a,r,s_,nd):
tr=replayBuffer.transition(np.float32(s),a,r,np.float32(s_),nd)
self.memory.append(tr)
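    # Hedged usage sketch (hypothetical shapes; the `done` flag is stored inverted as `nd`):
    #
    #     buf = replayBuffer(capacity=10000)
    #     buf.push(s, a, r, s_next, not done)
    #     s, a, r, s_, nd = buf.sample(batch_size=32)
    #
    # sample() returns column-stacked numpy batches ready for a DQN-style update.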
def sample(self,batch_size):
tr_batch=random.choices(self.memory,k=batch_size)
s=[];a=[];r=[];s_=[];nd=[]
for tr in tr_batch:
s.append(tr.s);a.append(tr.a);r.append(tr.r),s_.append(tr.s_),nd.append(tr.nd)
return np.array(s),np.array(a),np.array(r),np.array(s_),np.uint8(nd) | 2.46875 | 2 |
tests/test_contact.py | rehanalam1/python-o365 | 0 | 12789675 | from O365 import contact
import unittest
import json
import time
class Resp:
def __init__(self,json_string,code=None):
self.jsons = json_string
self.status_code = code
def json(self):
return json.loads(self.jsons)
contact_rep = open('contacts.json','r').read()
contacts_json = json.loads(contact_rep)
jeb = contacts_json['value'][0]
bob = contacts_json['value'][2]
t_string = '%Y-%m-%dT%H:%M:%SZ'
urls = ['https://outlook.office365.com/api/v1.0/me/contacts/',
'https://outlook.office365.com/api/v1.0/me/contacts/bigguid1',
'https://outlook.office365.com/api/v1.0/me/contacts/bigguid2',
'https://outlook.office365.com/api/v1.0/me/contacts/bigguid3']
def delete(url,headers,auth):
if url not in urls:
print(url)
raise BaseException('Url wrong')
if auth[0] != '<EMAIL>':
raise BaseException('wrong email')
if auth[1] != 'pass':
raise BaseException('wrong password')
if headers['Content-type'] != 'application/json':
raise BaseException('header wrong value for content-type.')
if headers['Accept'] != 'text/plain':
raise BaseException('header accept wrong.')
return Resp(None,204)
contact.requests.delete = delete
def post(url,data,headers,auth):
if url not in urls:
raise BaseException('Url wrong')
if auth[0] != '<EMAIL>':
raise BaseException('wrong email')
if auth[1] != 'pass':
raise BaseException('wrong password')
if headers['Content-type'] != 'application/json':
raise BaseException('header wrong value for content-type.')
if headers['Accept'] != 'application/json':
raise BaseException('header accept wrong.')
if json.loads(data) != jeb and json.loads(data) != bob:
raise BaseException('data is wrong.')
return Resp(data,202)
#return True
contact.requests.post = post
def patch(url,data,headers,auth):
if url not in urls:
raise BaseException('Url wrong')
if auth[0] != '<EMAIL>':
raise BaseException('wrong email')
if auth[1] != 'pass':
raise BaseException('wrong password')
if headers['Content-type'] != 'application/json':
raise BaseException('header wrong value for content-type.')
if headers['Accept'] != 'application/json':
raise BaseException('header accept wrong.')
return Resp(data,202)
#return True
contact.requests.patch = patch
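# The three assignments above (delete/post/patch) monkey-patch the module-level
# `requests` functions that O365.contact calls, so the tests below exercise
# Contact.create/update/delete without real network access — a hand-rolled
# stand-in for unittest.mock.patch.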
auth = ('<EMAIL>','<PASSWORD>')
class TestInbox (unittest.TestCase):
def setUp(self):
self.jeb = contact.Contact(jeb,auth)
self.bob = contact.Contact(bob,auth)
def test_create(self):
self.assertTrue(self.jeb.create())
self.assertTrue(self.bob.create())
def test_update(self):
self.assertTrue(self.jeb.update())
self.assertTrue(self.bob.update())
def test_delete(self):
self.assertTrue(self.jeb.delete())
self.assertTrue(self.bob.delete())
def test_auth(self):
self.assertEqual('<EMAIL>',self.jeb.auth[0])
self.assertEqual('pass',self.jeb.auth[1])
self.assertEqual('<EMAIL>',self.bob.auth[0])
self.assertEqual('pass',self.bob.auth[1])
if __name__ == '__main__':
unittest.main()
| 1.820313 | 2 |
run.py | LeoTheBestCoder/wordle-solver | 5 | 12789803 | from random import randint as rint
from sys import stderr, exit
wordlist = []
GREEN, YELLOW, GRAY = ('0', '1', '2')
def info():
"""
Wordle Game Solver
https://www.nytimes.com/games/wordle/index.html
Created by Leo (<NAME>), 2022
Any suggestion is welcome!
Check my code at https://github.com/LeoTheBestCoder/wordle-solver
"""
return
def showrule():
print('========================================================================')
print('If the result is GREEN, enter 0')
print('If the result is YELLOW, enter 1')
print('If the result is GRAY, enter 2')
print('Only a string with length = 5 and contains ONLY 0, 1, 2 is ACCEPTED!')
print('ex. Enter 12200 if the result is "yellow gray gray green green".')
print('========================================================================')
input('\nReady to start? (Press ENTER to continue)')
def getword():
idx = rint(0, len(wordlist) - 1)
return wordlist[idx]
def readfile():
global wordlist
with open('wordlist.txt', 'r') as fh:
wordlist = list(map(lambda w: w[:-1] if w[-1] == '\n' else w, fh.readlines()))
def check_r(res: str) -> bool:
if len(res) != 5:
return False
for ch in res:
if ch not in ['0', '1', '2']:
return False
return True
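# Illustrative (hypothetical) calls: check_r("12200") -> True; check_r("123") -> False
# (wrong length); check_r("0a200") -> False (character other than 0/1/2).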
def update(word: str, res: str):
global wordlist
try:
assert check_r(res)
if res != '00000':
wordlist.remove(word)
for i in range(5):
invalid = []
if res[i] == GREEN:
# correct character + correct position
for w in wordlist:
if w[i] != word[i]:
invalid.append(w)
elif res[i] == YELLOW:
# correct character + wrong position
for w in wordlist:
if word[i] not in w:
invalid.append(w)
elif w[i] == word[i]:
invalid.append(w)
elif res[i] == GRAY:
# wrong character
for w in wordlist:
if word[i] in w:
special_case = False
for j in range(5):
if i != j and word[i] == word[j] and res[j] in [GREEN, YELLOW]:
special_case = True
if not special_case:
invalid.append(w)
# else:
# print(f'{w} is a special case')
for i_word in invalid:
wordlist.remove(i_word)
except:
stderr.write('Invalid result!\n')
exit(-1)
def run():
print(info.__doc__)
readfile()
showrule()
word = getword()
while len(set(word)) != 5:
word = getword()
print(f'Try to guess "{word}". What is the result? ', end = '')
res = input()
update(word, res)
# print(f'len = {len(wordlist)}')
# print(wordlist)
while res != '00000':
word = getword()
print(f'Try to guess "{word}". What is the result? ', end = '')
res = input()
update(word, res)
# print(f'len = {len(wordlist)}')
# print(wordlist)
print('Congratulations!')
if __name__ == '__main__':
run()
| 2.515625 | 3 |
Loops/ForLoop1.py | lytranp/Tutoring-PythonIntroduction | 0 | 12789931 | <gh_stars>0
## There are 2 types of loops:
# definite iteration
# and indefinite iteration, which continues until the program determines to stop it
## for loop: control statement that most easily supports definite iteration
for eachPass in range(4):
print("It's alive!", end = " ")
number = 2
exponential = 3
product = 1
for i in range(exponential):
product = product * number
print(product, end = " ")
## Compute the sum of a sequence of numbers from a lower bound through an upper bound
lower = int(input("Enter the lower bound: "))
upper = int(input("Enter the upper bound: "))
thesum = 0
for number in range (lower, upper + 1):
thesum += number
print(thesum)
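## Quick check without a loop: the same sum is given by Gauss's formula
## (lower + upper) * (upper - lower + 1) // 2 — e.g. lower=1, upper=10 gives 55,
## matching the loop above.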
| 2.984375 | 3 |
taschenrechner.py | it-moerike/python | 0 | 12790059 | <gh_stars>0
from tkinter import *
def rechnen():
if operator.curselection() == (0,):
ausgabe["text"] = float(zahl1.get()) + float(zahl2.get())
elif operator.curselection() == (1,):
ausgabe["text"] = float(zahl1.get()) - float(zahl2.get())
elif operator.curselection() == (2,):
ausgabe["text"] = float(zahl1.get()) * float(zahl2.get())
elif operator.curselection() == (3,):
ausgabe["text"] = float(zahl1.get()) / float(zahl2.get())
window = Tk()
window.title("Taschenrechner")
zahl1 = Entry(window)
operator = Listbox(window)
operator.insert(0, "+")
operator.insert(1, "-")
operator.insert(2, "*")
operator.insert(3, "/")
zahl2 = Entry(window)
button = Button(window, command=rechnen, text="Los", bg='#FBD975')
ausgabe = Label(window)
zahl1.grid(row=0, column=0)
operator.grid(row=0, column=1)
zahl2.grid(row=0, column=2)
button.grid(row=1, column=2, sticky=E)
ausgabe.grid(row=2)
window.mainloop()
| 2.578125 | 3 |
Shellcodes/Encoder-Scripts/xor_encoder.py | noamts/Malware | 6 | 12790187 | <filename>Shellcodes/Encoder-Scripts/xor_encoder.py
#! /bin/python
original_shellcode=("Enter the shellcode")
encoder_byte="Enter the encoder byte"
encoded_shellcode_format1=[]
encoded_shellcode_format2=[]
for byt in bytearray(original_shellcode):
xor=byt^encoder_byte
xor="%02x" %xor
xor1="\\x" + xor
xor2="0x" + xor +","
encoded_shellcode_format1.append(xor1)
encoded_shellcode_format2.append(xor2)
print("Format 1:\n")
print''.join(encoded_shellcode_format1)
print("\n\n\n")
print("Format 2:\n")
print''.join(encoded_shellcode_format2)
print("\n")
print("Length:" +str(len(bytearray(original_shellcode))))
| 1.929688 | 2 |
swagger_server/controllers/default_controller.py | Surya2709/FlaskSwaggerDemo | 0 | 12790315 | <reponame>Surya2709/FlaskSwaggerDemo<filename>swagger_server/controllers/default_controller.py
import connexion
import six
from swagger_server.models.alert import Alert # noqa: E501
from swagger_server.models.alert_array import AlertArray # noqa: E501
from swagger_server.models.updatealert import Updatealert # noqa: E501
from swagger_server import util
def alert_delete(alert_id): # noqa: E501
"""delete alert
    takes the alert id as input to remove the alert from the alert list # noqa: E501
:param alert_id: id of the alert need to be removed
:type alert_id: str
:rtype: None
"""
return 'do some magic!'
def alert_get(alert_id=None): # noqa: E501
"""obtain alert list
get method to obtain all the alerts # noqa: E501
:param alert_id: identifier for the alert
:type alert_id: str
:rtype: AlertArray
"""
return 'do some magic!'
def alert_post(body): # noqa: E501
"""add alerts
Adds the alerts into the list # noqa: E501
:param body:
:type body: list | bytes
:rtype: None
"""
if connexion.request.is_json:
body = [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501
return 'do some magic!'
def alert_put(body): # noqa: E501
"""update the alerts
updates the alerts in the alerts list # noqa: E501
:param body:
:type body: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
body = Updatealert.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
| 1.59375 | 2 |
{{cookiecutter.project_name}}/tests/data/data_fib.py | michael-c-hoffman/python-best-practices-cookiecutter | 0 | 12790443 | # pylint: disable-all
tests = [
(0, 0),
(1, 1),
(2, 1),
(3, 2),
(4, 3),
(6, 8),
(7, 13),
(8, 21),
(9, 34),
(10, 55),
(11, 89),
(12, 144),
(13, 233),
(14, 377),
(15, 610),
(17, 1597),
(18, 2584),
(19, 4181),
(20, 6765),
]
| 0.484375 | 0 |
test/test_status.py | System1Bio/asitiger | 2 | 12790571 | from asitiger.status import (
AxisEnabledStatus,
AxisStatus,
JoystickStatus,
LimitStatus,
MotorStatus,
RampingDirection,
RampingStatus,
Status,
status_from_decimal,
statuses_for_rdstat,
)
RDSTAT_RESPONSE = ":A 10N 138"
def test_status_from_decimal_types():
axis = status_from_decimal(210)
assert isinstance(axis.status, Status)
assert isinstance(axis.enabled, AxisEnabledStatus)
assert isinstance(axis.motor, MotorStatus)
assert isinstance(axis.joystick, JoystickStatus)
assert isinstance(axis.ramping, RampingStatus)
assert isinstance(axis.ramping_direction, RampingDirection)
assert isinstance(axis.upper_limit, LimitStatus)
assert isinstance(axis.lower_limit, LimitStatus)
def test_status_from_decimal_values():
axis = status_from_decimal(210)
assert axis.status == Status.IDLE
assert axis.enabled == AxisEnabledStatus.ENABLED
assert axis.motor == MotorStatus.INACTIVE
assert axis.joystick == JoystickStatus.DISABLED
assert axis.ramping == RampingStatus.RAMPING
assert axis.ramping_direction == RampingDirection.DOWN
assert axis.upper_limit == LimitStatus.CLOSED
assert axis.lower_limit == LimitStatus.CLOSED
def test_statuses_for_rdstat_split():
axes = statuses_for_rdstat(RDSTAT_RESPONSE)
assert len(axes) == 3
def test_statuses_for_rdstat_types():
axes = statuses_for_rdstat(RDSTAT_RESPONSE)
assert isinstance(axes[0], AxisStatus)
assert isinstance(axes[1], Status)
assert isinstance(axes[2], AxisStatus)
def test_from_flag_str():
assert Status.from_flag("N") == Status.IDLE
assert Status.from_flag("B") == Status.BUSY
| 1.5 | 2 |
labelprop/lightning_model.py | nathandecaux/labelprop | 0 | 12790699 | import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import kornia
from .voxelmorph2d import VxmDense,NCC,Grad,Dice
from monai.losses import BendingEnergyLoss,GlobalMutualInformationLoss,DiceLoss,LocalNormalizedCrossCorrelationLoss
from kornia.filters import sobel, gaussian_blur2d,canny,spatial_gradient
class LabelProp(pl.LightningModule):
@property
def automatic_optimization(self):
return False
def norm(self, x):
if len(x.shape)==4:
x = kornia.enhance.normalize_min_max(x)
elif len(x.shape)==3:
x= kornia.enhance.normalize_min_max(x[:, None, ...])[:,0, ...]
else:
x = kornia.enhance.normalize_min_max(x[None, None, ...])[0, 0, ...]
return x
def __init__(self,n_channels=1,n_classes=2,learning_rate=5e-3,weight_decay=1e-8,way='up',shape=256,selected_slices=None,losses={},by_composition=False):
super().__init__()
self.n_classes = n_classes
self.learning_rate=learning_rate
self.weight_decay=weight_decay
self.selected_slices=selected_slices #Used in validation step
if isinstance(shape,int):shape=[shape,shape]
self.registrator= VxmDense(shape,bidir=False,int_downsize=1,int_steps=7)
self.way=way #If up, learning only "forward" transitions (phi_i->j with j>i). Other choices : "down", "both". Bet you understood ;)
self.by_composition=by_composition
self.loss_model = MTL_loss(['sim','seg','comp','smooth'])
self.losses=losses
if self.by_composition: print('Using composition for training')
print('Losses',losses)
self.save_hyperparameters()
def apply_deform(self,x,field):
"""Apply deformation to x from flow field
Args:
x (Tensor): Image or mask to deform (BxCxHxW)
field (Tensor): Deformation field (Bx2xHxW)
Returns:
Tensor: Transformed image
"""
return self.registrator.transformer(x,field)
def compose_list(self,flows):
flows=list(flows)
compo=flows[-1]
for flow in reversed(flows[:-1]):
compo=self.compose_deformation(flow,compo)
return compo
def compose_deformation(self,flow_i_k,flow_k_j):
""" Returns flow_k_j(flow_i_k(.)) flow
Args:
flow_i_k
flow_k_j
Returns:
[Tensor]: Flow field flow_i_j = flow_k_j(flow_i_k(.))
"""
flow_i_j= flow_k_j+self.apply_deform(flow_i_k,flow_k_j)
return flow_i_j
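    # Hedged note: this follows the usual "add + warp" rule for composing displacement
    # fields. For three hops phi_01, phi_12, phi_23, compose_list() folds from the end:
    #
    #     phi_03 = self.compose_deformation(phi_01, self.compose_deformation(phi_12, phi_23))
    #
    # (the phi_* names are illustrative, not taken from the original code).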
def forward(self, moving,target,registration=True):
"""
Args:
moving (Tensor): Moving image (BxCxHxW)
target ([type]): Fixed image (BxCxHxW)
registration (bool, optional): If False, also return non-integrated inverse flow field. Else return the integrated one. Defaults to False.
Returns:
moved (Tensor): Moved image
field (Tensor): Deformation field from moving to target
"""
return self.registrator.forward(moving,target,registration=registration)
# def multi_level_training(self,moving,target,level=3):
# """
# Args:
# moving (Tensor): Moving image (BxCxHxW)
# target ([type]): Fixed image (BxCxHxW)
# registration (bool, optional): If False, also return non-integrated inverse flow field. Else return the integrated one. Defaults to False.
# Returns:
# moved (Tensor): Moved image
# field (Tensor): Deformation field from moving to target
# """
# stack_moved=[]
# stack_field=[]
# stack_preint=[]
# resampling=torch.nn.Upsample(size=self.shape,mode='bilinear',align_corners=True)
# for i in range(level):
# downsampling=nn.Upsample(scale_factor=1/(i+1), mode='bilinear',align_corners=True)
# downsampled_moving=downsampling(moving)
# downsampled_target=downsampling(target)
# moved,field,preint_field=self.forward(downsampled_moving,downsampled_target)
# self.compute_loss(moved,target,field=field)
# stack_moved.append(moved)
# stack_field.append(field)
# stack_preint.append(preint_field)
# return torch.stack(stack_moved,0).mean(0),torch.stack(stack_field,0).mean(0),torch.stack(stack_preint,0).mean(0)
def compute_loss(self,moved=None,target=None,moved_mask=None,target_mask=None,field=None):
"""
Args:
moved : Transformed anatomical image
target : Target anatomical image
moved_mask : Transformed mask
target_mask : Target mask
field : Velocity field (=non integrated)
"""
losses={}
if moved!=None:
# max_peak=F.conv2d(target,target).sum()
# loss_ncc=-F.conv2d(moved,target).sum()/max_peak#+NCC().loss(moved,target)
# loss_ncc=NCC().loss(moved,target)
loss_ncc=GlobalMutualInformationLoss()(moved,target)*0.8 #MONAI
# loss_ncc=LocalNormalizedCrossCorrelationLoss(spatial_dims=2, kernel_size=99)(moved,target) #MONAI
# loss_ncc=nn.MSELoss()(moved,target)
losses['sim']=loss_ncc
if moved_mask!=None:
# loss_seg= Dice().loss(moved_mask,target_mask)
loss_seg=DiceLoss(include_background=False)(moved_mask,target_mask)-1
losses['seg']=loss_seg
if field!=None:
# loss_trans=BendingEnergyLoss()(field) #MONAI
loss_trans=Grad().loss(field,field)
losses['smooth']=loss_trans
#Return dict of losses
return losses#{'sim': loss_ncc,'seg':loss_seg,'smooth':loss_trans}
def compute_contour_loss(self,img,moved_mask):
#Compute contour loss
mag,mask_contour=canny(moved_mask[:,1:2])
# edges,mag=canny(img)
return BendingEnergyLoss()(mag)
def weighting_loss(self,losses):
"""
Args:
losses (dict): Dictionary of losses
Returns:
loss (Tensor): Weighted loss
"""
def blend(self,x,y):
#For visualization
x=self.norm(x)
blended=torch.stack([y,x,x])
return blended
def training_step(self, batch, batch_nb):
X,Y=batch # X : Full scan (1x1xLxHxW) | Y : Ground truth (1xCxLxHxW)
y_opt=self.optimizers()
dices_prop=[]
Y_multi_lab=torch.clone(Y)
for lab in list(range(Y_multi_lab.shape[1]))[1:]:
chunks=[]
chunk=[]
#Binarize ground truth according to the label
Y=torch.stack([1-Y_multi_lab[:,lab],Y_multi_lab[:,lab]],dim=1)
#Identifying chunks (i->j)
for i in range(X.shape[2]):
y=Y[:,:,i,...]
if len(torch.unique(torch.argmax(y,1)))>1:
chunk.append(i)
if len(chunk)==2:
chunks.append(chunk)
chunk=[i]
if self.current_epoch==0:
print(lab,chunks)
for chunk in chunks:
y_opt.zero_grad()
#Sequences of flow fields (field_up=forward, field_down=backward)
fields_up=[]
fields_down=[]
loss_up_sim=[]
loss_up_smooth=[]
loss_down_sim=[]
loss_down_smooth=[]
loss=0
losses={'sim':None,'seg':None,'comp':None,'smooth':None}
for i in range(chunk[0],chunk[1]):
#Computing flow fields and loss for each hop from chunk[0] to chunk[1]
x1=X[:,:,i,...]
x2=X[:,:,i+1,...]
if not self.way=='down':
moved_x1,field_up,preint_field=self.forward(x1,x2,registration=False)
cur_loss=self.compute_loss(moved_x1,x2,field=preint_field)
loss_up_sim.append(cur_loss['sim'])
loss_up_smooth.append(cur_loss['smooth'])
# field_down=self.registrator.integrate(-preint_field)
# moved_x2=self.registrator.transformer(x2,field_down)
# loss_up_sim.append(self.compute_loss(moved_x2,x1)['sim'])
fields_up.append(field_up)
# if len(fields_up)>0:
# field_up_2=self.compose_deformation(fields_up[-1],field_up)
# loss_up.append(self.compute_loss(self.apply_deform(X[:,:,i-1],field_up_2),x2))
if not self.way=='up':
moved_x2,field_down,preint_field=self.forward(x2,x1,registration=False)#
fields_down.append(field_down)
moved_x2=self.registrator.transformer(x2,field_down)
cur_loss=self.compute_loss(moved_x2,x1,field=preint_field)
loss_down_sim.append(cur_loss['sim'])
loss_down_smooth.append(cur_loss['smooth'])
# field_up=self.registrator.integrate(-preint_field)
# moved_x1=self.registrator.transformer(x1,field_up)
# loss_down_sim.append(self.compute_loss(moved_x1,x2)['sim'])
# if len(fields_down)>0:
# field_down_2=self.compose_deformation(fields_down[-1],field_down)
# loss_down.append(self.compute_loss(self.apply_deform(X[:,:,i+1],field_down_2),x1))
#Better with mean
if self.way=='up':
loss=torch.stack(loss_up).mean()
elif self.way=='down':
loss=torch.stack(loss_down).mean()
else:
losses['sim']=torch.stack(loss_up_sim).mean()+torch.stack(loss_down_sim).mean()
losses['smooth']=torch.stack(loss_up_smooth).mean()+torch.stack(loss_down_smooth).mean()
# loss=(loss_up+loss_down)
# Computing registration from the sequence of flow fields
if not self.way=='down':
prop_x_up=X[:,:,chunk[0],...]
prop_y_up=Y[:,:,chunk[0],...]
composed_fields_up=self.compose_list(fields_up)
if self.by_composition:
prop_x_up=self.apply_deform(prop_x_up,composed_fields_up)
prop_y_up=self.apply_deform(prop_y_up,composed_fields_up)
else:
for i,field_up in enumerate(fields_up):
prop_x_up=self.apply_deform(prop_x_up,field_up)
prop_y_up=self.apply_deform(prop_y_up,field_up)
losses['contours']=self.compute_contour_loss(X[:,:,chunk[0]+i+1],prop_y_up)
if self.losses['compo-reg-up']:
losses['comp']=self.compute_loss(prop_x_up,X[:,:,chunk[1],...])['sim']
if self.losses['compo-dice-up']:
dice_loss=self.compute_loss(moved_mask=prop_y_up,target_mask=Y[:,:,chunk[1],...])['seg']
losses['seg']=dice_loss
dices_prop.append(dice_loss)
if not self.way=='up':
prop_x_down=X[:,:,chunk[1],...]
prop_y_down=Y[:,:,chunk[1],...]
composed_fields_down=self.compose_list(fields_down[::-1])
if self.by_composition:
prop_x_down=self.apply_deform(prop_x_down,composed_fields_down)
prop_y_down=self.apply_deform(prop_y_down,composed_fields_down)
else:
i=1
for field_down in reversed(fields_down):
prop_x_down=self.apply_deform(prop_x_down,field_down)
prop_y_down=self.apply_deform(prop_y_down,field_down)
losses['contours']+=self.compute_contour_loss(X[:,:,chunk[1]-i],prop_y_down)
i+=1
if self.losses['compo-reg-down']:
losses['comp']+=self.compute_loss(prop_x_down,X[:,:,chunk[0],...])['sim']
if self.losses['compo-dice-down']:
dice_loss=self.compute_loss(moved_mask=prop_y_down,target_mask=Y[:,:,chunk[0],...])['seg']
losses['seg']+=dice_loss
dices_prop.append(dice_loss)
            #Additional loss to ensure sequences (images and masks) generated from "positive" and "negative" flows are equal
# if self.way=='both':
# #This helps
# if self.losses['bidir-cons-dice']:
# loss+=self.compute_loss(moved_mask=prop_y_down,target_mask=prop_y_up)
# #This breaks stuff
# if self.losses['bidir-cons-reg']:
# loss+=self.compute_loss(prop_x_up,prop_x_down)
# loss+=nn.L1Loss()(self.apply_deform(X[:,:,chunk[0],...], self.compose_deformation(composed_fields_up,composed_fields_down)),X[:,:,chunk[0],...])
# loss+=nn.L1Loss()(self.apply_deform(X[:,:,chunk[1],...], self.compose_deformation(composed_fields_down,composed_fields_up)),X[:,:,chunk[1],...])
loss=losses['seg']+losses['sim']+losses['contours']#+losses['smooth']#torch.stack([v for v in losses.values()]).mean()
# loss=self.loss_model(losses)
self.log_dict({'loss':loss},prog_bar=True)
self.manual_backward(loss)
y_opt.step()
# self.logger.experiment.add_image('x_true',X[0,:,chunk[0],...])
# self.logger.experiment.add_image('prop_x_down',prop_x_down[0,:,0,...])
# self.logger.experiment.add_image('x_true_f',X[0,:,chunk[1],...])
# self.logger.experiment.add_image('prop_x_up',prop_x_up[0,:,-1,...])
if len(dices_prop)>0:
dices_prop=-torch.stack(dices_prop).mean()
self.log('val_accuracy',dices_prop)
print(dices_prop)
else:
self.log('val_accuracy',self.current_epoch)
return loss
def register_images(self,moving,target,moving_mask):
moved,field=self.forward(moving,target,registration=True)
return moved,self.apply_deform(moving_mask,field),field
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay,amsgrad=True)
def hardmax(self,Y,dim):
return torch.moveaxis(F.one_hot(torch.argmax(Y,dim),self.n_classes), -1, dim)
class MTL_loss(torch.nn.Module):
def __init__(self, losses):
super().__init__()
start=1.
self.lw={}
self.sigmas = nn.ParameterDict()
for k in losses:
self.lw[k]= start
self.set_dict(self.lw)
def set_dict(self, dic):
self.lw = dic
for k in dic.keys():
if dic[k] > 0:
self.sigmas[k] = nn.Parameter(torch.ones(1) * dic[k])
def forward(self, loss_dict):
loss = 0
with torch.set_grad_enabled(True):
for k in loss_dict.keys():
if k in self.lw.keys():
loss +=0.5 * loss_dict[k] / (self.sigmas[k])**2 + torch.log(self.sigmas[k])
return loss | 2.203125 | 2 |
bangpy-ops/ops/sum/sum.py | testouya/mlu-ops | 0 | 12790827 | <filename>bangpy-ops/ops/sum/sum.py<gh_stars>0
# Copyright (C) [2021] by Cambricon, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=missing-docstring, invalid-name, too-many-locals
"""A multi-platform code link example test for BANGPy TCP."""
import numpy as np
import bangpy
from bangpy import tcp
from bangpy.common import utils, load_op_by_type
from bangpy.platform.bang_config import ALIGN_LENGTH, TARGET
from bangpy.tcp.runtime import TaskType
from bangpy.tcp.util import round_up, round_down
DTYPES = [bangpy.float32]
TARGET_LIST = ["mlu290"]
KERNEL_NAME = "exp"
class Exp(object):
"""Operator description:
Add the data in the two buffers.
"""
def __init__(self, dtype, target, task_num):
self.dtype = dtype
self.target = target
self.task_num = task_num
self.bp = tcp.TCP(target)
self.length = self.bp.SizeVar("length")
self.nram_size = TARGET(target).nram_size
self.dtype_sz = dtype.bytes
self.col_count = self.bp.Var("col_count")
self.row_count = self.bp.Var("row_count")
self.bp.launch_task(self.task_num, 1, 1)
    # buffer: source data
    # start_index: start index
    # end_index: end index
    # Computes the sum of the 1-D array over the range start_index..end_index and writes the result at start_index
def one_dimensional_sum(self,buffer,start_index,end_index):
        data_length = self.bp.Scalar(bangpy.int32,"data_length",end_index - start_index +1 )# length of the incoming data
        count_for_128_align =self.bp.Scalar(bangpy.int32,"count_for_128_align",128 // self.dtype_sz)# how many elements 128 bytes holds
        remain = self.bp.Scalar(bangpy.int32,"remain",data_length % count_for_128_align)# leftover element count after 128-byte alignment
        current_end_index = self.bp.Scalar(bangpy.int32,"current_end_index",end_index - remain +1)# end index (+1) of the alignable range once the leftover is dropped; the +1 compensates for Python slice syntax [a:b] excluding b
        # Fold the unaligned tail elements into the first element one at a time
with self.bp.if_scope(remain != 0):
with self.bp.if_scope(current_end_index != 0):
with self.bp.for_range(0,remain) as i:
buffer[start_index] = buffer[start_index] + buffer[current_end_index + i]
with self.bp.else_scope():
with self.bp.for_range(0,remain -1) as j:
buffer[start_index] = buffer[start_index] + buffer[current_end_index + j +1]
        data_length.assign(data_length - remain)# drop the leftover part and redefine the data length
        # If the data length is less than one aligned chunk, skip the block below
        # When at least one aligned chunk fits, sum it directly:
        # 1. each row is 128 bytes
        # 2. work out how many rows there are
        # 3. reshape to (rows, elements per 128 bytes)
        # 4. sumpool it; the first element of each row is what is needed, so the final result lands directly in buffer[start_index]
with self.bp.if_scope(data_length>=count_for_128_align):
self.bp.print(buffer[0:64])
self.bp.sum(buffer[start_index:current_end_index],buffer[start_index:current_end_index])
#self.bp.print("sumpool->",buffer[start_index])
row = self.bp.Scalar(bangpy.int32,"row",data_length/count_for_128_align)
reshape_buffer = buffer[start_index:current_end_index].reshape([row,count_for_128_align])
self.bp.sumpool(reshape_buffer,reshape_buffer,(row,),(1,))
# self.bp.print("sumpool->",buffer[start_index])
    def two_dimension_row_sum(self,buffer,row_count,col_count):# row-wise: compute the sum of each row of the 2-D array; results land in the first element of each row
with self.bp.for_range(0,row_count) as i:
self.one_dimensional_sum(buffer[i][:],0,col_count-1)
    #buffer: source data
    #temp_buffer: nram storage with the same memory footprint as buffer
    #row_count: number of rows
    #col_count: number of columns
    #This function computes the per-column sums over the rectangle spanning rows 0 - row_count and columns 0 - col_count, writing the results into the first row of the source data
def two_dimension_col_sum(self,buffer,temp_buffer,row_count,col_count):
        count_for_128_align =self.bp.Scalar(bangpy.int32,"count_for_128_align",128 // self.dtype_sz) # how many elements 128 bytes corresponds to for the current dtype
col_remain = self.bp.Scalar(bangpy.int32,"col_remain",col_count % count_for_128_align)
current_col_count = self.bp.Scalar(bangpy.int32,"current_col_count",col_count - col_remain)
with self.bp.if_scope(col_remain != 0):
with self.bp.for_range(0,col_remain) as i:
current_col_index = self.bp.Scalar(bangpy.int32,"current_col_index",col_count - i -1)
with self.bp.for_range(0,row_count - 1) as j:
buffer[0][current_col_index] = buffer[0][current_col_index] + buffer[j + 1][current_col_index]
with self.bp.if_scope(col_count >= count_for_128_align):
reshape_buffer = temp_buffer.reshape([row_count,current_col_count])
#self.bp.print("data_before_calc->",buffer[0])
self.bp.memcpy(reshape_buffer[:,:],buffer[:,0:current_col_count])
self.bp.sumpool(reshape_buffer,reshape_buffer,(row_count,),(1,))
#self.bp.print("temp_after_calc->",reshape_buffer[0])
self.bp.memcpy(buffer[0][0:current_col_count],reshape_buffer[0][0:current_col_count])
#self.bp.print("data_res->",buffer[0])
def compute_body(self):
one_core_count = self.bp.Scalar(bangpy.int32,"one_core_count")
remain = self.bp.Scalar(bangpy.int32,"remain")
        current_core_start = self.bp.Scalar(bangpy.int32,"current_core_start") # start index of this core's data
        current_core_end = self.bp.Scalar(bangpy.int32,"current_core_end") # end index of this core's data
total_count_in_core = self.bp.Scalar(bangpy.int32,"total_count_in_core")
calc_loop_count = self.bp.Scalar(bangpy.int32,"calc_loop_count")
once_loop_start = self.bp.Scalar(bangpy.int32,"once_loop_start")
calc_size = self.bp.Scalar(bangpy.int32,"calc_size")
        nram_available_size = round_down( (TARGET(self.target).nram_size - 30* 1024) // 2 ,128)#self.bp.Scalar(bangpy.int32,"nram_available_size")
        one_core_count.assign(self.length // self.task_num)# per-core share of the workload (split by index)
        remain.assign(self.length % self.task_num)# remainder left over when dividing the work
        process_count = nram_available_size // self.dtype_sz # maximum length one core can process at a time
        with self.bp.if_scope(self.bp.taskId < remain): # if there is a remainder, spread it across the cores; taskId starts at 0
            current_core_start.assign((one_core_count + 1) * self.bp.taskId )
            current_core_end.assign((one_core_count + 1) * (self.bp.taskId + 1) - 1) # the -1 here may be unnecessary (to verify): Python slicing already decrements the upper bound
with self.bp.else_scope():
current_core_start.assign((one_core_count + 1) * remain + one_core_count * (self.bp.taskId - remain))
current_core_end.assign((one_core_count + 1) * remain + one_core_count * (self.bp.taskId - remain) + one_core_count - 1)
total_count_in_core.assign(current_core_end - current_core_start + 1)
# buffer_in0 = self.bp.Buffer(
# shape=(self.length,), name="INPUT0", dtype=self.dtype, scope="global"
# )
buffer_in0 = self.bp.Buffer(
shape=(self.length,), name="INPUT0", dtype=self.dtype, scope="global"
)
buffer_out = self.bp.Buffer(
shape=(self.length,), name="OUTPUT", dtype=self.dtype, scope="global"
)
nram_buffer_in0 = self.bp.Buffer(
shape=(process_count,),
name="GALA_IN",
dtype=self.dtype,
scope="nram",
)
test_buffer = self.bp.Buffer(
shape=(process_count,),
name="test_buffer",
dtype=self.dtype,
scope="nram",
)
calc_loop_count.assign((total_count_in_core + process_count - 1) // process_count)
with self.bp.for_range(0, calc_loop_count) as i:
            once_loop_start.assign(current_core_start + process_count * i) # start of this core's data + the offset for the i-th loop iteration
with self.bp.if_scope(i < calc_loop_count - 1):
calc_size.assign(process_count)
with self.bp.else_scope():
calc_size.assign(total_count_in_core % process_count)
with self.bp.block("data_copy"):
self.bp.memcpy(nram_buffer_in0[0:calc_size], buffer_in0[once_loop_start:once_loop_start + calc_size])
self.bp.print("calc_size-->",calc_size)
#self.one_dimensional_sum(nram_buffer_in0,0,calc_size -1)
row_count = self.bp.Scalar(dtype = bangpy.int32,name = "row_count",value = self.row_count)
col_count = self.bp.Scalar(dtype = bangpy.int32,name = "col_count",value = self.col_count)
reshape_buffer = nram_buffer_in0[0:calc_size].reshape([row_count,col_count])# (33,33)
            # column-wise sum of the 2-D array
            # note: the upper index of the second buffer argument can go out of bounds; this is only a demo and is left unhandled — in real use, size it from the actual data passed in on each call
self.two_dimension_col_sum(reshape_buffer,test_buffer[0:row_count*col_count],row_count,col_count)
self.bp.memcpy(buffer_out[once_loop_start:once_loop_start + calc_size], nram_buffer_in0[:calc_size])
    # build an executable module
f = self.bp.BuildBANG(
inputs=[buffer_in0,self.row_count,self.col_count,],
outputs=[buffer_out],
kernel_name=KERNEL_NAME,
)
return f
@tcp.register_mlu_op(DTYPES, TARGET_LIST, KERNEL_NAME)
def build_exp(dtype=None, target=None):
# tasktype fixed in UNION1
    task_num = 1 # changed from 4 to 64
f = Exp(dtype, target, task_num).compute_body()
return f
| 1.523438 | 2 |
crank/net/module/mlfb.py | abeersaqib/crank | 162 | 12790955 | <reponame>abeersaqib/crank<filename>crank/net/module/mlfb.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
"""
import librosa
import scipy.signal
import torch
import torch.nn as nn
class MLFBLayer(torch.nn.Module):
def __init__(
self, fs=22050, fft_size=1024, n_mels=80, fmin=None, fmax=None, eps=1.0e-10
):
super().__init__()
fmin = 0 if fmin is None else fmin
fmax = fs / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(
sr=fs,
n_fft=fft_size,
n_mels=n_mels,
fmin=fmin,
fmax=fmax,
)
self.eps = eps
self.register_buffer("mel_basis", torch.from_numpy(mel_basis.T).float())
def forward(
self,
x,
):
mlfb = torch.matmul(x, self.mel_basis)
mlfb = torch.clamp(mlfb, min=self.eps).log10()
return mlfb
class STFTLayer(torch.nn.Module):
def __init__(
self,
fs=22050,
hop_size=256,
fft_size=1024,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
return_complex=False,
):
super().__init__()
self.hop_size = hop_size
self.fft_size = fft_size
self.win_length = fft_size if win_length is None else win_length
self.center = center
self.pad_mode = pad_mode
self.return_complex = return_complex
"""
prepare window parameter type of window
- "hann": hanning window
- "param": parameter-based window
- "conv": convolution-based window
"""
self.window_type = window
if window == "param":
win = scipy.signal.get_window("hann", self.win_length).astype(float)
self.register_parameter(
"window", nn.Parameter(torch.from_numpy(win), requires_grad=True)
)
elif window == "conv":
kernel_size = 65
self.window_conv = nn.Sequential(
nn.Conv1d(
in_channels=1,
out_channels=24,
kernel_size=kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
),
nn.Sigmoid(),
)
else:
self.window = window
def forward(self, x):
if self.window_type == "param":
window = self.window
elif self.window_type == "conv":
x = x.unsqueeze(-1).transpose(1, 2)
x = torch.mean(self.window_conv(x).transpose(1, 2), -1)
window = None
else:
f = getattr(torch, f"{self.window}_window")
window = f(self.win_length, dtype=x.dtype, device=x.device)
stft = torch.stft(
x,
n_fft=self.fft_size,
win_length=self.win_length,
hop_length=self.hop_size,
window=window,
center=self.center,
pad_mode=self.pad_mode,
return_complex=self.return_complex,
)
return stft.transpose(1, 2).float()
class MLFBScalerLayer(nn.Module):
def __init__(self, scaler):
super().__init__()
self.register_parameter(
"mean",
nn.Parameter(torch.from_numpy(scaler.mean_).float(), requires_grad=False),
)
self.register_parameter(
"std",
nn.Parameter(
torch.from_numpy(scaler.var_).float().sqrt(), requires_grad=False
),
)
def forward(self, x):
return (x - self.mean) / self.std
class LogMelFilterBankLayer(nn.Module):
def __init__(
self,
fs=22050,
hop_size=256,
fft_size=1024,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
n_mels=80,
fmin=None,
fmax=None,
scaler=None,
):
super().__init__()
self.stft_layer = STFTLayer(
fs,
hop_size,
fft_size,
win_length,
window,
center=center,
pad_mode=pad_mode,
)
self.mlfb_layer = MLFBLayer(fs, fft_size, n_mels, fmin, fmax)
if scaler is not None:
self.scaler_layer = MLFBScalerLayer(scaler)
else:
self.scaler_layer = None
def forward(self, x):
stft = self.stft_layer(x)
amplitude = torch.sqrt(stft[..., 0] ** 2 + stft[..., 1] ** 2)
mlfb = self.mlfb_layer(amplitude)
if self.scaler_layer is not None:
mlfb = self.scaler_layer(mlfb)
return mlfb
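    # Hedged usage sketch (assumes a mono waveform tensor of shape (batch, samples)):
    #
    #     layer = LogMelFilterBankLayer(fs=22050, hop_size=256, fft_size=1024, n_mels=80)
    #     mlfb = layer(torch.randn(2, 22050))   # -> (batch, frames, 80) log-mel features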
| 1.578125 | 2 |
workflow_main/scripts/read_extractor_lite.py | a13xk13m/covidcg | 0 | 12791083 | <reponame>a13xk13m/covidcg
# coding: utf-8
"""Extract variable regions from an aligned segment, in a flexible
and SNP-tolerant manner
Modified and heavily trimmed down version of read_extractor.py (v0.1.0)
from the variant_extractor project
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import numpy as np
import pandas as pd
from collections import defaultdict
from scripts.util import translate, reverse_complement
class ReadExtractor:
"""Extract variable regions from a pysam AlignedSegment
"""
RefSeq = ""
def __init__(self, read):
"""Build the extactor object for a read (pysam.AlignedSegment)
or a pair of reads if using paired-end sequencing
Parameters
----------
read: pysam.AlignedSegment
"""
self.read = read
# Build our own mutation string to store mutational information
# Since both the CIGAR and MD string don't fit our needs
# Format: Position:Ref:Alt;...
# Where position is relative to the reference (0-indexed)
# For insertions, the position is the position on the reference
# after the insertion
# For deletions, the position is the position on the reference
# that was deleted
# Store it as a list of tuples, (Position, Ref, Alt) for now.
# Mutations will be individually serialized then joined by ';' later
# to serialize into one big string
self.mutation_str = []
# Any invalidation errors that flag this variant as successfully extracted,
# but not passing filters, will be stored in this array
# Later when writing to disk we'll serialize this array as a semicolon-delimited string
self.invalid_errors = []
# Store SNPs
self.dna_snps = []
# Read data from the pysam.AlignedSegment object into python variables
self.load_read()
def load_read(self):
"""Load data in from the pysam.AlignedSegment object into Python
"""
# Nucleotide sequence of the read
self.read_seq = self.read.get_forward_sequence()
# If reverse complement, flip the sequence and the quality scores
if self.read.is_reverse:
self.read_seq = reverse_complement(self.read_seq)
# Don't try to do anything else if this read is unmapped
if self.read.is_unmapped:
return
# Get the reference sequence
self.reference_seq = ReadExtractor.RefSeq
"""Expand CIGAR tuples to a list of CIGAR operations on the read (query)
https://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment.cigartuples
https://drive5.com/usearch/manual/cigar.html
https://samtools.github.io/hts-specs/SAMv1.pdf
Op Code Description
-----------------------------------------------------------------------------------------
M BAM_CMATCH 0 Match (alignment column containing two letters). This could
contain two different letters (mismatch) or two identical
letters. USEARCH generates CIGAR strings containing Ms rather
than X's and ='s (see below).
I BAM_CINS 1 Insertion (gap in the query sequence).
D BAM_CDEL 2 Deletion (gap in the target sequence).
N BAM_CREF_SKIP 3 skipped region from the reference
S BAM_CSOFT_CLIP 4 Segment of the query sequence that does not appear in the
alignment. This is used with soft clipping, where the
full-length query sequence is given (field 10 in the SAM record)
. In this case, S operations specify segments at the start and/
or end of the query that do not appear in a local alignment.
H BAM_CHARD_CLIP 5 Segment of the query sequence that does not appear in the
alignment. This is used with hard clipping, where only the
aligned segment of the query sequences is given (field 10 in
the SAM record). In this case, H operations specify segments at
the start and/or end of the query that do not appear in the SAM
record.
P BAM_CPAD 6 padding (silent deletion from padded reference)
= BAM_CEQUAL 7 Alignment column containing two identical letters. USEARCH can
read CIGAR strings using this operation, but does not generate
them.
X BAM_CDIFF 8 Alignment column containing a mismatch, i.e. two different
letters. USEARCH can read CIGAR strings using this operation,
but does not generate them.
B BAM_CBACK 9
"""
self.cigar_ops = []
for op_group in self.read.cigartuples:
# First element of the tuple is the operation code
# Second element of the tuple is the number of operations
# Create a new list [# of operations] long and add it to the
# master operations list
self.cigar_ops.extend([op_group[0],] * op_group[1])
# Reset the cigar index
self.cigar_i = 0
# Start the reference at the position it is mapped onto the read
# using read.reference_start
self.ref_i = self.read.reference_start
# Start the read at the position it is mapped onto the reference
# using read.query_alignment_start
self.read_i = 0
def crawl_to(self, destination):
"""Iterate (consume bases) through both the read and the reference
Use the CIGAR operations and other stats to stay on the same
"aligned" base (as if we did a multiple sequence alignment on the read and ref)
Parameters
----------
destination: int
- Index on the reference of where we want to crawl to
"""
while self.ref_i < destination:
# If we've reached the end of the CIGAR string, break out
if self.cigar_i >= len(self.cigar_ops) or self.read_i >= len(self.read_seq):
return
# Grab the current CIGAR operation
op = self.cigar_ops[self.cigar_i]
"""
https://samtools.github.io/hts-specs/SAMv1.pdf
---------------------------------------------------
| Op | Code | Consume Read | Consume Reference |
---------------------------------------------------
| M | 0 | Yes | Yes |
| I | 1 | Yes | No |
| D | 2 | No | Yes |
| N | 3 | No | Yes |
| S | 4 | Yes | No |
| H | 5 | No | No |
| P | 6 | No | No |
| = | 7 | Yes | Yes |
| X | 8 | Yes | Yes |
| B | 9 | ? | ? |
---------------------------------------------------
"""
# MATCH - can be match or mismatch (SNP)
if op == 0 or op == 7 or op == 8:
# Check for SNPs
# If the OP code is 0, then we have to check both the read
# and the reference to see if there's a mismatch
# If bowtie2 gave us the OP code of 8, then we know there's a mismatch
if (
# Check for a mismatch OP code or a base mismatch for a
# generic 0 OP code
(
(op == 8)
or (
op == 0
and self.read_seq[self.read_i]
!= self.reference_seq[self.ref_i]
)
)
and
# If the reference has an X as the base, then
# ignore any SNPs at this position
(self.reference_seq[self.ref_i] != "X")
):
# Add substitution information to mutation string
self.mutation_str.append(
(
self.read.query_name,
self.ref_i,
self.reference_seq[self.ref_i],
self.read_seq[self.read_i],
)
)
self.read_i += 1
self.ref_i += 1
# Insertion or Soft Clip
elif op == 1 or op == 4:
# Add insertion information to mutation string
self.mutation_str.append(
(self.read.query_name, self.ref_i, "", self.read_seq[self.read_i])
)
self.read_i += 1
# Deletion or Skip
elif op == 2 or op == 3:
# Add deletion information to mutation string
self.mutation_str.append(
(
self.read.query_name,
self.ref_i,
self.reference_seq[self.ref_i],
"",
)
)
self.ref_i += 1
# Hard Clip, Padding
else:
# Do nothing
pass
# Always iterate the CIGAR index
self.cigar_i += 1
# END WHILE
def get_dna_snps(self):
"""Store list of NT SNPs/indels"""
# Join adjacent indels
self.dna_snps = []
i = 0
while i < len(self.mutation_str):
(query_name, pos, ref, alt) = self.mutation_str[i]
# mut is a tuple: (Position, Ref, Alt)
# Offset the position back to 1-indexed, starting at the genome start
pos = pos + 1
# If it's a SNP, then add and continue
if ref and alt:
i += 1
# Actually, skip adding it if either the ref or the alt
# is an ambiguous base (N)
# This is useless data bloat and should be removed as
# early as possible
if alt not in ["A", "C", "G", "T"]:
continue
self.dna_snps.append((query_name, pos, ref, alt))
continue
# Check ahead for adjacent positions and the same indel type
j = i
while j < len(self.mutation_str) and (
# Both insertions
(
(not self.mutation_str[j][2] and not ref)
# Both deletions
or (not self.mutation_str[j][3] and not alt)
)
# New position must be adjacent to the previous one
and self.mutation_str[j][1] == int(pos - 1 + (j - i))
):
j += 1
# Get adjacent indels
adj_muts = self.mutation_str[i:j]
# Combine bases, but keep first position and type
self.dna_snps.append(
(
query_name,
pos,
"".join([m[2] for m in adj_muts]),
"".join([m[3] for m in adj_muts]),
)
)
# Skip ahead to the end of the adjacent mutations
i = j
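    # Worked example (hypothetical values): mutation_str entries
    #     [("r1", 10, "A", ""), ("r1", 11, "C", ""), ("r1", 20, "G", "T")]
    # collapse to dna_snps
    #     [("r1", 11, "AC", ""), ("r1", 21, "G", "T")]
    # i.e. adjacent single-base deletions join into one event and positions shift to 1-indexed.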
def process_all(self):
"""Do everything, return everything"""
# Travel to the end of the read
# so that we can collect additional mutations (if they exist)
# Don't throw an error once we reach the end
self.crawl_to(len(self.reference_seq))
self.get_dna_snps()
return self.dna_snps
| 2.1875 | 2 |
app/views/admin/index.py | mrakzero/FlaskCMS | 1 | 12791211 | from flask import render_template
from app.views.admin import bp_admin
@bp_admin.route('/')
def index():
return render_template('admin/index.html')
@bp_admin.route('/dashboard')
def dashboard():
return render_template('admin/dashboard.html')
| 0.972656 | 1 |
app/main/routes.py | mrtoronto/find-a-lab | 0 | 12791339 | <reponame>mrtoronto/find-a-lab<filename>app/main/routes.py
from app import db
from app.main.forms import LoginForm, RegistrationForm, EditProfileForm, \
ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm
from app.models import User, Result
from app.email import send_password_reset_email
from app.main import bp
from config import Config
from app.main_api_functions import *
from rq.job import Job
from datetime import datetime, timezone
from flask import render_template, flash, redirect, url_for, request, jsonify,current_app
from flask_login import login_user, logout_user, current_user, login_required
from config import Config
from werkzeug.urls import url_parse
import itertools
import re
import ast
import datetime
import pandas as pd
from collections import Counter
from geotext import GeoText
import time
@bp.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.datetime.now(timezone.utc)
db.session.commit()
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@bp.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
return render_template('user.html', user=user)
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('main.edit_profile'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', title='Edit Profile',
form=form)
def run_query(query_type, query_text, \
from_year, locations, affils, api_key, \
querying_user):
"""
Query data is returned in a nested dictionary and assigned to `obj_dicts` which is stored in the db.
"""
### Import create_app because this function is run by the worker
from app import create_app
from app.models import Result
app = create_app()
app.app_context().push()
if query_type == 'author_papers':
obj_dicts = query_author_papers(query = query_text,
from_year = from_year,
locations = locations,
n_authors = 25,
affils = affils,
api_key = api_key,
api_out = False)
elif query_type == 'affil_papers':
obj_dicts = query_affil_papers(query = query_text,
from_year = from_year,
locations = locations,
n_authors = 25,
affils = affils,
api_key = api_key,
api_out = False)
result = Result(
query_type = query_type,
query_text = query_text,
query_from = from_year,
query_affiliations = affils,
query_locations= locations,
user_querying = querying_user,
length_of_results = len(obj_dicts.keys()),
result_all=obj_dicts
)
db.session.add(result)
db.session.commit()
return result.id
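# Hedged note: run_query executes inside an RQ worker process (see task_queue.enqueue_call
# below), which is why it pushes its own Flask app context above; the route only receives
# the job id and later resolves the stored Result via Job.fetch in get_results().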
@bp.route('/query/<query_type>', methods=['GET', 'POST'])
@login_required
def make_a_query(query_type):
"""
"""
if query_type == 'author_papers':
form = authorIndexQueryForm()
elif query_type == 'affil_papers':
form = authorIndexQueryForm()
if form.validate_on_submit():
if current_app.config['ASYNC_FUNC']:
from app.main.routes import run_query
### If async == True, queue a task with the args from the form
job = current_app.task_queue.enqueue_call(
func=run_query,
args=(query_type,
form.query_text.data, form.query_from.data,
form.locations.data, form.affiliations.data,
form.api_key.data, current_user.username),
result_ttl=current_app.config['RESULT_TTL'],
timeout=current_app.config['WORKER_TIMEOUT'])
flash(f'Your query is running! Your ID is : {job.get_id()}')
return get_results(job.get_id())
elif not current_app.config['ASYNC_FUNC']:
### Run the query without task queue if async == False
if query_type == 'affil_papers':
affil_dicts = query_affil_papers(query = form.query_text.data,
from_year = form.query_from.data,
locations = form.locations.data,
n_authors = 25,
affils = form.affiliations.data,
api_key = form.api_key.data,
api_out = False)
n_results = sum([affil_dict['total_count'] for affil_dict in \
affil_dicts.values()])
length_of_results = len(affil_dicts.keys())
return render_template('query_results/affil_papers.html', \
data = affil_dicts, n_results = n_results, unique_results = length_of_results), 200
elif query_type == 'author_papers':
author_dicts = query_author_papers(query = form.query_text.data,
from_year = form.query_from.data,
locations = form.locations.data,
n_authors = 25,
affils = form.affiliations.data,
api_key = form.api_key.data,
api_out = False)
n_results = sum([author_dict.get('total_count', 0) for author_dict in \
author_dicts.values()])
length_of_results = len(author_dicts.keys())
return render_template('query_results/author_papers.html', \
data = author_dicts, n_results = n_results, unique_results = length_of_results), 200
return render_template('make_a_query.html', form=form)
@bp.route("/results/<job_key>", methods=['GET'])
def get_results(job_key):
"""
    Results page for <job_key>. If the job is still running, this will redirect to the same page with a link to refresh again. When it's done,
    the refresh link will link to the tables.
"""
job = Job.fetch(job_key, connection=current_app.redis)
### Return results
if job.is_finished and job.result:
result = Result.query.filter_by(id=job.result).first()
if result.result_all.get('error'):
return render_template('errors/data_error.html', data = result.result_all.get('error'),
query_text = result.query_text, query_from = result.query_from ,
query_location = result.query_locations, query_affiliations = result.query_affiliations)
n_results = sum([author_dict.get('total_count', 0) for author_dict in \
result.result_all.values()])
### Return different pages for different queries
if result.query_type == 'affil_papers':
return render_template('query_results/affil_papers.html', \
data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200
elif result.query_type == 'author_papers':
return render_template('query_results/author_papers.html', \
data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200
### Refresh if job is still processing
else:
return render_template('query_results/processing.html', job_key = job_key), 202
#######
@bp.route('/api/help/', methods = ['GET'])
def help():
return {'endpoints' : {'/api/query/author_affils/' : {'parameters' :
{'query' : '', 'from' : '', 'locations' : '', 'n' : ''}, 'info' : ''},
'/api/query/author_papers/' : {'parameters' :
{'query' : '', 'from' : '', 'locations' : '', 'n' : ''}, 'info' : ''}
},
'general_notes' : '<NAME>'}
@bp.route('/api/query/author_papers/', methods = ['GET'])
def query_author_papers(query = "", from_year = "",
locations = "", n_authors = "",
affils = "", api_key = "", api_out = True):
timeit_start = time.time()
"""if request.args.get('query'):
query = request.args.get('query')
if request.args.get('from'):
from_year = int(request.args.get('from', 2000))
if request.args.get('locations'):
locations = request.args.get('locations', [])
if request.args.get('n', 25):
n_authors = request.args.get('n', 25)
if request.args.get('affiliations', []):
affils = request.args.get('affiliations', [])
if request.args.get('api_key'):
api_key = request.args.get('api_key')
if request.args.get('api_out'):
api_out = request.args.get('api_out')"""
if locations:
locations = [location.strip().lower() for location in locations.split(',')]
if affils:
affils = [affil.strip().lower() for affil in affils.split(',')]
if not api_key:
no_key_dict = {'error' : 'Please supply an API key to run your query under!'}
if api_out == True:
return jsonify(no_key_dict)
else:
return no_key_dict
out_dict = query_author_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)
timeit_end = time.time()
print(f'`query_author_papers` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
if api_out == True:
return jsonify(out_dict)
else:
return out_dict
@bp.route('/api/query/affil_papers/', methods = ['GET'])
def query_affil_papers(query = "",
from_year = "",
locations = "",
n_authors = "",
affils = "",
api_key = "",
api_out = True):
timeit_start = time.time()
#if request.args.get('query'):
# query = request.args.get('query')
#if request.args.get('from'):
# from_year = int(request.args.get('from', 2000))
#if request.args.get('locations'):
# locations = request.args.get('locations', [])
##if request.args.get('n', 25):
# n_authors = request.args.get('n', 25)
#if request.args.get('affiliations', []):
# affils = request.args.get('affiliations', [])
#if request.args.get('api_key'):
# api_key = request.args.get('api_key')
if locations:
locations = [location.strip().lower() for location in locations.split(',')]
if affils:
affils = [affil.strip().lower() for affil in affils.split(',')]
if not api_key:
no_key_dict = {'error' : 'Please supply an API key to run your query under!'}
if api_out == True:
return jsonify(no_key_dict)
else:
return no_key_dict
out_dict = query_affil_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)
timeit_end = time.time()
print(f'`query_affil_papers` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
if api_out == True:
return jsonify(out_dict)
else:
return out_dict | 1.460938 | 1 |
finetune.py | kbehouse/vgg-face-keras | 3 | 12791467 | from keras.engine import Model
from keras.layers import Flatten, Dense, Input
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from vggface import VGGFace
from sklearn.metrics import log_loss
from one_face_predict import prdict_one_face
from load_face_data import load_face_data
from facetool import FaceTool
def train_face_model(finetune = True):
#===============custom parameters =============== #
hidden_dim = 512
img_width, img_height = 224, 224
nb_class = 16
One_Class_Train_MAX = 30
One_Class_Valid_MAX = 10
nb_train_samples = nb_class * One_Class_Train_MAX
nb_validation_samples = nb_class * One_Class_Valid_MAX
nb_epoch = 10
batch_size = 16
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
save_model_path = './faceDB/face-model.json'
save_model_h5 = './faceDB/face-model.h5'
save_face_index = './faceDB/face-index.json'
# =============== NN =============== #
vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
# print('----------------After Add finetune layers----------------')
# for l in vgg_model.layers:
# print('Name ', l.name, 'trainable' ,l.trainable)
last_layer = vgg_model.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
x = Dense(hidden_dim, activation='relu', name='fc6')(x)
x = Dense(hidden_dim, activation='relu', name='fc7')(x)
out = Dense(nb_class, activation='softmax', name='fc8')(x)
custom_vgg_model = Model(vgg_model.input, out)
if finetune:
# print('----------------After Disable Trainable----------------')
all_layers = custom_vgg_model.layers
pool5_index = custom_vgg_model.layers.index(custom_vgg_model.get_layer('pool5'))
for ind, l in enumerate(all_layers):
if ind <= pool5_index:
l.trainable = False
# all_layers[:pool5_index].trainable = False
# for ind, l in enumerate(all_layers):
# print('Name ', l.name, 'trainable' ,l.trainable,'index',ind)
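        # With the loop above applied, every layer up to and including 'pool5'
        # reports trainable == False, so only the new fc6/fc7/fc8 head receives
        # gradient updates in fit() below.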
# Train your model as usual.
# You can Try different optimizers
# opt = optimizers.SGD(lr=1e-5, decay=1e-6) #OK
# adagrad = optimizers.Adagrad( decay=1e-6)
# opt = optimizers.Adadelta( )
opt = optimizers.Adam(lr=1e-5, decay=1e-6)
custom_vgg_model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
custom_vgg_model.summary()
X_train, Y_train, X_valid, Y_valid, Face_Label_Dic = load_face_data('data/')
ftool = FaceTool()
ftool.write_json(save_face_index,Face_Label_Dic)
# Start Fine-tuning
custom_vgg_model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
)
# Make predictions
predictions_valid = custom_vgg_model.predict(X_valid, batch_size=batch_size, verbose=1)
# Cross-entropy loss score
score = log_loss(Y_valid, predictions_valid)
# ===============Save Model===============
print("Saved model to disk")
model_json = custom_vgg_model.to_json()
with open(save_model_path, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
custom_vgg_model.save_weights(save_model_h5)
# ===============Test===============
face_index = prdict_one_face(custom_vgg_model, 'data/test/1.jpg')
    print(Face_Label_Dic[face_index])
    face_index = prdict_one_face(custom_vgg_model, 'data/test/2.jpg')
    print(Face_Label_Dic[face_index])
    face_index = prdict_one_face(custom_vgg_model, 'data/test/3.jpg')
    print(Face_Label_Dic[face_index])
if __name__ == '__main__':
train_face_model(False) | 1.867188 | 2 |
api-intent/app.py | ClimenteA/Python-Chalice-AWSLambda-APIGateway | 1 | 12791595 | <gh_stars>1-10
import requests
from chalice import Chalice
from chalicelib import (
productSchemaIsValid,
cleanProductData,
uploadFromMediaUrls,
saveProductData,
getProductDataByID,
deleteProductDataByID
)
app = Chalice(app_name='api-intent')
# All commented lines are for debugging
# app.debug = False
# http localhost:8000
@app.route('/')
def index():
return {'hello': 'world!'}
# http POST localhost:8000/products
# http POST localhost:8000/products URL="https://bad-url.nogood"
# http POST localhost:8000/products URL="https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json"
# image_list = [
# "https://softgata.com/assets/django.png",
# "https://softgata.com/assets/fastapi.svg",
# "https://softgata.com/assets/svelte.svg"
# ]
@app.route('/products', methods=['POST'])
def saveProduct():
payload = app.current_request.json_body
if not payload:
return { "message": "URL not found!", "productData": None }
    if 'URL' not in payload:
        return { "message": "URL not found!", "productData": None }
    try:
        product_data = requests.get(payload["URL"]).json()
    except Exception:
        return { "message": "Invalid URL!", "productData": None }
#product_data = {"bad": "productData"}
if not productSchemaIsValid(product_data):
return { "message": "Invalid product schema!", "productData": None }
product_data = cleanProductData(product_data)
product_data["media"]["uploadedImages"] = uploadFromMediaUrls(product_data["media"]["images"])
#product_data["media"]["uploadedImages"] = uploadFromMediaUrls(image_list)
try:
saveProductData(product_data)
except Exception as e:
return { "message": str(e), "productData": None }
return { "message": "Success!", "productData": product_data }
# http localhost:8000/products/42
@app.route('/products/{productId}', methods=['GET'])
def getProduct(productId):
try:
product_data = getProductDataByID(productId)
if not product_data: return {"message": "Product not found!"}
return { "message": "Success!", "productData": product_data }
    except Exception:  # e.g. when productId is not a valid int
return {"message": "Missing ID!", "productData": None}
# http localhost:8000/products/
@app.route('/products', methods=['GET'])
def failProduct():
return {"message": "Missing ID!", "productData": None}
# http localhost:8000/products/42/delete
@app.route('/products/{productId}/delete', methods=['GET'])
def deleteProduct(productId):
try:
deleteProductDataByID(productId)
return {"message": "Product deleted!"}
except:
return {"message": "Missing ID!"}
| 1.5 | 2 |
libs/redis.py | fightingfish008/tornado-extensions | 5 | 12791723 | <reponame>fightingfish008/tornado-extensions
# -*- coding:utf-8 -*-
import traceback
import logging
import aioredis
from tornado.options import options
class AsyncRedisClient(object):
    def __init__(self, loop=None):
self.loop = loop
async def init_pool(self, db=None):
if db is None:
_db = options.redis_db4
else:
_db = db
uri = 'redis://{}:{}/{}'.format(
options.redis_host,
options.redis_port,
_db
)
self.pool = await aioredis.create_pool(
uri,
password=options.redis_password,
# encoding="utf-8",
minsize=5,
maxsize=10,
loop = self.loop,
)
super(AsyncRedisClient, self).__init__()
async def execute(self, command, *args, **kwargs):
try:
async with self.pool.get() as conn:
                result = await conn.execute(command, *args, **kwargs)
                return result
except Exception as e:
logging.error(traceback.print_exc())
logging.error("redis execute error: %s", e)
async def get(self, key):
return await self.execute('get', key)
async def set(self, key, value):
return await self.execute('set', key, value)
async def setex(self, key, seconds, value):
return await self.execute('setex', key, seconds, value)
async def keys(self, key):
return await self.execute('keys', key)
async def hgetall(self, key):
return await self.execute('hgetall', key)
async def scan(self, key):
return await self.execute('scan', key)
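# Usage sketch for the pool-backed client above (assumes tornado `options`
# already define redis_host/redis_port/redis_password; key and db index are
# illustrative). See the `connect` helper below:
#
#   import asyncio
#
#   async def demo():
#       client = await connect(loop=None, db=4)
#       await client.setex("session:abc", 60, "payload")
#       print(await client.get("session:abc"))
#
#   asyncio.get_event_loop().run_until_complete(demo())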
async def connect(loop, db=None):
client = AsyncRedisClient(loop)
await client.init_pool(db)
return client | 1.390625 | 1 |
allink_core/core_apps/allink_legacy_redirect/config.py | allink/allink-core | 5 | 12791851 | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class AllinkLegacyConfig(AppConfig):
name = 'allink_core.core_apps.allink_legacy_redirect'
verbose_name = "Legacy Redirect"
| 0.765625 | 1 |
pythonfile/theoretic_graphs.py | penguinoneshaw/MPhysProject | 0 | 12791979 | <gh_stars>0
#!/usr/bin/env python3
from ctypes import cdll, c_double, CFUNCTYPE
import numpy as np
import seaborn
from pathlib import Path
import matplotlib
matplotlib.use("pgf")
pgf_with_pdflatex = {
"pgf.texsystem": "pdflatex",
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True,
"errorbar.capsize": 0.5,
"pgf.preamble": [
r"\usepackage[utf8]{inputenc}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{mathpazo}",
r"\usepackage[version-1-compatibility]{siunitx}"
]
}
matplotlib.rcParams.update(pgf_with_pdflatex)
from matplotlib import pyplot as plt
# import netCDF4
try:
lib = cdll.LoadLibrary("pythonfile/libProjectPython.dylib")
except OSError as e:
lib = cdll.LoadLibrary("pythonfile/libProjectPython.so")
lib.unesco_depth.argtypes = [c_double,c_double,c_double]
lib.unesco_depth.restype = c_double
lib.unesco_pressure.argtypes = [c_double,c_double,c_double]
lib.unesco_pressure.restype = c_double
lib.leroy_et_al.argtypes = [c_double,c_double,c_double]
lib.leroy_et_al.restype = c_double
lib.ideal_sound_channel.argtypes = [c_double,c_double,c_double,c_double,c_double]
lib.ideal_sound_channel.restype = c_double
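# With argtypes/restype declared, the shared-library functions can be called
# like ordinary Python functions, e.g. lib.unesco_depth(1000.0, 10.0, 35.0)
# returns a float; the argument order (depth/pressure, temperature, salinity)
# is inferred from the plotting calls below.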
depths = np.linspace(0,2000,5000,dtype=np.double)
pressures = np.linspace(0,1000,5000,dtype=np.double)
temps = np.linspace(0, 40, 100, dtype=np.double)
salinities = np.linspace(0, 40, 100, dtype=np.double)
ufunc_unesco = np.frompyfunc(lib.unesco_depth, 3, 1)
ufunc_leroy = np.frompyfunc(lib.leroy_et_al, 3, 1)
ufunc_ideal = np.frompyfunc(lib.ideal_sound_channel, 5, 1)
def plot_contours(ufunc, title, filename):
fig, plots = plt.subplots(2, 2, 'col', 'row', True, gridspec_kw={
'hspace': 0.3, 'bottom': 0.08, 'top': 0.92}, figsize=(5,5))
t, d = np.meshgrid(temps, depths)
cp = plots[0][0].contour(t, d, ufunc(d,t, 35))
plt.clabel(cp, fmt="%d", rightside_up=False)
plots[0][0].set_ylim(2000, 0)
plots[0][0].set_ylabel("Depth (m)")
plots[0][0].set_xlabel(r"Temperature (\si{\degreeCelsius})")
s, d = np.meshgrid(salinities, depths)
cp = plots[0][1].contour(s, d, ufunc(d, 10, s))
plt.clabel(cp, fmt="%d", rightside_up=False)
plots[0][1].set_ylim(2000, 0)
plots[0][1].set_ylabel("Depth (m)")
plots[0][1].set_xlabel("Salinity (ppt)")
t,s = np.meshgrid(temps, salinities)
cp = plots[1][0].contour(t,s, ufunc(1000, t, s))
plt.clabel(cp, fmt="%d")
plots[1][0].set_xlabel(r"Temperature (\si{\degreeCelsius})")
plots[1][0].set_ylabel("Salinity (ppt)")
fig.suptitle(title)
plots[0][0].grid()
plots[0][1].grid()
plots[1][0].grid()
fig.delaxes(plots[1][1])
fig.savefig(filename)
plot_contours(ufunc_unesco, "UNESCO Equation (Chen and Millero 1995)", Path("final_output/figures/unesco.pdf"))
plot_contours(ufunc_leroy, "Leroy et al. 2008", Path("final_output/figures/leroy.pdf"))
plt.figure()
plt.plot(1000*ufunc_ideal(depths, 1160, 1.3, 1.45, 1.14e-3), depths)
plt.ylim(2000, 0)
plt.xlabel(r"Speed of Sound (\si{\meter\per\second})")
plt.ylabel(r"Depth (\si{\meter})")
plt.savefig(Path("final_output/figures/ideal.pdf")) | 1.507813 | 2 |
python/setup.py | gitter-badger/rikai | 0 | 12792107 | import pathlib
import re
from setuptools import find_packages, setup
about = {}
with open(pathlib.Path("rikai") / "__version__.py", "r") as fh:
exec(fh.read(), about)
with open(
pathlib.Path(__file__).absolute().parent.parent / "README.md",
"r",
) as fh:
long_description = fh.read()
# extras
test = ["pytest"]
torch = ["torch>=1.5.0", "torchvision"]
jupyter = ["matplotlib", "jupyterlab"]
aws = ["boto"]
docs = ["sphinx"]
youtube = ["pafy", "youtube_dl", "ffmpeg-python"]
all = test + torch + jupyter + aws + docs + youtube
setup(
name="rikai",
version=about["version"],
license="Apache License, Version 2.0",
author="<NAME>",
author_email="<EMAIL>",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/eto-ai/rikai",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.7",
install_requires=[
"antlr4-python3-runtime",
"ipython",
"jsonschema",
"numpy",
"opencv-python",
"pandas",
"Pillow",
"pyarrow>=2.0",
"pyspark>=3.1,<3.2",
"pyyaml",
"requests",
],
extras_require={
"test": test,
"pytorch": torch,
"jupyter": jupyter,
"aws": aws,
"docs": docs,
"youtube": youtube,
"all": all,
},
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
],
)
| 0.855469 | 1 |
install.py | InfinityMarketing/Harbor-Script | 0 | 12792235 | <reponame>InfinityMarketing/Harbor-Script
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 3 14:20:50 2017
@author: <NAME>
"""
import zipfile
import os
import requests
import glob
import subprocess
import platform
import argparse
import re
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--update',
action='store_true',
dest='update',
default=False,
help="Check for updates to the harbor script")
parser.add_argument('-i', '--install', action='store_true',
dest='install',
default=False,
help="Install harbor theme in current directory")
parser.add_argument('-d', '--directory', action='store',
dest='dir',
help="Specify a directory to install Harbor theme to or if -u option is present updates the Harbor based theme in that directory")
plat = platform.system()
results = parser.parse_args()
install = results.install
update = results.update
setup_dir = results.dir
if(install):
if setup_dir is not None:
os.chdir(setup_dir)
if platform.system() != "Windows":
if os.getuid() != 0:
print("Please run this script as root")
print("Example: 'sudo python3 setup.py'")
return
#download theme zip
if fetchArchive() == False:
return 1
print("Setting up Theme...")
slug = setupTheme()
setupEnvironment(slug)
elif update:
if setup_dir is not None:
updateTheme(setup_dir)
else:
print("Checking for updates to Harbor script...")
print("Up to date!")
else:
parser.print_usage()
def updateTheme(directory):
os.chdir(directory)
print("Updating theme...")
os.system("bower list > updates.tmp")
update_avail = re.compile("\(([0-9]\.)*[0-9] available\)")
nameRe = re.compile("[a-z]+-*[a-z]*#")
#print(update_avail.findall("├─┬ breakpoint-sass#2.5.0 "))
#exit(0)
with open("updates.tmp", "r") as update_file:
for line in update_file:
results = update_avail.findall(line)
if results != []:
print(line)
nameMatch = nameRe.search(line)
name = nameMatch.group()[:-1]
ans = input("Update module?(Y/n)")
while ans != "" and ans.lower()[0] != 'y' and ans.lower()[0] != 'n':
ans = input("Update module?(Y/n)")
if(ans == "" or ans.lower()[0] == 'y'):
print("updating", name, sep=" ")
os.system("bower update " + name)
print("")
print("Done!")
# Downloads the starter theme _s from github
def fetchArchive():
try:
os.remove("sass-restructure.zip")
except FileNotFoundError:
pass
print("Downloading Theme files...", end=' ')
file = requests.get("https://github.com/Infinity-Marketing/Harbor/archive/sass-restructure.zip")
if file.status_code != 200:
print("Error: There was a problem while downloading the files.\n\tAborting. ")
return False
with open("sass-restructure.zip", "wb") as content:
content.write(file.content)
print("Done!")
print("Extracting files...", end=' ')
with zipfile.ZipFile("sass-restructure.zip", "r") as file:
file.extractall(".")
print("Done!")
return True
def setupTheme():
name = input("Enter a name for the theme: ")
slug = name.lower().replace(' ', '-')
funcSlug = name.lower().replace(' ', '_')
desc = input("Enter a short description for the theme: ")
print("Setting up Theme...", end=' ')
os.rename("./Harbor-sass-restructure", "./" + slug)
files = glob.glob("./" + slug + "/*.php")
for filename in glob.glob("./" + slug + "/*/*.php"):
files.append(filename)
strings = []
strings.append(("'harbor'", "'" + slug + "'"))
strings.append(("harbor_", funcSlug + "_"))
strings.append((" <code> Harbor</code>", " <code> " + name.replace(' ', '_') + "</code>"))
strings.append(("Harbor-", slug + "-"))
findInFiles(strings, files)
headerInfo = []
headerInfo.append(("Text Domain: harbor", "Text Domain: " + slug))
headerInfo.append(("Theme Name: Harbor", "Theme Name: " + name))
headerInfo.append(("Description: Harbor is a starter theme and development environment setup by Infinity Marketing that is heavily based on Automattic's Underscores theme.", "Description: " + desc))
findInFiles(headerInfo, ["./" + slug + "/style.css", "./" + slug + "/sass/style.scss"])
print('Done!')
return slug
def findInFiles(strings, files):
for filename in files:
file = open(filename, "r")
filedata = file.read()
file.close()
for change in strings:
filedata = filedata.replace(change[0], change[1])
file = open(filename, "w")
file.write(filedata)
file.close()
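# Example (paths and replacement pairs are hypothetical):
#   findInFiles([("'harbor'", "'my-theme'")], ["./my-theme/functions.php"])
# rewrites every occurrence in place, mirroring the calls in setupTheme above.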
def setupEnvironment(slug):
cmd = "where" if platform.system() == "Windows" else "which"
    npm = subprocess.run(cmd + " npm", shell=True)
if npm.returncode == 1:
print("NodeJs is not installed. Aborting")
return
    bower = subprocess.run(cmd + " bower", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if bower.returncode == 1:
print("Bower is not installed.")
print("Installing bower...")
subprocess.run("npm install -g bower", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Done!")
    gulp = subprocess.run(cmd + " gulp", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if gulp.returncode == 1:
print("Gulp is not installed")
print("Installing Gulp...", end=' ')
subprocess.run("npm install -g gulp", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Done!")
print("Installing dependancies...")
subprocess.run("bower install", shell=True, cwd="./"+slug)
subprocess.run("npm install", shell=True, cwd="./"+slug)
print("Done!")
if(__name__ == "__main__"):
main()
| 1.554688 | 2 |
quad9_plot.py | lindsayad/python | 0 | 12792363 | <filename>quad9_plot.py
import yt
ds = yt.load("/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e", step=-1)
slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y'))
slc.set_log(('connect1','vel_y'), False)
slc.set_width((1, 1))
slc.save()
| 0.984375 | 1 |
trello/cards/views.py | copydataai/clon-trello | 0 | 12792491 |
# DRF
from rest_framework.viewsets import ModelViewSet
from rest_framework import permissions
from rest_framework.permissions import IsAuthenticated
# Serializer
from trello.cards.serializers import CardSerializer
# Model
from trello.cards.models import Card
class CardViewSet(ModelViewSet):
serializer_class = CardSerializer
    queryset = Card.objects.all()  # TODO: restrict to cards in a given list, as originally intended
permission_classes = [permissions.IsAuthenticated]
| 0.882813 | 1 |
functions/load_nag.py | daviddoret/pyxag | 1 | 12792619 | from . import *
def load_nag(path):
"""Load a NAG from a file"""
with open(path, 'r') as nag_file:
nag_json = nag_file.read()
nag = convert_json_to_nag(nag_json)
return nag
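# Example usage (the path is hypothetical):
#   nag = load_nag("graphs/full_adder.json")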
| 1.210938 | 1 |
tirelire-auth/tests/unit/test_handlers.py | AgRenaud/tirelire | 0 | 12792747 | <reponame>AgRenaud/tirelire
from unittest import TestCase
from typing import List
from app import bootstrap
from app.domain import commands, model
from app.service_layer import handlers
from app.service_layer.unit_of_work import UnitOfWork
from app.service_layer.auth_service import AuthService
class FakeRepository:
def __init__(self, users: List[model.User]):
self._users = set(users)
self.seen = set()
def add(self, user: model.User):
self._users.add(user)
def get(self, id: str):
return next((u for u in self._users if u.id == id), None)
def get_by_email(self, email: str):
return next((u for u in self._users if u.email == email), None)
def list(self):
return self._users
class FakeAuthService:
def verify_password(self, password: str, user: model.User) -> bool:
return hash(password) == hash(user.password)
def encrypt_password(self, password: str) -> str:
return hash(password)
def generate_token(self, password: str, user: model.User) -> dict:
return password
def verify_token(self, token: str) -> bool:
return token
class FakeUnitOfWork(UnitOfWork):
def __init__(self):
        self.users = FakeRepository([])
self.auth_service = FakeAuthService()
self.committed = False
def _commit(self):
self.committed = True
def rollback(self):
pass
def bootstrap_test_app():
return bootstrap.bootstrap(
start_orm=False,
uow=FakeUnitOfWork(),
)
class TestHandlers(TestCase):
def test_create_user_must_create_user(self):
uow = bootstrap_test_app()
command = commands.CreateUser(
"id1234",
"secure_password",
"john",
"doe",
"<EMAIL>"
)
handlers.create_user(command, uow, lambda *args: None)
self.assertIsNotNone(uow.users.get('id1234'))
def test_add_app_auth_to_user_must_return(self):
uow = bootstrap_test_app()
command = commands.CreateUser(
"id1234",
"secure_password",
"john",
"doe",
"<EMAIL>"
)
handlers.create_user(command, uow, lambda *args: None)
app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP)
command = commands.AddAuthorizationToUser("id1234", app_auth_1)
handlers.add_app_auth_to_user(command, uow)
app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB)
command = commands.AddAuthorizationToUser("id1234", app_auth_2)
handlers.add_app_auth_to_user(command, uow)
user = uow.users.get('id1234')
self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2})
def test_get_token_must_return_token(self):
uow = bootstrap_test_app()
command = commands.CreateUser(
"id1234",
"secure_password",
"john",
"doe",
"<EMAIL>"
)
handlers.create_user(command, uow, lambda *args: None)
cmd = commands.Authenticate("<EMAIL>", "secure_password")
token = handlers.get_token(cmd, uow)
# TODO: Fake token generation
def verify_token_must_return(self):
pass
| 1.804688 | 2 |
onetrack/TrackingData.py | murnanedaniel/OneTrack | 1 | 12792875 | <filename>onetrack/TrackingData.py
# import all
import os
import sys
import logging
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy import sparse as sps
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
import networkx as nx
from functools import partial
from .tracking_utils import *
from .plotting_utils import *
def load_single_pytorch_file(file):
"""
Loads a single Pytorch Geometric file
"""
return torch.load(file, map_location="cpu")
def build_single_candidates(instance, building_method, sanity_check, **kwargs):
instance.build_candidates(building_method, sanity_check, **kwargs)
return instance
def evaluate_single_candidates(instance, evaluation_method, **kwargs):
instance.evaluate_candidates(**kwargs)
return instance
class TrackingData():
"""
A class that holds a list of Events, specifically for the tracking pipeline.
An Event contains a Graph and an EventTruth object.
"""
def __init__(self, files):
self.files = files
self.event_data = None
self.events = None
self.evaluation = None
logging.info("Loading files")
self.__load_files()
assert self.event_data is not None # Test if files are loaded
logging.info("Building events")
self.__build_events()
assert self.events is not None # Test if events are built
def __len__(self):
return len(self.events)
def __getitem__(self, idx):
event = self.events[idx]
return event
def __load_files(self):
"""
Loads files based on type
"""
file_type = self.__get_file_type()
if file_type == "pytorch_geometric":
self.event_data = self.__load_pytorch_files()
else:
raise ValueError("Unknown file type")
def __build_events(self):
"""
Builds Event objects from event data
"""
# self.events = []
# for data in tqdm(self.event_data):
# self.events.append(Event(data))
self.events = process_map(Event, self.event_data)#, max_workers=1)
def __get_file_type(self):
"""
Determine type of file
"""
try:
sample = torch.load(self.files[0], map_location="cpu")
if str(type(sample)) == "<class 'torch_geometric.data.data.Data'>":
return "pytorch_geometric"
else:
raise ValueError("Unknown file type, this is not a Pytorch Geometric file")
except:
raise ValueError("Unknown file type, there are still more file types to be added!")
def __load_pytorch_files(self):
"""
Loads all Pytorch geometric files in file list
"""
# data = []
# for file in tqdm(self.files):
# data.append(torch.load(file, map_location="cpu"))
data = process_map(load_single_pytorch_file, self.files)#, max_workers=1)
return data
def build_candidates(self, building_method="CC", sanity_check=False, **kwargs):
"""
Builds track candidates from events
"""
logging.info(f"Building candidates with sanity check: {sanity_check}")
build_single_candidates_partial = partial(build_single_candidates, building_method=building_method, sanity_check=sanity_check, **kwargs)
self.events = process_map(build_single_candidates_partial, self.events, max_workers=8)
# for event in tqdm(self.events):
# event.build_candidates(building_method, sanity_check, **kwargs)
def evaluate_candidates(self, evaluation_method="matching", **kwargs):
"""
Evaluates track candidates from events
"""
logging.info("Evaluating candidates")
evaluate_single_candidates_partial = partial(evaluate_single_candidates, evaluation_method=evaluation_method, **kwargs)
self.events = process_map(evaluate_single_candidates_partial, self.events, max_workers=8)
# for event in tqdm(self.events):
# event.evaluate_candidates(evaluation_method, **kwargs)
# TODO: Tidy this up!
n_true_tracks, n_reco_tracks, n_matched_particles, n_matched_tracks, n_duplicated_tracks, n_single_matched_particles = 0, 0, 0, 0, 0, 0
for event in self.events:
n_true_tracks += event.candidates.evaluation["n_true_tracks"]
n_reco_tracks += event.candidates.evaluation["n_reco_tracks"]
n_matched_particles += event.candidates.evaluation["n_matched_particles"]
n_single_matched_particles += event.candidates.evaluation["n_single_matched_particles"]
n_matched_tracks += event.candidates.evaluation["n_matched_tracks"]
n_duplicated_tracks += event.candidates.evaluation["n_duplicated_tracks"]
building_method = event.candidates.building_method
self.evaluation = {
"building_method": building_method,
"evaluation_method": evaluation_method,
"eff": n_matched_particles / n_true_tracks,
"single_eff": n_single_matched_particles / n_true_tracks,
"fr": 1 - (n_matched_tracks / n_reco_tracks),
"dup": n_duplicated_tracks / n_reco_tracks,
}
print(self.evaluation)
print(f"n_true_tracks: {n_true_tracks}, n_reco_tracks: {n_reco_tracks}, n_matched_particles: {n_matched_particles}, n_matched_tracks: {n_matched_tracks}, n_duplicated_tracks: {n_duplicated_tracks}")
def plot_evaluation(self, metric="eff", observable="eta", **kwargs):
"""
Plots evaluation of candidates
"""
if self.evaluation is None:
raise ValueError("No evaluation available")
if self.evaluation["evaluation_method"] == "matching":
self.__plot_matching_evaluation(metric, observable, **kwargs)
else:
raise NotImplementedError("Plotting not implemented yet for that method")
def __plot_matching_evaluation(self, metric="eff", observable="eta", **kwargs):
"""
Plots matching evaluation of candidates
"""
all_particles = pd.concat([event.candidates.evaluation["particles"].merge(event.event_truth.particles, on="particle_id", how="inner") for event in self.events])
plot_observable_performance(all_particles)
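# Usage sketch (file paths are hypothetical; each file must hold a Pytorch
# Geometric Data object carrying an `event_file` attribute, as checked below):
#
#   import glob
#   files = sorted(glob.glob("events/*.pt"))
#   tracking_data = TrackingData(files)
#   tracking_data.build_candidates(building_method="CC", score_cut=0.5)
#   tracking_data.evaluate_candidates(evaluation_method="matching")
#   print(tracking_data.evaluation["eff"], tracking_data.evaluation["dup"])
#   tracking_data.plot_evaluation(metric="eff", observable="eta")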
class Event():
"""
An Event contains a Graph and an EventTruth object. It represents a unit of particle physics data.
"""
def __init__(self, data):
self.graph = None
self.event_truth = None
self.candidates = None
self.data = self.__process_data(data)
def __process_data(self, data):
"""
Processes data to be used in the pipeline
"""
if str(type(data)) == "<class 'torch_geometric.data.data.Data'>":
self.graph = Graph(data_dict = data.to_dict())
self.event_truth = EventTruth(event_file = data.event_file)
else:
raise ValueError("Unknown data type")
# Define representation
def __repr__(self):
return f"Event(graph=({len(self.graph.hits['x'])} hits, {self.graph.edges['edge_index'].shape[1]} edges), event_truth=({len(self.event_truth)} particles), candidates=({len(self.candidates)} candidates))"
def build_candidates(self, building_method="CC", sanity_check=False, **kwargs):
"""
Builds track candidates from event
"""
self.candidates = self.graph.build_candidates(building_method, sanity_check, **kwargs)
def evaluate_candidates(self, method="matching", **kwargs):
"""
Evaluates track candidates from event
"""
self.candidates.evaluate(method, self.event_truth, **kwargs)
class Graph():
def __init__(self, data_dict):
self.hits = None
self.edges = None
self.graph_data = None
assert type(data_dict) == dict, "Data must be a dictionary"
self.__process_data(data_dict)
# Test if data is loaded
assert self.hits is not None
assert self.edges is not None
assert self.graph_data is not None
# Define representation
def __repr__(self):
return f"Graph(hits={self.hits}, edges={self.edges}, graph_data={self.graph_data})"
def __len__(self):
return len(self.hits["x"])
def __process_data(self, data):
"""
Processes data to be used in the pipeline
"""
if type(data) == dict:
self.__get_hit_data(data)
self.__get_edge_data(data)
self.__get_graph_data(data)
else:
raise ValueError("Unknown data type")
def __get_hit_data(self, data):
"""
Returns hit data
"""
self.hits = {}
assert "x" in data.keys(), "At least need a feature called x, otherwise define default node feature in config" # Check if x is in data
for key in data.keys():
if len(data[key]) == len(data["x"]):
self.hits[key] = data[key]
def __get_edge_data(self, data):
"""
Returns edge data
"""
self.edges = {}
assert "edge_index" in data.keys(), "At least need a feature called edge_index, otherwise define default edge feature in config" # Check if edge_index is in data
for key in data.keys():
if (
len(data[key].shape) > 1 and data[key].shape[1] == data["edge_index"].shape[1] or
len(data[key].shape) == 1 and data[key].shape[0] == data["edge_index"].shape[1]
):
self.edges[key] = data[key]
def __get_graph_data(self, data):
"""
Returns graph data
"""
self.graph_data = {k: data[k] for k in data.keys() - (self.hits.keys() & self.edges.keys())}
def build_candidates(self, building_method="CC", sanity_check=False, **kwargs):
"""
Builds track candidates from graph
"""
if building_method == "CC":
candidates = self.__get_connected_components(sanity_check, **kwargs)
elif building_method == "AP":
candidates = self.__get_all_paths(sanity_check, **kwargs)
elif building_method == "KF":
candidates = self.__get_kf_candidates(**kwargs)
else:
raise ValueError("Unknown building method")
return candidates
def __get_connected_components(self, sanity_check=False, score_cut=0.5, **kwargs):
"""
Builds connected components from graph
"""
if sanity_check:
edge_mask = self.edges["y"].bool()
else:
edge_mask = self.edges["scores"] > score_cut
row, col = self.edges["edge_index"][:, edge_mask]
edge_attr = np.ones(row.size(0))
N = self.hits["x"].size(0)
sparse_edges = sps.coo_matrix((edge_attr, (row.numpy(), col.numpy())), (N, N))
num_candidates, candidate_labels = sps.csgraph.connected_components(sparse_edges, directed=False, return_labels=True)
candidates = Candidates(self.hits["hid"], candidate_labels, building_method="CC")
return candidates
def __get_kf_candidates(self, **kwargs):
"""
Builds KF candidates from graph
"""
raise NotImplementedError("KF candidates not implemented yet")
def __get_all_paths(self, sanity_check=False, score_cut=0.5, **kwargs):
"""
Returns all paths from graph
"""
if sanity_check:
edge_mask = self.edges["y"].bool()
else:
edge_mask = self.edges["scores"] > score_cut
# Order edges by increasing R
r, phi, z = self.hits["x"].T
R = np.sqrt(r**2 + z**2)
# in_edges are the nodes towards the inner of the detector, out_edges are the nodes towards the outer
in_edges, out_edges = self.edges["edge_index"][:, edge_mask]
# Ensure edges are numpy arrays
if (type(in_edges) != np.ndarray) or (type(out_edges) != np.ndarray):
in_edges = in_edges.numpy()
out_edges = out_edges.numpy()
# Sort edges by increasing R
wrong_direction_mask = R[in_edges] > R[out_edges]
in_edges[wrong_direction_mask], out_edges[wrong_direction_mask] = out_edges[wrong_direction_mask], in_edges[wrong_direction_mask]
starting_nodes = np.unique(in_edges[~np.isin(in_edges, out_edges)])
ending_nodes = np.unique(out_edges[~np.isin(out_edges, in_edges)])
# Build graph
G = nx.DiGraph()
G.add_edges_from(np.stack([in_edges, out_edges]).T)
all_paths = nx.shortest_path(G)
all_paths = {path: all_paths[path] for path in all_paths.keys() if path in starting_nodes}
valid_paths = [all_paths[start_key][end_key]
for start_key in all_paths.keys()
for end_key in all_paths[start_key].keys()
if (start_key != end_key and end_key in ending_nodes)]
hit_list = np.array(list(itertools.chain.from_iterable(valid_paths)))
track_label_list = np.repeat(np.arange(len(valid_paths)), [len(path) for path in valid_paths])
candidates = Candidates(hit_list, track_label_list, building_method="AP")
# TODO: CHECK THAT HIT ID IS USED CORRECTLY!!
return candidates
class EventTruth():
def __init__(self, event_file):
self.particles = None
self.hit_truth = None
assert type(event_file) == str or type(event_file) == np.str_, "Event file must be a string"
self.__process_data(event_file)
# Test data loaded properly
assert self.particles is not None
assert self.hit_truth is not None
# Define representation
def __repr__(self):
return f"EventTruth(particles={self.particles}, hit_truth={self.hit_truth})"
def __len__(self):
return len(self.particles)
def __process_data(self, event_file):
"""
Processes data to be used in the pipeline
"""
self.__get_particle_data(event_file)
self.__get_hit_truth_data(event_file)
def __get_particle_data(self, event_file):
"""
Returns particle data
"""
try:
particle_filename = event_file + "-particles.csv"
self.particles = pd.read_csv(particle_filename)
except:
raise ValueError("Could not find particles file")
def __get_hit_truth_data(self, event_file):
"""
Returns hit truth data
"""
try:
hit_truth_filename = event_file + "-truth.csv"
self.hit_truth = pd.read_csv(hit_truth_filename)
self.hit_truth = self.__process_hit_truth(self.hit_truth)
except:
raise ValueError("Could not find hit truth file")
def __process_hit_truth(self, hit_truth):
"""
Processes hit truth data
"""
hit_truth.drop_duplicates(subset=["hit_id"], inplace=True)
return hit_truth
class Candidates():
def __init__(self, hit_ids, track_ids, building_method, **kwargs):
self.hit_ids = hit_ids
self.track_ids = track_ids
self.building_method = building_method
self.evaluation = None
def __repr__(self):
return f"{self.__len__()} Candidates(hit_ids={self.hit_ids}, track_ids={self.track_ids})"
def __len__(self):
return len(np.unique(self.track_ids))
def get_df(self):
"""
Returns dataframe of candidates
"""
df = pd.DataFrame({"hit_id": self.hit_ids, "track_id": self.track_ids})
return df
def evaluate(self, method, event_truth, **kwargs):
"""
Returns evaluation of candidates
"""
if method == "matching":
self.evaluation = self.__matching_reconstruction(event_truth.particles, event_truth.hit_truth, **kwargs)
elif method == "iou":
self.evaluation = self.__iou_reconstruction(**kwargs)
else:
raise ValueError("Unknown method")
def __matching_reconstruction(self, particles, hit_truth, **kwargs):
"""
Evaluates track candidates from event with matching criteria. Criteria given by ratios of common hits in candidates ("reconstructed") and particles ("truth")
"""
particles, candidates = match_reco_tracks(self.get_df(), hit_truth, particles, build_method = self.building_method, **kwargs)
(n_true_tracks, n_reco_tracks,
n_matched_particles, n_single_matched_particles, n_matched_tracks,
n_duplicated_tracks, n_matched_tracks_poi) = get_statistics(particles, candidates)
evaluation = {
"evaluation_method": "matching",
"particles": particles,
"candidates": candidates,
"eff": n_matched_particles / n_true_tracks,
"fr": 1 - (n_matched_tracks / n_reco_tracks),
"dup": n_duplicated_tracks / n_reco_tracks,
"n_true_tracks": n_true_tracks,
"n_reco_tracks": n_reco_tracks,
"n_matched_particles": n_matched_particles,
"n_single_matched_particles": n_single_matched_particles,
"n_matched_tracks": n_matched_tracks,
"n_duplicated_tracks": n_duplicated_tracks,
"n_matched_tracks_poi": n_matched_tracks_poi
}
return evaluation
def __iou_reconstruction(self, **kwargs):
"""
Evaluates track candidates from event with Intersection over Union (IoU)
"""
raise NotImplementedError("IOU reconstruction not implemented yet")
| 1.882813 | 2 |
spinsim/__init__.py | rpanderson/spinsim | 0 | 12793003 | <filename>spinsim/__init__.py
"""
"""
# from . import utilities
from enum import Enum
import numpy as np
import numba as nb
from numba import cuda
from numba import roc
import math
sqrt2 = math.sqrt(2)
sqrt3 = math.sqrt(3)
class SpinQuantumNumber(Enum):
"""
Options for the spin quantum number of a system.
Parameters
----------
value : :obj:`float`
The numerical value of the spin quantum number.
dimension : :obj:`int`
Dimension of the hilbert space the states with this spin belong to.
label : :obj:`str`
A text label that can be used for archiving.
"""
def __init__(self, value, dimension, label):
super().__init__()
self._value_ = value
self.dimension = dimension
self.label = label
HALF = (1/2, 2, "half")
"""
For two level systems.
"""
ONE = (1, 3, "one")
"""
For three level systems.
"""
class IntegrationMethod(Enum):
"""
Options for describing which method is used during the integration.
Parameters
----------
value : :obj:`str`
A text label that can be used for archiving.
"""
MAGNUS_CF4 = "magnus_cf4"
"""
Commutator free, fourth order Magnus based integrator.
"""
MIDPOINT_SAMPLE = "midpoint_sample"
"""
Euler integration method.
"""
HALF_STEP = "half_step"
"""
Integration method from AtomicPy. Makes two Euler integration steps, one sampling the field from the start of the time step, one sampling the field from the end of the time step. The equivalent of the trapezoidal method.
"""
class ExponentiationMethod(Enum):
"""
The implementation to use for matrix exponentiation within the integrator.
Parameters
----------
value : :obj:`str`
A text label that can be used for archiving.
index : :obj:`int`
A reference number, used when compiling the integrator, where higher level objects like enums cannot be interpreted.
"""
def __init__(self, value, index):
super().__init__()
self._value_ = value
self.index = index
ANALYTIC = ("analytic", 0)
"""
Analytic expression of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only.
"""
LIE_TROTTER = ("lie_trotter", 1)
"""
Approximation using the Lie Trotter theorem.
"""
class Device(Enum):
"""
The target device that the integrator is being compiled for.
.. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
.. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
.. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html
"""
def __init__(self, value, index):
super().__init__()
self._value_ = value
self.index = index
if value == "python":
def jit_host(template, max_registers):
def jit_host(func):
return func
return jit_host
self.jit_host = jit_host
def jit_device(func):
return func
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return func
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "cpu_single":
def jit_host(template, max_registers):
def jit_host(func):
return nb.njit(template)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return nb.njit()(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return nb.njit(template)(func)
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "cpu":
def jit_host(template, max_registers):
def jit_host(func):
return nb.njit(template, parallel = True)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return nb.njit()(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return nb.njit(template)(func)
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "cuda":
def jit_host(template, max_registers):
def jit_host(func):
return cuda.jit(template, debug = False, max_registers = max_registers)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return cuda.jit(device = True, inline = True)(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return cuda.jit(template, device = True, inline = True)(func)
return jit_device_template
self.jit_device_template = jit_device_template
elif value == "roc":
def jit_host(template, max_registers):
def jit_host(func):
return roc.jit(template)(func)
return jit_host
self.jit_host = jit_host
def jit_device(func):
return roc.jit(device = True)(func)
self.jit_device = jit_device
def jit_device_template(template):
def jit_device_template(func):
return roc.jit(template, device = True)(func)
return jit_device_template
self.jit_device_template = jit_device_template
PYTHON = ("python", 0)
"""
Use pure python interpreted code for the integrator, ie, don't compile the integrator.
"""
CPU_SINGLE = ("cpu_single", 0)
"""
Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on a single CPU core.
.. note ::
To use this device option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy features.
"""
CPU = ("cpu", 0)
"""
Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on all CPU cores, in parallel.
.. note ::
To use this device option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy features.
"""
CUDA = ("cuda", 1)
"""
Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run on an Nvidia cuda compatible GPU, in parallel.
.. note ::
To use this device option, the user defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable python features.
"""
ROC = ("roc", 2)
"""
Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to run on an AMD ROCm compatible GPU, in parallel.
.. warning ::
Work in progress, not currently functional!
"""
class Results:
"""
The results of a an evaluation of the integrator.
Attributes
----------
time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)
The times that `state` was evaluated at.
    time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, y_index, x_index)
The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
The evaluated quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)
The expected spin projection (Bloch vector) over time. This is calculated just in time using the JITed :obj:`callable` `spin_calculator`.
spin_calculator : :obj:`callable`
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state. Used to calculate `spin` the first time it is referenced by the user.
Parameters:
* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns:
* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.
"""
def __init__(self, time, time_evolution, state, spin_calculator):
"""
Parameters
----------
time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)
The times that `state` was evaluated at.
        time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, y_index, x_index)
The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
The evaluated quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
spin_calculator : :obj:`callable`
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state. Used to calculate `spin` the first time it is referenced by the user.
Parameters:
* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns:
* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.
"""
self.time = time
self.time_evolution = time_evolution
self.state = state
self.spin_calculator = spin_calculator
def __getattr__(self, attr_name):
if attr_name == "spin":
spin = self.spin_calculator(self.state)
setattr(self, attr_name, spin)
return self.spin
raise AttributeError("{} has no attribute called {}.".format(self, attr_name))
class Simulator:
"""
Attributes
----------
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
threads_per_block : :obj:`int`
        The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value may improve execution speed for different GPU models.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
get_time_evolution_raw : :obj:`callable`
The internal function for evaluating the time evolution operator in parallel. Compiled for chosen device on object constrution.
Parameters:
* **sweep_parameter** (:obj:`float`) - The input to the `get_field` function supplied by the user. Modifies the field function so the integrator can be used for many experiments, without the need for slow recompilation. For example, if the `sweep_parameter` is used to define the bias field strength in `get_field`, then one can run many simulations, sweeping through bias values, by calling this method multiple times, each time varying `sweep_parameter`.
* **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state` was evaluated at.
* **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that the experiment is to start at, and the time that the experiment is to finish at. Measured in s.
* **time_step_integration** (:obj:`float`) - The integration time step. Measured in s.
* **time_step_output** (:obj:`float`) - The sample resolution of the output timeseries for the state. Must be a whole number multiple of `time_step_integration`. Measured in s.
            * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, y_index, x_index)) - The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
spin_calculator : :obj:`callable`
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state. This :obj:`callable` is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there just in time if the `spin` property is needed. Compiled for chosen device on object constrution.
Parameters:
* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns:
* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.
"""
def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers = 63):
"""
.. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm
Parameters
----------
get_field : :obj:`callable`
A python function that describes the field that the spin system is being put under. It must have three arguments:
* **time_sample** (:obj:`float`) - the time to sample the field at, in units of s.
* **simulation_index** (:obj:`int`) - a parameter that can be swept over when multiple simulations need to be run. For example, it is used to sweep over dressing frequencies during the simulations that `spinsim` was designed for.
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field. This is a four dimensional vector, with the first three entries being x, y, z spatial directions (to model a magnetic field, for example), and the fourth entry being the amplitude of the quadratic shift (only appearing, and required, in spin one systems).
.. note::
This function must be compilable for the device that the integrator is being compiled for. See :class:`Device` for more information and links.
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
exponentiation_method : :obj:`ExponentiationMethod`
Which method to use for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details.
use_rotating_frame : :obj:`bool`
Whether or not to use the rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves into a frame rotating in the z axis by an amount defined by the field in the z direction. This removes the (possibly large) z component of the field, which increases the accuracy of the output since the integrator will on average take smaller steps.
.. note ::
                The use of a rotating frame is commonly associated with the use of a rotating wave approximation, a technique used to get approximate analytic solutions of spin system dynamics. This is not done when this option is set to :obj:`True` - no such approximations are made, and the output state is given out of the rotating frame. One can, of course, use :mod:`spinsim` to integrate states in the rotating frame, using the rotating wave approximation: just define `get_field()` with field functions that use the rotating wave approximation in the rotating frame.
integration_method : :obj:`IntegrationMethod`
Which integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details.
trotter_cutoff : :obj:`int`
The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen.
threads_per_block : :obj:`int`
            The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value may improve execution speed for different GPU models.
max_registers : :obj:`int`
The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the target device, and can be modified to increase the execution speed for a specific GPU model. Defaults to 63 (optimal for GTX1070, the device used for testing. Note that one extra register per thread is always added to the number specified for control, so really this number is 64).
            Raising this value allocates more registers (fast memory) to each thread, out of a maximum number for the whole GPU, for each specific GPU model. This means that if more registers are allocated than are available for the GPU model, the GPU must run fewer threads concurrently than it has Cuda cores, meaning some cores are inactive, and the GPU is said to have less occupancy. Lowering the value increases GPU occupancy, meaning more threads run concurrently, at the expense of fewer registers being available to each thread, meaning slower memory must be used. Thus, there will be an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and changing this value could increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation.
"""
if not device:
if cuda.is_available():
device = Device.CUDA
else:
device = Device.CPU
self.threads_per_block = threads_per_block
self.spin_quantum_number = spin_quantum_number
self.device = device
self.get_time_evolution_raw = None
self.get_spin_raw = None
try:
self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers)
except:
print("\033[31mspinsim error: numba could not jit get_field function into a device function.\033[0m\n")
raise
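    # Usage sketch grounded in the constructor documented above (the field
    # values and 1 kHz dressing amplitude are arbitrary illustrations):
    #
    #   import math
    #   import spinsim
    #
    #   def get_field(time_sample, sweep_parameter, field_sample):
    #       field_sample[0] = 2 * math.pi * 1e3 * math.cos(2 * math.pi * sweep_parameter * time_sample)
    #       field_sample[1] = 0.0
    #       field_sample[2] = 2 * math.pi * sweep_parameter
    #
    #   simulator = spinsim.Simulator(get_field, spinsim.SpinQuantumNumber.HALF)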
def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers = 63):
"""
Compiles the integrator and spin calculation functions of the simulator.
Parameters
----------
get_field : :obj:`callable`
A python function that describes the field that the spin system is being put under. It must have three arguments:
* **time_sample** (:obj:`float`) - the time to sample the field at, in units of s.
* **simulation_index** (:obj:`int`) - a parameter that can be swept over when multiple simulations need to be run. For example, it is used to sweep over dressing frequencies during the simulations that `spinsim` was designed for.
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field. This is a four dimensional vector, with the first three entries being x, y, z spatial directions (to model a magnetic field, for example), and the fourth entry being the amplitude of the quadratic shift (only appearing, and required, in spin one systems).
.. note::
This function must be compilable for the device that the integrator is being compiled for. See :class:`Device` for more information and links.
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
exponentiation_method : :obj:`ExponentiationMethod`
Which method to use for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details.
use_rotating_frame : :obj:`bool`
Whether or not to use the rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves into a frame rotating in the z axis by an amount defined by the field in the z direction. This removes the (possibly large) z component of the field, which increases the accuracy of the output since the integrator will on average take smaller steps.
.. note ::
The use of a rotating frame is commonly associated with the use of a rotating wave approximation, a technique used to get approximate analytic solutions of spin system dynamics. This is not done when this option is set to :obj:`True` - no such approximations are made, and the output state is given out of the rotating frame. One can, of course, use :mod:`spinsim` to integrate states in the rotating frame, using the rotating wave approximation: just define `get_field()` with field functions that use the rotating wave approximation in the rotating frame.
integration_method : :obj:`IntegrationMethod`
Which integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details.
trotter_cutoff : :obj:`int`
The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen.
threads_per_block : :obj:`int`
The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value may improve execution speed on different GPU models.
max_registers : :obj:`int`
The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the target device; this can be modified to increase the execution speed for a specific GPU model. Defaults to 63 (optimal for the GTX1070 used for testing; note that one extra register per thread is always added to the number specified for control, so the effective number is 64).
Raising this value allocates more registers (fast memory) to each thread, out of a fixed total for the whole GPU, which depends on the GPU model. If more registers are allocated per thread than are available for the GPU model, the GPU must run fewer threads concurrently than it has Cuda cores, leaving some cores inactive, and the GPU is said to have lower occupancy. Lowering the value increases GPU occupancy, meaning more threads run concurrently, at the expense of fewer registers being available to each thread, forcing slower memory to be used. Thus, there will be an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads against faster running threads, and changing this value could increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation.
"""
utilities = Utilities(spin_quantum_number, device, threads_per_block)
conj = utilities.conj
complex_abs = utilities.complex_abs
norm2 = utilities.norm2
inner = utilities.inner
set_to = utilities.set_to
set_to_one = utilities.set_to_one
set_to_zero = utilities.set_to_zero
matrix_multiply = utilities.matrix_multiply
adjoint = utilities.adjoint
matrix_exponential_analytic = utilities.matrix_exponential_analytic
matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter
jit_host = device.jit_host
jit_device = device.jit_device
jit_device_template = device.jit_device_template
device_index = device.index
dimension = spin_quantum_number.dimension
lie_dimension = dimension + 1
# utility_set = spin_quantum_number.utility_set
if not exponentiation_method:
if spin_quantum_number == SpinQuantumNumber.ONE:
exponentiation_method = ExponentiationMethod.LIE_TROTTER
elif spin_quantum_number == SpinQuantumNumber.HALF:
exponentiation_method = ExponentiationMethod.ANALYTIC
if integration_method == IntegrationMethod.MAGNUS_CF4:
sample_index_max = 3
sample_index_end = 4
elif integration_method == IntegrationMethod.HALF_STEP:
sample_index_max = 3
sample_index_end = 4
elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE:
sample_index_max = 1
sample_index_end = 1
exponentiation_method_index = exponentiation_method.index
if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF):
print("\033[31mspinsim warning!!!\n_attempting to use an analytic exponentiation method outside of spin half. Switching to a Lie Trotter method.\033[0m")
exponentiation_method = ExponentiationMethod.LIE_TROTTER
exponentiation_method_index = 1
@jit_device_template("(float64[:], complex128[:, :], complex128[:, :])")
def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse):
if device_index == 0:
time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128)
elif device_index == 1:
time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128)
elif device_index == 2:
time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)
time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :]
# Calculate the exponential
if exponentiation_method_index == 0:
matrix_exponential_analytic(field_sample, time_evolution_fine)
elif exponentiation_method_index == 1:
matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff)
# Premultiply to the existing time evolution operator
set_to(time_evolution_coarse, time_evolution_old)
matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse)
if use_rotating_frame:
if dimension == 3:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding):
X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding
field_sample[0] = X.real
field_sample[1] = X.imag
field_sample[2] = field_sample[2] - rotating_wave
transform_frame = transform_frame_spin_one_rotating
else:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding):
X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2)
field_sample[0] = X.real
field_sample[1] = X.imag
field_sample[2] = field_sample[2] - 2*rotating_wave
transform_frame = transform_frame_spin_half_rotating
else:
@jit_device_template("(float64[:], float64, complex128)")
def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding):
return
transform_frame = transform_frame_lab
get_field_jit = jit_device(get_field)
if integration_method == IntegrationMethod.MAGNUS_CF4:
@jit_device_template("(float64, float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse)
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse)
rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[1, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1])
w0 = (1.5 + sqrt3)/6
w1 = (1.5 - sqrt3)/6
field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0])
field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1])
field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2])
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3])
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0])
field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1])
field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2])
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3])
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
get_field_integration = get_field_integration_magnus_cf4
append_exponentiation_integration = append_exponentiation_integration_magnus_cf4
elif integration_method == IntegrationMethod.HALF_STEP:
@jit_device_template("(float64, float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = time_fine - time_coarse
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
time_sample = time_fine + time_step_integration - time_coarse
rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[1, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1])
field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2
field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2
field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2
field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2
field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2
if dimension > 2:
field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2
append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)
get_field_integration = get_field_integration_half_step
append_exponentiation_integration = append_exponentiation_integration_half_step
elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE:
@jit_device_template("(float64, float64, float64, float64, float64[:, :], float64, complex128[:])")
def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):
time_sample = time_fine + 0.5*time_step_integration - time_coarse
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)
time_sample += time_coarse
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
@jit_device_template("(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])")
def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):
transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])
field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0]
field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1]
field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2]
if dimension > 2:
field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3]
append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse)
get_field_integration = get_field_integration_midpoint
append_exponentiation_integration = append_exponentiation_integration_midpoint
@jit_device_template("(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64)")
def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter):
# Declare variables
if device_index == 0:
time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128)
field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64)
rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128)
elif device_index == 1:
time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128)
field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64)
rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128)
elif device_index == 2:
time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)
time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :]
field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64)
field_sample = field_sample_group[roc.get_local_id(1), :, :]
rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128)
rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :]
time_coarse[time_index] = time_end_points[0] + time_step_output*time_index
time_fine = time_coarse[time_index]
# Initialise time evolution operator to 1
set_to_one(time_evolution_coarse[time_index, :])
field_sample[0, 2] = 0
if use_rotating_frame:
time_sample = time_coarse[time_index] + time_step_output/2
get_field_jit(time_sample, sweep_parameter, field_sample[0, :])
rotating_wave = field_sample[0, 2]
if dimension == 2:
rotating_wave /= 2
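            # Halved for spin half systems: the spin half frame transform uses
            # 2*rotating_wave and a squared winding, consistent with the
            # J_z = diag(1/2, -1/2) convention used in the exponentials.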
# For every fine step
for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)):
get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding)
append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding)
time_fine += time_step_integration
if use_rotating_frame:
# Take out of rotating frame
rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output)
time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0]
time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0]
if dimension > 2:
time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0]
time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0]
time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0]
time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0]
else:
time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0]
time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0]
@jit_host("(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])", max_registers)
def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse):
"""
Find the stepwise time evolution operator.
Parameters
----------
sweep_parameter : :obj:`float`
    The input to the user supplied `get_field` function, which modifies the field for each simulation without needing recompilation.
time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index)
A coarse grained list of time samples that the time evolution operator is found for. In units of s. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time (1))
The time values for when the experiment is to start and finishes. In units of s.
time_step_integration : :obj:`float`
The time step used within the integration algorithm. In units of s.
time_step_output : :obj:`float`
The time difference between each element of `time_coarse`. In units of s. Determines the sample rate of the outputs `time_coarse` and `time_evolution_coarse`.
time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)
Time evolution operator (matrix) between the current and next timesteps, for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
"""
if device_index == 0:
for time_index in nb.prange(time_coarse.size):
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)
elif device_index == 1:
# Run calculation for each coarse timestep in parallel
time_index = cuda.grid(1)
if time_index < time_coarse.size:
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)
elif device_index == 2:
# Run calculation for each coarse timestep in parallel
time_index = roc.get_global_id(1)
if time_index < time_coarse.size:
get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)
return
@jit_host("(complex128[:, :], float64[:, :])", max_registers = max_registers)
def get_spin(state, spin):
"""
Calculate each expected spin value in parallel.
For spin half:
.. math::
\\begin{align*}
\\langle F\\rangle(t) = \\begin{pmatrix}
\\Re(\\psi_{+\\frac{1}{2}}(t)\\psi_{-\\frac{1}{2}}(t)^*)\\\\
-\\Im(\\psi_{+\\frac{1}{2}}(t)\\psi_{-\\frac{1}{2}}(t)^*)\\\\
\\frac{1}{2}(|\\psi_{+\\frac{1}{2}}(t)|^2 - |\\psi_{-\\frac{1}{2}}(t)|^2)
\\end{pmatrix}
\\end{align*}
For spin one:
.. math::
\\begin{align*}
\\langle F\\rangle(t) = \\begin{pmatrix}
\\Re(\\sqrt{2}\\psi_{0}(t)^*(\\psi_{+1}(t) + \\psi_{-1}(t)))\\\\
-\\Im(\\sqrt{2}\\psi_{0}(t)^*(\\psi_{+1}(t) - \\psi_{-1}(t)))\\\\
|\\psi_{+1}(t)|^2 - |\\psi_{-1}(t)|^2
\\end{pmatrix}
\\end{align*}
Parameters
----------
state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)
The state (wavefunction) of the spin system in the lab frame, for each time sampled. See :math:`\\psi(t)` in :ref:`overview_of_simulation_method`.
spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index)
The expected value for hyperfine spin of the spin system in the lab frame, for each time sampled. Units of :math:`\\hbar`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.
"""
if device_index == 0:
for time_index in nb.prange(spin.shape[0]):
if dimension == 2:
spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2)
else:
spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real
spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real
spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2
elif device_index > 0:
if device_index == 1:
time_index = cuda.grid(1)
elif device_index == 2:
time_index = roc.get_global_id(1)
if time_index < spin.shape[0]:
if dimension == 2:
spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real
spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2)
else:
spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real
spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real
spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2
return
def spin_calculator(state):
"""
Calculates the expected spin projection (Bloch vector) over time for a given time series of a quantum state.
Parameters
----------
state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)
The quantum state of the spin system over time, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns
-------
spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)
The expected spin projection (Bloch vector) over time.
"""
if device.index == 0:
spin = np.empty((state.shape[0], 3), np.float64)
get_spin(state, spin)
elif device == Device.CUDA:
spin = cuda.device_array((state.shape[0], 3), np.float64)
blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block
get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin)
spin = spin.copy_to_host()
elif device == Device.ROC:
spin = roc.device_array((state.shape[0], 3), np.float64)
blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block
get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin)
spin = spin.copy_to_host()
return spin
self.get_time_evolution_raw = get_time_evolution
self.spin_calculator = spin_calculator
def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init):
"""
Integrates the time dependent Schroedinger equation and returns the quantum state of the spin system over time.
Parameters
----------
sweep_parameter : :obj:`float`
The input to the `get_field` function supplied by the user. Modifies the field function so the integrator can be used for many experiments, without the need for slow recompilation. For example, if the `sweep_parameter` is used to define the bias field strength in `get_field`, then one can run many simulations, sweeping through bias values, by calling this method multiple times, each time varying `sweep_parameter`.
time_start : :obj:`float`
The time offset that the experiment is to start at. Measured in s.
time_end : :obj:`float`
The time that the experiment is to finish at. Measured in s. The duration of the experiment is `time_end - time_start`.
time_step_integration : :obj:`float`
The integration time step. Measured in s.
time_step_output : :obj:`float`
The sample resolution of the output timeseries for the state. Must be a whole number multiple of `time_step_integration`. Measured in s.
state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number)
The initial quantum state of the spin system, written in terms of the eigenstates of the spin projection operator in the z direction.
Returns
-------
results : :obj:`Results`
An object containing the results of the simulation.
"""
if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6:
print(f"\033[33mspinsim warning: time_step_output not an integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\033[0m\n")
time_step_integration = time_step_output/round(time_step_output/time_step_integration)
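        # For example, time_step_output = 1e-6 with time_step_integration = 3e-7
        # rounds to 3 fine steps per output step, resetting
        # time_step_integration to 1e-6/3 ~ 3.3333e-7.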
time_end_points = np.asarray([time_start, time_end], np.float64)
state_init = np.asarray(state_init, np.complex128)
time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output)
if self.device.index == 0:
time = np.empty(time_index_max, np.float64)
time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)
elif self.device == Device.CUDA:
time = cuda.device_array(time_index_max, np.float64)
time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block
try:
self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)
except:
print("\033[31mspinsim error: numba.cuda could not jit get_field function into a cuda device function.\033[0m\n")
raise
time_evolution_coarse = time_evolution_coarse.copy_to_host()
time = time.copy_to_host()
elif self.device == Device.ROC:
time = roc.device_array(time_index_max, np.float64)
time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)
blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block
try:
self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)
except:
print("\033[31mspinsim error: numba.roc could not jit get_field function into a roc device function.\033[0m\n")
raise
time_evolution_coarse = time_evolution_coarse.copy_to_host()
time = time.copy_to_host()
state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128)
self.get_state(state_init, state, time_evolution_coarse)
results = Results(time, time_evolution_coarse, state, self.spin_calculator)
return results
@staticmethod
@nb.njit
def get_state(state_init, state, time_evolution):
"""
Use the stepwise time evolution operators in succession to find the quantum state timeseries of the spin system.
Parameters
----------
state_init : :class:`numpy.ndarray` of :class:`numpy.complex128`
The state (spin wavefunction) of the system at the start of the simulation.
state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)
The state (wavefunction) of the spin system in the lab frame, for each time sampled.
time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)
The evaluated time evolution operator between each time step. See :ref:`architecture` for some information.
"""
for time_index in range(state.shape[0]):
# State = time evolution * previous state
for x_index in nb.prange(state.shape[1]):
state[time_index, x_index] = 0
if time_index > 0:
for z_index in range(state.shape[1]):
state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index]
else:
state[time_index, x_index] += state_init[x_index]
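        # Equivalent dense numpy sketch (illustration only, not device compiled):
        #   state[0] = state_init
        #   for t in range(1, state.shape[0]):
        #       state[t] = time_evolution[t - 1] @ state[t - 1]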
sqrt2 = math.sqrt(2)
sqrt3 = math.sqrt(3)
machine_epsilon = np.finfo(np.float64).eps*1000
class Utilities:
"""
An object that contains definitions of all of the device functions (functions compiled for use on the target device) used in the integrator. These device functions are compiled for the chosen target device on construction of the object.
Attributes
----------
conj(z) : :obj:`callable`
Conjugate of a complex number.
.. math::
\\begin{align*}
(a + ib)^* &= a - ib\\\\
a, b &\\in \\mathbb{R}
\\end{align*}
Parameters:
* **z** (:class:`numpy.complex128`) - The complex number to take the conjugate of.
Returns
* **cz** (:class:`numpy.complex128`) - The conjugate of z.
complex_abs(z) : :obj:`callable`
The absolute value of a complex number.
.. math::
\\begin{align*}
|a + ib| &= \\sqrt{a^2 + b^2}\\\\
a, b &\\in \\mathbb{R}
\\end{align*}
Parameters:
* **z** (:class:`numpy.complex128`) - The complex number to take the absolute value of.
Returns
* **az** (:class:`numpy.float64`) - The absolute value of z.
norm2(z) : :obj:`callable`
The 2 norm of a complex vector.
.. math::
\\|a + ib\\|_2 = \\sqrt{\\sum_i \\left(a_i^2 + b_i^2\\right)}
Parameters:
* **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to take the 2 norm of.
Returns
* **nz** (:class:`numpy.float64`) - The 2 norm of z.
inner(left, right) : :obj:`callable`
The inner (maths convention dot) product between two complex vectors.
.. note::
The mathematics definition is used here rather than the physics definition, so the left vector is conjugated. Thus the inner product of two orthogonal vectors is 0.
.. math::
\\begin{align*}
l \\cdot r &\\equiv \\langle l, r \\rangle\\\\
l \\cdot r &= \\sum_i (l_i)^* r_i
\\end{align*}
Parameters:
* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to left multiply in the inner product.
* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to right multiply in the inner product.
Returns
* **d** (:class:`numpy.complex128`) - The inner product of l and r.
set_to(operator, result) : :obj:`callable`
Copy the contents of one matrix into another.
.. math::
(A)_{i, j} = (B)_{i, j}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to.
set_to_one(operator) : :obj:`callable`
Make a matrix the multiplicative identity, ie, :math:`1`.
.. math::
\\begin{align*}
(A)_{i, j} &= \\delta_{i, j}\\\\
&= \\begin{cases}
1,&i = j\\\\
0,&i\\neq j
\\end{cases}
\\end{align*}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`1`.
set_to_zero(operator) : :obj:`callable`
Make a matrix the additive identity, ie, :math:`0`.
.. math::
\\begin{align*}
(A)_{i, j} = 0
\\end{align*}
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`0`.
matrix_multiply(left, right, result) : :obj:`callable`
Multiply matrices left and right together, to be returned in result.
.. math::
\\begin{align*}
(LR)_{i,k} = \\sum_j (L)_{i,j} (R)_{j,k}
\\end{align*}
Parameters:
* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply by.
* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply by.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the result of the product.
adjoint(operator) : :obj:`callable`
Takes the hermitian adjoint of a matrix.
.. math::
\\begin{align*}
A^\\dagger &\\equiv A^H\\\\
(A^\\dagger)_{y,x} &= ((A)_{x,y})^*
\\end{align*}
Matrix can be in :math:`\\mathbb{C}^{2\\times2}` or :math:`\\mathbb{C}^{3\\times3}`.
Parameters:
* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator to take the adjoint of.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array to write the resultant adjoint to.
matrix_exponential_analytic(field_sample, result) : :obj:`callable`
Calculates a :math:`\\mathfrak{su}(2)` matrix exponential based on its analytic form.
.. warning::
Only available for use with spin half systems. Will not work with spin one systems.
Assumes the exponent is an imaginary linear combination of :math:`\\mathfrak{su}(2)`, being,
.. math::
\\begin{align*}
A &= -i(x J_x + y J_y + z J_z),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{2}\\begin{pmatrix}
0 & 1 \\\\
1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{2}\\begin{pmatrix}
0 & -i \\\\
i & 0
\\end{pmatrix},&
J_z &= \\frac{1}{2}\\begin{pmatrix}
1 & 0 \\\\
0 & -1
\\end{pmatrix}
\\end{align*}
Then the exponential can be calculated as
.. math::
\\begin{align*}
\\exp(A) &= \\exp(-ix J_x - iy J_y - iz J_z)\\\\
&= \\begin{pmatrix}
\\cos(\\frac{r}{2}) - i\\frac{z}{r}\\sin(\\frac{r}{2}) & -\\frac{y + ix}{r}\\sin(\\frac{r}{2})\\\\
\\frac{y - ix}{r}\\sin(\\frac{r}{2}) & \\cos(\\frac{r}{2}) + i\\frac{z}{r}\\sin(\\frac{r}{2})
\\end{pmatrix}
\\end{align*}
with :math:`r = \\sqrt{x^2 + y^2 + z^2}`.
Parameters:
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z respectively, as described above.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is to be written to.
matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable`
Calculates a matrix exponential based on the Lie Product Formula,
.. math::
\\exp(A + B) = \\lim_{c \\to \\infty} \\left(\\exp\\left(\\frac{1}{c}A\\right) \\exp\\left(\\frac{1}{c}B\\right)\\right)^c.
**For spin half systems:**
Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\mathfrak{su}(2)`, being,
.. math::
\\begin{align*}
A &= -i(x J_x + y J_y + z J_z),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{2}\\begin{pmatrix}
0 & 1 \\\\
1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{2}\\begin{pmatrix}
0 & -i \\\\
i & 0
\\end{pmatrix},&
J_z &= \\frac{1}{2}\\begin{pmatrix}
1 & 0 \\\\
0 & -1
\\end{pmatrix}
\\end{align*}
Then the exponential can be approximated as, for large :math:`\\tau`,
.. math::
\\begin{align*}
\\exp(A) &= \\exp(-ix J_x - iy J_y - iz J_z)\\\\
&= \\exp(2^{-\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\tau}\\\\
&\\approx (\\exp(-i(2^{-\\tau} x) J_x) \\exp(-i(2^{-\\tau} y) J_y) \\exp(-i(2^{-\\tau} z) J_z)^{2^\\tau}\\\\
&= \\begin{pmatrix}
(c_Xc_Y - is_Xs_Y) e^{-iZ} &
-(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\
(c_Xs_Y - is_Xc_Y) e^{-iZ} &
(c_Xc_Y + is_Xs_Y) e^{iZ}
\\end{pmatrix}^{2^\\tau}\\\\
&= T^{2^\\tau},
\\end{align*}
with
.. math::
\\begin{align*}
X &= \\frac{1}{2}2^{-\\tau}x,\\\\
Y &= \\frac{1}{2}2^{-\\tau}y,\\\\
Z &= \\frac{1}{2}2^{-\\tau}z,\\\\
c_{\\theta} &= \\cos(\\theta),\\\\
s_{\\theta} &= \\sin(\\theta).
\\end{align*}
**For spin one systems**
Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\mathfrak{su}(3)`, being,
.. math::
\\begin{align*}
A &= -i(x J_x + y J_y + z J_z + q J_q),
\\end{align*}
with
.. math::
\\begin{align*}
J_x &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & 1 & 0 \\\\
1 & 0 & 1 \\\\
0 & 1 & 0
\\end{pmatrix},&
J_y &= \\frac{1}{\\sqrt{2}}\\begin{pmatrix}
0 & -i & 0 \\\\
i & 0 & -i \\\\
0 & i & 0
\\end{pmatrix},\\\\
J_z &= \\begin{pmatrix}
1 & 0 & 0 \\\\
0 & 0 & 0 \\\\
0 & 0 & -1
\\end{pmatrix},&
J_q &= \\frac{1}{3}\\begin{pmatrix}
1 & 0 & 0 \\\\
0 & -2 & 0 \\\\
0 & 0 & 1
\\end{pmatrix}
\\end{align*}
Then the exponential can be approximated as, for large :math:`\\tau`,
.. math::
\\begin{align*}
\\exp(A) &= \\exp(-ix J_x - iy J_y - iz J_z - iq J_q)\\\\
&= \\exp(2^{-\\tau}(-ix J_x - iy J_y - iz J_z - iq J_q))^{2^\\tau}\\\\
&\\approx (\\exp(-i(2^{-\\tau} x) J_x) \\exp(-i(2^{-\\tau} y) J_y) \\exp(-i(2^{-\\tau} z J_z + (2^{-\\tau} q) J_q)))^{2^\\tau}\\\\
&= \\begin{pmatrix}
\\frac{e^{-i\\left(Z + \\frac{Q}{3}\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\frac{e^{i\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\sqrt{2}} & \\frac{e^{-i\\left(-Z + \\frac{Q}{3}\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\
\\frac{e^{-i\\left(Z + \\frac{Q}{3}\\right)} (-i s_X + c_X s_Y)}{\\sqrt{2}} & e^{i\\frac{2Q}{3}} c_X c_Y & \\frac{e^{-i(Z - \\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\sqrt{2}} \\\\
\\frac{e^{-i\\left(Z + \\frac{Q}{3}\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\frac{e^{i\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\sqrt{2}} & \\frac{e^{-i\\left(-Z + \\frac{Q}{3}\\right)}(c_X + c_Y + i s_Xs_Y)}{2}
\\end{pmatrix}^{2^\\tau}\\\\
&= T^{2^\\tau},
\\end{align*}
with
.. math::
\\begin{align*}
X &= 2^{-\\tau}x,\\\\
Y &= 2^{-\\tau}y,\\\\
Z &= 2^{-\\tau}z,\\\\
Q &= 2^{-\\tau}q,\\\\
c_{\\theta} &= \\cos(\\theta),\\\\
s_{\\theta} &= \\sin(\\theta).
\\end{align*}
Once :math:`T` is calculated, it is then recursively squared :math:`\\tau` times to obtain :math:`\\exp(A)`.
Parameters:
* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z (and q for spin one) respectively, as described above.
* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is to be written to.
* **trotter_cutoff** (:obj:`int`) - The number of squares to make to the approximate matrix (:math:`\\tau` above).
"""
def __init__(self, spin_quantum_number, device, threads_per_block):
"""
Parameters
----------
spin_quantum_number : :obj:`SpinQuantumNumber`
The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.
device : :obj:`Device`
The option to select which device will be targeted for integration. That is, whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.
threads_per_block : :obj:`int`
The size of each thread block (workgroup), in terms of the number of threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying this value may improve execution speed on different GPU models.
"""
jit_device = device.jit_device
device_index = device.index
@jit_device
def conj(z):
return (z.real - 1j*z.imag)
@jit_device
def complex_abs(z):
return math.sqrt(z.real**2 + z.imag**2)
if spin_quantum_number == SpinQuantumNumber.HALF:
@jit_device
def norm2(z):
return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2)
@jit_device
def inner(left, right):
return conj(left[0])*right[0] + conj(left[1])*right[1]
@jit_device
def set_to(operator, result):
result[0, 0] = operator[0, 0]
result[1, 0] = operator[1, 0]
result[0, 1] = operator[0, 1]
result[1, 1] = operator[1, 1]
@jit_device
def set_to_one(operator):
operator[0, 0] = 1
operator[1, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 1
@jit_device
def set_to_zero(operator):
operator[0, 0] = 0
operator[1, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 0
@jit_device
def matrix_multiply(left, right, result):
result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0]
result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0]
result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1]
result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1]
@jit_device
def matrix_square_residual(operator, result):
result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0]
result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0]
result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1]
result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1]
@jit_device
def adjoint(operator, result):
result[0, 0] = conj(operator[0, 0])
result[1, 0] = conj(operator[0, 1])
result[0, 1] = conj(operator[1, 0])
result[1, 1] = conj(operator[1, 1])
@jit_device
def matrix_exponential_analytic(field_sample, result):
x = field_sample[0]
y = field_sample[1]
z = field_sample[2]
r = math.sqrt(x**2 + y**2 + z**2)
if r > 0:
x /= r
y /= r
z /= r
c = math.cos(r/2)
s = math.sin(r/2)
result[0, 0] = c - 1j*z*s
result[1, 0] = (y - 1j*x)*s
result[0, 1] = -(y + 1j*x)*s
result[1, 1] = c + 1j*z*s
else:
result[0, 0] = 1
result[1, 0] = 0
result[0, 1] = 0
result[1, 1] = 1
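            # Sanity check: for field_sample = (0, 0, z) with z > 0 this reduces
            # to diag(exp(-1j*z/2), exp(1j*z/2)), a pure rotation about z.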
@jit_device
def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
hyper_cube_amount = math.ceil(trotter_cutoff/2)
if hyper_cube_amount < 0:
hyper_cube_amount = 0
precision = 4**hyper_cube_amount
a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])
if a > 0:
ep = (field_sample[0] + 1j*field_sample[1])/a
else:
ep = 1
a = a/precision
Ca = math.cos(a/2)
Sa = -1j*math.sin(a/2)
ez = field_sample[2]/(2*precision)
ez = math.cos(ez) + 1j*math.sin(ez)
# eq = field_sample[3]/(6*precision)
# eq = math.cos(eq) + 1j*math.sin(eq)
result[0, 0] = Ca/ez - 1
result[1, 0] = Sa*ep
result[0, 1] = Sa/ep
result[1, 1] = Ca*ez - 1
if device_index == 0:
temporary = np.empty((2, 2), dtype = np.complex128)
elif device_index == 1:
temporary = cuda.local.array((2, 2), dtype = np.complex128)
elif device_index == 2:
temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128)
temporary = temporary_group[roc.get_local_id(1), :, :]
for power_index in range(hyper_cube_amount):
matrix_square_residual(result, temporary)
matrix_square_residual(temporary, result)
# matrix_multiply(result, result, temporary)
# matrix_multiply(temporary, temporary, result)
result[0, 0] += 1
result[1, 1] += 1
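            # Residual squaring: the matrix is accumulated as R = T - 1 and
            # squared via (1 + R)^2 = 1 + (2 + R)R, keeping the small
            # off-identity part at full float64 precision. Forming T = 1 + R
            # directly with R ~ 1e-12 would round R against the leading 1 and
            # lose roughly four significant digits of R.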
# @jit_device
# def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
# hyper_cube_amount = math.ceil(trotter_cutoff/2)
# if hyper_cube_amount < 0:
# hyper_cube_amount = 0
# precision = 4**hyper_cube_amount
# x = field_sample[0]/(2*precision)
# y = field_sample[1]/(2*precision)
# z = field_sample[2]/(2*precision)
# cx = math.cos(x)
# sx = math.sin(x)
# cy = math.cos(y)
# sy = math.sin(y)
# cisz = math.cos(z) + 1j*math.sin(z)
# result[0, 0] = (cx*cy - 1j*sx*sy)/cisz
# result[1, 0] = (cx*sy -1j*sx*cy)/cisz
# result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz
# result[1, 1] = (cx*cy + 1j*sx*sy)*cisz
# if device_index == 0:
# temporary = np.empty((2, 2), dtype = np.complex128)
# elif device_index == 1:
# temporary = cuda.local.array((2, 2), dtype = np.complex128)
# elif device_index == 2:
# temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128)
# temporary = temporary_group[roc.get_local_id(1), :, :]
# for power_index in range(hyper_cube_amount):
# matrix_multiply(result, result, temporary)
# matrix_multiply(temporary, temporary, result)
else:
@jit_device
def norm2(z):
return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2)
@jit_device
def cross(left, right, result):
result[0] = conj(left[1]*right[2] - left[2]*right[1])
result[1] = conj(left[2]*right[0] - left[0]*right[2])
result[2] = conj(left[0]*right[1] - left[1]*right[0])
@jit_device
def inner(left, right):
return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2]
@jit_device
def set_to(operator, result):
result[0, 0] = operator[0, 0]
result[1, 0] = operator[1, 0]
result[2, 0] = operator[2, 0]
result[0, 1] = operator[0, 1]
result[1, 1] = operator[1, 1]
result[2, 1] = operator[2, 1]
result[0, 2] = operator[0, 2]
result[1, 2] = operator[1, 2]
result[2, 2] = operator[2, 2]
@jit_device
def set_to_one(operator):
operator[0, 0] = 1
operator[1, 0] = 0
operator[2, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 1
operator[2, 1] = 0
operator[0, 2] = 0
operator[1, 2] = 0
operator[2, 2] = 1
@jit_device
def set_to_zero(operator):
operator[0, 0] = 0
operator[1, 0] = 0
operator[2, 0] = 0
operator[0, 1] = 0
operator[1, 1] = 0
operator[2, 1] = 0
operator[0, 2] = 0
operator[1, 2] = 0
operator[2, 2] = 0
@jit_device
def matrix_multiply(left, right, result):
result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0]
result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0]
result[2, 0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0]
result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1]
result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1]
result[2, 1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1]
result[0, 2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2]
result[1, 2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2]
result[2, 2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2]
@jit_device
def matrix_square_residual(operator, result):
result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0]
result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0]
result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0]
result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1]
result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1]
result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1]
result[0, 2] = (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2]
result[1, 2] = operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2]
result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2]
@jit_device
def adjoint(operator, result):
result[0, 0] = conj(operator[0, 0])
result[1, 0] = conj(operator[0, 1])
result[2, 0] = conj(operator[0, 2])
result[0, 1] = conj(operator[1, 0])
result[1, 1] = conj(operator[1, 1])
result[2, 1] = conj(operator[1, 2])
result[0, 2] = conj(operator[2, 0])
result[1, 2] = conj(operator[2, 1])
result[2, 2] = conj(operator[2, 2])
@jit_device
def matrix_exponential_analytic(field_sample, result, trotter_cutoff):
pass
@jit_device
def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
hyper_cube_amount = math.ceil(trotter_cutoff/2)
if hyper_cube_amount < 0:
hyper_cube_amount = 0
precision = 4**hyper_cube_amount
a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])
if a > 0:
ep = (field_sample[0] + 1j*field_sample[1])/a
else:
ep = 1
a = a/precision
Ca = math.cos(a/2)
Sa = math.sin(a/2)
ca = math.cos(a)
sa = -1j*math.sin(a)/sqrt2
ez = field_sample[2]/(2*precision)
ez = math.cos(ez) + 1j*math.sin(ez)
eq = field_sample[3]/(6*precision)
eq = math.cos(eq) + 1j*math.sin(eq)
# Ca = 1
# Sa = a/2
# ca = 1
# sa = -1j*a/sqrt2
# ez = field_sample[2]/(2*precision)
# ez = 1 + 1j*ez
# eq = field_sample[3]/(6*precision)
# eq = 1 + 1j*eq
result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1
result[1, 0] = sa*eq*ep/ez
result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq))
result[0, 1] = sa*eq/(ez*ep)
result[1, 1] = ca*(eq*eq*eq*eq) - 1
result[2, 1] = sa*eq*ez*ep
result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep))
result[1, 2] = sa*eq*ez/ep
result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1
if device_index == 0:
temporary = np.empty((3, 3), dtype = np.complex128)
elif device_index == 1:
temporary = cuda.local.array((3, 3), dtype = np.complex128)
elif device_index == 2:
temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)
temporary = temporary_group[roc.get_local_id(1), :, :]
for power_index in range(hyper_cube_amount):
matrix_square_residual(result, temporary)
matrix_square_residual(temporary, result)
result[0, 0] += 1
result[1, 1] += 1
result[2, 2] += 1
# @jit_device
# def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):
# hyper_cube_amount = math.ceil(trotter_cutoff/2)
# if hyper_cube_amount < 0:
# hyper_cube_amount = 0
# precision = 4**hyper_cube_amount
# x = field_sample[0]/precision
# y = field_sample[1]/precision
# z = field_sample[2]/precision
# q = field_sample[3]/precision
# cx = math.cos(x)
# sx = math.sin(x)
# cy = math.cos(y)
# sy = math.sin(y)
# cisz = math.cos(z + q/3) - 1j*math.sin(z + q/3)
# result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy)
# result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2
# result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy)
# cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3)
# result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2
# result[1, 1] = cisz*cx*cy
# result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2
# cisz = math.cos(z - q/3) + 1j*math.sin(z - q/3)
# result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy)
# result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2
# result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy)
# if device_index == 0:
# temporary = np.empty((3, 3), dtype = np.complex128)
# elif device_index == 1:
# temporary = cuda.local.array((3, 3), dtype = np.complex128)
# elif device_index == 2:
# temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)
# temporary = temporary_group[roc.get_local_id(1), :, :]
# for power_index in range(hyper_cube_amount):
# matrix_multiply(result, result, temporary)
# matrix_multiply(temporary, temporary, result)
self.conj = conj
self.complex_abs = complex_abs
self.norm2 = norm2
self.inner = inner
self.set_to = set_to
self.set_to_one = set_to_one
self.set_to_zero = set_to_zero
self.matrix_multiply = matrix_multiply
self.adjoint = adjoint
self.matrix_exponential_analytic = matrix_exponential_analytic
self.matrix_exponential_lie_trotter = matrix_exponential_lie_trotter
self.matrix_square_residual = matrix_square_residual | 2.546875 | 3 |
Algorithm/BasicAlgorithm/ArrayAndSort/select_sort.py | hqzhang83/Everything101 | 0 | 12793131 | def selectSort(nums):
    """Sort nums in place in ascending order and return it.

    Note: this is the exchange variant of selection sort; it swaps whenever a
    smaller element is found rather than tracking the minimum index first.
    """
    l = len(nums)
    for i in range(l):
        for j in range(i + 1, l):
            # Swap whenever a later element is smaller than the one at position i
            if nums[j] < nums[i]:
                nums[i], nums[j] = nums[j], nums[i]
return nums
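# Example: selectSort([5, 2, 4, 1, 3]) returns [1, 2, 3, 4, 5] (sorted in place).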
| 1.484375 | 1 |
src/algorithms/number_theory/P003_trial_division/solution_01.py | lakshmikanth-tesla/ProgrammingProblems | 1 | 12793259 | import logging
import math
"""
1. Note
- Loop from 2 till Square Root of N and keep dividing N at every step.
2. Optimisation(s)
- Apart from 2, only ODD numbers are tested for divisibility.
- Only numbers upto SquareRoot(n) are tested for divisibility.
3. Limitation(s)
- Do not try with numbers which has more than 15-digit prime factors.
"""
def prime_factors_using_trial_division(n):
"""Returns a list of all prime prime_factors of n"""
prime_factors = []
# Test for 2 separately so that only ODD numbers can be tested in the loop
while n % 2 == 0:
factor = 2
prime_factors.append(factor)
n = n // 2
# Test only for ODD numbers starting with 3
for i in range(3, int(math.sqrt(n)) + 1, 2):
# logging.debug("i = {0}".format(i))
while n % i == 0:
factor = i
prime_factors.append(factor)
n = n // i
logging.debug("Factor = {0}, N = {1}".format(i, n))
# All factors have been found once N is reduced to 1.
if n == 1:
break
# If no factor has been found then N is PRIME and the only prime factor of itself.
if n > 1:
prime_factors.append(n)
return prime_factors | 3.015625 | 3 |
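# Minimal usage sketch:
# >>> prime_factors_using_trial_division(13195)
# [5, 7, 13, 29]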
crashbin_app/migrations/0009_unique_names.py | The-Compiler/crashbin | 0 | 12793387 | <filename>crashbin_app/migrations/0009_unique_names.py
# Generated by Django 2.2.1 on 2019-05-20 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("crashbin_app", "0008_create_mailbox")]
operations = [
migrations.AlterField(
model_name="bin",
name="name",
field=models.CharField(max_length=255, unique=True),
),
migrations.AlterField(
model_name="label",
name="name",
field=models.CharField(max_length=255, unique=True),
),
]
| 0.78125 | 1 |
bitbots_motion/bitbots_hcm/scripts/test_subscriber.py | MosHumanoid/bitbots_thmos_meta | 3 | 12793515 | <reponame>MosHumanoid/bitbots_thmos_meta
#!/usr/bin/env python3
import rospy
import time
from sensor_msgs.msg import Imu
class SubTest():
def __init__(self):
rospy.init_node("test_sub")
self.arrt = []
self.arrn = []
self.sum =0
self.count=0
self.max = 0
self.sub = rospy.Subscriber("test", Imu, self.cb, queue_size=1)
self.f = open("latencies", 'w')
while not rospy.is_shutdown():
time.sleep(1)
if self.count !=0:
print("mean: " + str((self.sum/self.count)*1000))
print("max: " + str(self.max*1000))
i = 0
for n in self.arrn:
self.f.write(str(n) + "," + str(self.arrt[i]*1000) + "\n")
i+=1
self.f.close()
def cb(self, msg:Imu):
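        # Latency is measured as local receive time minus the publisher's
        # header stamp, so it assumes both clocks are synchronised
        # (e.g. publisher and subscriber on the same host).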
diff = rospy.get_time() - msg.header.stamp.to_sec()
self.arrt.append(diff)
self.arrn.append(msg.header.seq)
self.sum += diff
self.count +=1
self.max = max(self.max, diff)
if __name__ == "__main__":
SubTest()
| 2.015625 | 2 |
manager.py | zhangmingkai4315/Flask-Web-App | 0 | 12793643 | <reponame>zhangmingkai4315/Flask-Web-App
#!/usr/bin/env python
import os
from app import create_app,db
from app.models import User,Role
from flask.ext.script import Manager,Shell
from flask.ext.migrate import Migrate,MigrateCommand
app=create_app(os.getenv('FLASK_CONFIG') or 'default')
manager=Manager(app)
migrate=Migrate(app,db)
manager.add_command('db',MigrateCommand)
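# A sketch of an optional shell context using the Shell command imported above,
# so `python manager.py shell` exposes the app objects (names shown are the
# ones imported in this file):
# def make_shell_context():
#     return dict(app=app, db=db, User=User, Role=Role)
# manager.add_command('shell', Shell(make_context=make_shell_context))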
if __name__=='__main__':
manager.run()
| 1.226563 | 1 |
tests/fixtures/create_fixtures.py | brightway-lca/bw_default_backend | 0 | 12793771 | <reponame>brightway-lca/bw_default_backend<filename>tests/fixtures/create_fixtures.py<gh_stars>0
import bw_projects as bw
import bw_default_backend as backend
import pytest
@pytest.fixture(scope="function")
def basic_fixture():
NAME = "test-fixtures"
# if NAME in bw.projects:
# bw.projects.delete_project(NAME)
bw.projects.create_project(NAME, add_base_data=True)
biosphere_collection = backend.Collection.create(name="biosphere")
food_collection = backend.Collection.create(name="food")
first = backend.Flow.create(
name="an emission", kind="biosphere", collection=biosphere_collection, unit="kg"
)
second = backend.Flow.create(
name="another emission",
kind="biosphere",
collection=biosphere_collection,
unit="kg",
)
world = backend.Geocollection.get(name="world")
canada = backend.Location.create(geocollection=world, name="Canada")
lunch_flow = backend.Flow.create(
name="lunch food", unit="kg", kind="technosphere", collection=food_collection
)
lunch_activity = backend.Activity.create(
name="eating lunch",
collection=food_collection,
reference_product=lunch_flow,
location=canada,
)
backend.Exchange.create(
activity=lunch_activity, flow=lunch_flow, direction="production", amount=0.5
)
backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05)
dinner_flow = backend.Flow.create(
name="dinner main dish",
unit="kg",
kind="technosphere",
collection=food_collection,
)
dinner_activity = backend.Activity.create(
name="eating dinner",
collection=food_collection,
reference_product=dinner_flow,
location=canada,
)
backend.Exchange.create(
activity=dinner_activity, flow=dinner_flow, direction="production", amount=0.25
)
backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15)
method = backend.Method.create(name=("test", "method"))
backend.CharacterizationFactor.create(flow=first, method=method, amount=42)
backend.CharacterizationFactor.create(flow=second, method=method, amount=99)
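    # The fixture returns nothing; tests depend on it to populate the project
    # database, e.g. (hypothetical test):
    # def test_collections_exist(basic_fixture):
    #     assert backend.Collection.get(name="food")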
| 1.382813 | 1 |
house_rocket_analysis/APP/app.py | diogovalentte/data_engineer_portfolio | 8 | 12793899 | # Libraries
from pandas.io.formats.format import DataFrameFormatter
from streamlit_folium import folium_static
import pandas as pd
import numpy as np
import seaborn as sns
import streamlit as st
import sys
#! Add folder "src" as a package path
project_path = "Put/here/the/path/to/the/project's/root/folder/house_rocket_analysis"
sys.path.append(f'{project_path}/src/')
import visualization.maps as maps
#! App configuration
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def load_data(path):
data = pd.read_csv(path)
return data
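# st.cache memoizes load_data so the CSV is read once per session;
# allow_output_mutation skips hashing the returned DataFrame on reruns.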
# Pages definition
def sidebar():
st.sidebar.title('Select Page')
page_select = st.sidebar.selectbox( label='', options=['Final Reports', 'Maps'])
return page_select
def page_final_reports(renamed_houses, recommended_houses):
# Filter Recommended Houses to Buy DataFrame
st.sidebar.title('Search for recommended home for purchase')
id_input = str(st.sidebar.text_input(label='Enter the ID')).strip() # Input ID to search house
st.title('House Rocket Analysis')
st.title('')
st.title(f'There are {renamed_houses.shape[0]} properties available for purchase today.')
st.dataframe(renamed_houses)
st.header("Main considerations of the analysis.")
    st.markdown('* The variables with the highest positive correlation with Price are "Grade" and "Sqft living".')
    st.markdown('* Houses rated 8 or higher on the "Grade" attribute (quality of the building materials) have the best average price per rank and number of homes.')
    st.markdown('* The average price of renovated homes is 22% higher than that of unrenovated homes.')
    st.markdown('* Among the features a renovation can add, bathrooms and total square footage correlate most strongly with Price.')
    st.markdown('* The best season for re-selling homes is Spring.')
    st.header(
        """After these analyses, the houses recommended for House Rocket to buy meet the following conditions:
    Places with a "Grade" (quality of the building materials) equal to or greater than 8
    Houses with condition equal to or greater than 3
    Houses priced below the median price of their region (ZipCode)""")
st.header("""The re-sale price of the after-purchased homes is based on the various "Total Avarage Price", which means the average value of the region's house prices (ZipCode) and the average price of the Season that the house was announced.
If the purchase price of the house is higher than the "Total Avarage Price", then the suggested selling price will be the purchase price + 10%.
If the purchase price of the house is less than the "Total Avarage Price", then the suggested selling price will be the purchase price + 30%.""")
st.header("""A column has also been added in the table representing the recommended re-sale price and the profit from re-selling the house if it is renewed.
If the house is renovated, the re-sale price and the after-sale profit will be 20% higher.
""")
st.title(f'After analysis, {recommended_houses.shape[0]} properties are recommended for purchase and re-sale.')
st.subheader('New columns have also been added at the end of the table. They represent the recommended selling price of the houses, whether it has been renovated or not, in addition to the possible profit if sold at the recommended price.')
st.text("")
try:
if not id_input:
st.dataframe(recommended_houses)
else:
if int(id_input) in recommended_houses['ID'].values:
st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)])
else:
st.error(
'Property with this ID is not recommended for purchase or there is no home with this ID.')
    except ValueError:
        st.error('ERROR: Input value is not a valid ID.')
return None
def page_maps(renamed_houses, recommended_houses):
# SideBar - - -
st.sidebar.title('Filter Map')
filter_data = st.sidebar.radio(label='Filter Houses', options=[
'All houses', 'Recommended homes to buy'])
# Filters - -
if filter_data == 'Recommended homes to buy':
st.title('Map of all recommended homes for purchase')
st.header('')
data = recommended_houses.copy()
else:
st.title('Map of all available houses')
st.header('')
data = renamed_houses.copy()
# Map of density
houses_map = maps.houses_map(data)
folium_static(houses_map, width=1200, height=700)
    # Map with average price per region (ZipCode)
    st.title('Average Price per Region')
avg_region = maps.price_per_region(renamed_houses)
folium_static(avg_region, width=1200, height=700)
if __name__ == '__main__':
path = f"{project_path}/data/interim/renamed_data.csv"
renamed_houses = load_data(path)
path = f"{project_path}/reports/data/final_houses_sale.csv"
recommended_houses = load_data(path)
page_select = sidebar()
if page_select == 'Final Reports':
page_final_reports(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
else:
page_maps(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
| 2.125 | 2 |
bpmn/utils/string_utils.py | marcelobbfonseca/SFDjango-BPMN | 1 | 12794027 | <reponame>marcelobbfonseca/SFDjango-BPMN<gh_stars>1-10
from re import sub
def snake_case(s):
return '_'.join(
sub('([A-Z][a-z]+)', r' \1',
sub('([A-Z]+)', r' \1',
s.replace('-', ' '))).split()).lower()
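

# Illustrative behaviour (assumption -- this demo block is not in the original file):
if __name__ == '__main__':
    print(snake_case('CamelCaseName'))    # camel_case_name
    print(snake_case('kebab-case-name'))  # kebab_case_name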
| 2.46875 | 2 |
hero1/command_pb2.py | danielhwkim/Hero1 | 0 | 12794155 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: command.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rcommand.proto\x12\tcommander\"U\n\x03\x43md\x12\x1f\n\x03\x63md\x18\x01 \x01(\x0e\x32\x12.commander.CmdType\x12\x0c\n\x04ints\x18\x02 \x03(\x05\x12\x0e\n\x06\x66loats\x18\x03 \x03(\x02\x12\x0f\n\x07strings\x18\x04 \x03(\t*\xe3\x01\n\x07\x43mdType\x12\x0e\n\nRAWKEYDOWN\x10\x00\x12\x0c\n\x08RAWKEYUP\x10\x01\x12\x0c\n\x08\x43OMMAND2\x10\x02\x12\x0c\n\x08\x43OMMAND3\x10\x03\x12\x0c\n\x08\x43OMMAND4\x10\x04\x12\x0c\n\x08\x43OMMAND5\x10\x05\x12\x0c\n\x08\x43OMMAND6\x10\x06\x12\x0c\n\x08\x43OMMAND7\x10\x07\x12\x0c\n\x08\x43OMMAND8\x10\x08\x12\x0c\n\x08\x43OMMAND9\x10\t\x12\x0f\n\x0bMAPORIGINAL\x10\n\x12\x07\n\x03MAP\x10\x0b\x12\x07\n\x03\x41\x43K\x10\x0c\x12\x08\n\x04\x41\x43K2\x10\r\x12\x08\n\x04HERO\x10\x0e\x12\t\n\x05READY\x10\x0f\x12\x08\n\x04INIT\x10\x10\x62\x06proto3')
_CMDTYPE = DESCRIPTOR.enum_types_by_name['CmdType']
CmdType = enum_type_wrapper.EnumTypeWrapper(_CMDTYPE)
RAWKEYDOWN = 0
RAWKEYUP = 1
COMMAND2 = 2
COMMAND3 = 3
COMMAND4 = 4
COMMAND5 = 5
COMMAND6 = 6
COMMAND7 = 7
COMMAND8 = 8
COMMAND9 = 9
MAPORIGINAL = 10
MAP = 11
ACK = 12
ACK2 = 13
HERO = 14
READY = 15
INIT = 16
_CMD = DESCRIPTOR.message_types_by_name['Cmd']
Cmd = _reflection.GeneratedProtocolMessageType('Cmd', (_message.Message,), {
'DESCRIPTOR' : _CMD,
'__module__' : 'command_pb2'
# @@protoc_insertion_point(class_scope:commander.Cmd)
})
_sym_db.RegisterMessage(Cmd)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_CMDTYPE._serialized_start=116
_CMDTYPE._serialized_end=343
_CMD._serialized_start=28
_CMD._serialized_end=113
# @@protoc_insertion_point(module_scope)
| 0.84375 | 1 |
metREx/app/main/util/prometheus_helper.py | vijayragava/metREx | 8 | 12794283 | <filename>metREx/app/main/util/prometheus_helper.py<gh_stars>1-10
import os
import re
from prometheus_client.core import CollectorRegistry
from prometheus_client.multiprocess import MultiProcessCollector
collector_registries = {}
prometheus_multiproc_dir = os.getenv('prometheus_multiproc_dir')
def get_pushgateways(aa, apialchemy_info):
pushgateways = {}
apialchemy_prefix, apialchemy_binds = apialchemy_info
service_name_pattern = re.compile(r'^' + r'(?:' + re.escape(apialchemy_prefix) + r')(?P<name>.+)$', re.X)
api_vendor_pattern = re.compile(r'^(?:(?P<vendor>\w+)(?:\+(?:http|https))?)(?=://)', re.X)
pushgateway_services = list(filter(None, re.split(r'\s*,\s*', os.getenv('PUSHGATEWAY_SERVICES', ''))))
for service in pushgateway_services:
m = service_name_pattern.match(service)
if m is not None:
components = m.groupdict()
service_name = components['name']
if service_name in apialchemy_binds.keys():
conn_str = apialchemy_binds[service_name]
m = api_vendor_pattern.match(conn_str)
if m is not None:
components = m.groupdict()
if components['vendor'] == 'pushgateway':
from ..api.pushgateway import Pushgateway
dal = Pushgateway(aa)
dal.init_aa(service_name)
pushgateways[service] = dal.client
else:
raise ValueError("Service '" + service + "' is not a valid Pushgateway.")
else:
raise ValueError("Service '" + service + "' not found.")
return pushgateways
def get_registry(name):
if name not in collector_registries.keys():
collector_registries[name] = CollectorRegistry()
if prometheus_multiproc_dir is not None:
MultiProcessCollector(collector_registries[name])
return collector_registries[name]
def register_collector(name, collector):
job_registry = get_registry(name)
job_registry.register(collector)
def unregister_collector(name, collector):
if name in collector_registries.keys():
collector_registries[name].unregister(collector)
del collector_registries[name]
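

# Illustrative usage sketch (assumption -- not part of the original module):
#
#   from prometheus_client import Gauge
#   gauge = Gauge('job_last_success_unixtime', 'Last success time',
#                 registry=get_registry('my_job'))
#   ...
#   unregister_collector('my_job', gauge)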
| 1.289063 | 1 |
scripts/slim_recommender.py | inpefess/recommender-systems-course | 0 | 12794411 | <filename>scripts/slim_recommender.py
# Copyright 2021-2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SLIM recommender
================
https://github.com/KarypisLab/SLIM
https://github.com/MaurizioFD/RecSys2019_DeepLearning_Evaluation
"""
from rs_datasets import MovieLens
from rs_metrics import hitrate
# pylint: disable=import-error
from SLIM import SLIM, SLIMatrix
from rs_course.utils import movielens_split
def slim_recommender(dataset_size: str) -> None:
"""
>>> slim_recommender("small")
Learning takes...
0.55
:param dataset_size: a size of MovieLens dataset to use
"""
train, test, _ = movielens_split(
MovieLens(dataset_size).ratings, 0.95, True
)
trainmat = SLIMatrix(train)
model = SLIM()
model.train({}, trainmat)
model.save_model(modelfname="slim_model.csr", mapfname="slim_map.csr")
testmat = SLIMatrix(train, model)
slim_pred = model.predict(testmat, outfile="slim_recommendations.txt")
pred = {int(k): list(map(int, v)) for k, v in slim_pred.items()}
print(hitrate(test, pred))
| 1.65625 | 2 |
PhotoManagementSystem/PhotoManager/Library/facep.py | 39M/PhotoTheater | 1 | 12794539 | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
import requests
API_KEY = '<KEY>'
API_SECRET = '<KEY>'
API_URL = 'http://apicn.faceplusplus.com'
def detect(path):
data = {
'api_key': API_KEY,
'api_secret': API_SECRET,
}
files = {
'img': open(path, 'rb'),
}
r = requests.post(API_URL + '/detection/detect',
data=data,
files=files)
try:
face_id = r.json()["face"][0]["face_id"]
data = {
'api_key': API_KEY,
'api_secret': API_SECRET,
'face_id': face_id
}
result = requests.post(API_URL + '/detection/landmark',
data=data)
return result.json()
    except Exception:
return -1
# detect(u'source.jpg')
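# Illustrative usage (assumption -- requires real Face++ credentials above; the
# exact JSON layout depends on the API version):
#   result = detect('source.jpg')   # parsed landmark JSON, or -1 on failure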
| 1.351563 | 1 |
setup.py | nuodb/nuodb-aws-quickstart | 2 | 12794667 | from setuptools import setup
import sys
setup(name='nuodbawsquickstart',
version='1.1.0',
description='Script to deploy a multi-region and multi-instance AWS cluster',
url='http://github.com/nuodb/nuodb-aws-quickstart',
author='<NAME>.',
author_email='<EMAIL>',
#data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])],
install_requires=["argparse", "boto", "requests"],
license='BSD licence, see LICENSE',
packages=['nuodbawsquickstart'],
scripts=["bin/nuodb_aws_quickstart.py"],
zip_safe=True)
| 1.101563 | 1 |
src/presence_analyzer/tests.py | stxnext-kindergarten/presence-analyzer-asierhej | 0 | 12794795 | <reponame>stxnext-kindergarten/presence-analyzer-asierhej
# -*- coding: utf-8 -*-
"""
Presence analyzer unit tests.
"""
from __future__ import unicode_literals
import os.path
import json
import datetime
import time
import unittest
from collections import OrderedDict
import main # pylint: disable=relative-import
import utils # pylint: disable=relative-import
import views # pylint: disable=unused-import, relative-import
from .utils import memoize
TEST_DATA_CSV = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'
)
TEST_XML_DATA = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml'
)
# pylint: disable=maybe-no-member, too-many-public-methods
class PresenceAnalyzerViewsTestCase(unittest.TestCase):
"""
Views tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
main.app.config.update(
{
'XML_DATA': TEST_XML_DATA,
'DATA_CSV': TEST_DATA_CSV
}
)
self.client = main.app.test_client()
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_mainpage(self):
"""
Test main page render template.
"""
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
def test_api_users(self):
"""
Test users listing.
"""
resp = self.client.get('/api/v1/users')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data[0],
{
'user_id': 36,
'name': '<NAME>.',
'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36'
}
)
def test_presence_weekday_view(self):
"""
        Test mean presence time of a given user grouped by weekday.
"""
resp = self.client.get('/api/v1/presence_weekday/11')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['Weekday', 'Presence (s)'],
['Mon', 24123],
['Tue', 41885],
['Wed', 41885],
['Thu', 45968],
['Fri', 30549],
['Sat', 6426],
['Sun', 22969]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_mean_time_weekday_view(self):
"""
        Test mean presence time grouped by weekday for a given user.
"""
resp = self.client.get('/api/v1/mean_time_weekday/11')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['Mon', 24123.0],
['Tue', 20942.5],
['Wed', 20942.5],
['Thu', 22984.0],
['Fri', 15274.5],
['Sat', 6426.0],
['Sun', 22969.0]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_presence_start_end(self):
"""
        Test the mean arrival time at the office and the mean leave time.
"""
resp = self.client.get('/api/v1/presence_start_end/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['Mon', 0, 0],
['Tue', 34745.0, 64792.0],
['Wed', 33592.0, 58057.0],
['Thu', 38926.0, 62631.0],
['Fri', 0, 0],
['Sat', 0, 0],
['Sun', 0, 0]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_podium(self):
"""
Test five best months of work time.
"""
resp = self.client.get('/api/v1/podium/11')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['April', 1],
['July', 4],
['May', 6],
['August', 6],
['June', 7],
['September', 32]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_five_top(self):
"""
Test top 5 workers per months in year.
"""
resp = self.client.get('/api/v1/five_top/9,2013')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
{
'hours': 32,
'user_id': 11,
'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/11'
},
{
'hours': 21,
'user_id': 10,
'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/10'
}
]
)
class PresenceAnalyzerUtilsTestCase(unittest.TestCase):
"""
Utility functions tests.
"""
def setUp(self):
"""
        Before each test, set up an environment.
"""
main.app.config.update(
{
'XML_DATA': TEST_XML_DATA,
'DATA_CSV': TEST_DATA_CSV
}
)
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_get_data(self):
"""
Test parsing of CSV file.
"""
data = utils.get_data()
self.assertIsInstance(data, dict)
self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26, 62])
sample_date = datetime.date(2013, 9, 10)
self.assertIn(sample_date, data[10])
self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])
self.assertEqual(
data[10][sample_date]['start'],
datetime.time(9, 39, 5)
)
def test_seconds_since_midnight(self):
"""
        Test calculation of seconds since midnight.
"""
data = utils.seconds_since_midnight(datetime.time(2, 42, 23))
self.assertEqual(data, 9743)
data = utils.seconds_since_midnight(datetime.time(00, 00, 00))
self.assertEqual(data, 0)
def test_interval(self):
"""
        Test calculation of seconds between two time objects.
"""
start_example = datetime.time(13, 59, 59)
end_example = datetime.time(23, 59, 59)
data = utils.interval(start_example, end_example)
self.assertEqual(36000, data)
data = utils.interval(end_example, start_example)
self.assertEqual(-36000, data)
def test_mean(self):
"""
        Test the mean calculation and that an empty list returns 0.
"""
data = utils.mean([100, 100, 100])
self.assertEqual(100, data)
data = utils.mean([0.5, 0.2, 0.3, 234])
self.assertEqual(58.75, data)
data = utils.mean([])
self.assertEqual(0, data)
def test_day_start_end(self):
"""
Test start and end work times sorted by weekday.
"""
user = utils.get_data()
data = utils.day_start_end(user[10])
self.assertEqual(
data,
[
['Mon', 0, 0],
['Tue', 34745.0, 64792.0],
['Wed', 33592.0, 58057.0],
['Thu', 38926.0, 62631.0],
['Fri', 0, 0],
['Sat', 0, 0],
['Sun', 0, 0]
])
def test_xml_translator(self):
"""
Test user data from XML file extraction.
"""
data = utils.xml_translator()
self.assertIsInstance(data, dict)
self.assertItemsEqual(data.keys()[:3], [36, 165, 170])
self.assertEqual(
data.values()[0],
{
'name': '<NAME>.',
'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36'
}
)
def test_cache(self):
"""
Test data caching.
"""
@memoize(age_cache=20)
def short_calculation():
data = 2 + 2
data = time.time()
time.sleep(1)
return data
self.assertEqual(short_calculation(), short_calculation())
@memoize(age_cache=1)
def other_calculation():
data = 2 + 3
data = time.time()
time.sleep(2)
return data
self.assertNotEqual(other_calculation(), other_calculation())
def test_podium_result_structure_builder(self):
"""
Test building result for podium template.
"""
months = [
[], [], [], [], [], [], [276890],
[655139], [500730], [233576], [], [], []
]
data = utils.podium_result_structure_builder(months)
self.assertEqual(
data,
[
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['June', 76],
['July', 181],
['August', 139],
['September', 64],
['no data', 0],
['no data', 0],
['no data', 0]
]
)
def test_podium_data_maker(self):
"""
Test groups presence entries as podium data.
"""
data = utils.podium_data_maker(utils.get_data()[11])
self.assertEqual(
data,
[
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['April', 1],
['July', 4],
['May', 6],
['August', 6],
['June', 7],
['September', 32]
]
)
def test_group_by_month(self):
"""
Test grouping presence entries by month.
"""
data = utils.group_by_month(utils.get_data(), 2013)
self.assertEqual(
data,
[
{68: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{
10: [
[], [], [], [], [], [], [], [], [], [78217], [], [], []
]
},
{
11: [
[], [], [], [], [6426], [22969], [25321],
[16564], [24123], [118402], [], [], []
]
},
{141: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{176: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{49: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{26: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{62: [[], [], [], [], [], [], [], [], [], [], [], [], []]}
]
)
data = utils.group_by_month(utils.get_data(), 2011)
self.assertEqual(
data,
[
{68: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{10: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{11: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{141: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{176: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{49: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{26: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{62: [[], [], [], [], [], [], [], [], [], [], [], [], []]}
]
)
def test_five_top_workers(self):
"""
Test top 5 presence users with information about them.
"""
data = utils.five_top_workers(9, 1997)
self.assertEqual(data, [])
data = utils.five_top_workers(9, 2013)
self.assertEqual(
data,
[
{
'hours': 32, 'user_id': 11, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/11'
},
{
'hours': 21, 'user_id': 10, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/10'
}
]
)
data = utils.five_top_workers(9, 2015)
self.assertEqual(
data,
[
{
'hours': 15, 'user_id': 62, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/62'
},
{
'hours': 12, 'user_id': 141, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/141'
},
{
'hours': 11, 'user_id': 176, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/176'
},
{
'hours': 11, 'user_id': 49, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/49'
},
{
'hours': 8, 'user_id': 68, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/68'
}
]
)
def test_five_top_user_data(self):
"""
Test top 5 user data.
"""
dict_months = [
(10, [455386]), (11, [263049]), (12, [371559]),
(13, [394007]), (15, [432795]), (16, [513180]),
(176, [606888]), (19, [434499]), (165, [555037]),
(170, [576346]), (23, [514312]), (24, [235634]),
(141, [612478]), (26, [508050]), (26, [560624]),
(29, [385973]), (30, []), (31, []), (33, [306667]),
(36, [546225]), (48, []), (49, []), (54, []), (58, []),
]
sorted_dict = OrderedDict(
[
(141, [612478]), (176, [606888]), (170, [576346]),
(26, [560624]), (165, [555037]), (36, [546225]),
(23, [514312]), (16, [513180]), (26, [508050]),
(10, [455386]), (19, [434499]), (15, [432795]),
(13, [394007]), (29, [385973]), (12, [371559]),
(33, [306667]), (11, [263049]), (24, [235634]),
(101, [])
]
)
data = utils.five_top_user_data(dict_months, sorted_dict)
self.assertEqual(
data[0],
{
'hours': 170,
'user_id': 141,
'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/141'
}
)
sorted_dict = OrderedDict([(141, [612478])])
data = utils.five_top_user_data(dict_months, sorted_dict)
self.assertEqual(data, [])
def test_sorted_months_dict(self):
"""
Test sorting of months dict.
"""
dict_months = [
(10, [455386]), (11, [263049]), (12, [371559]),
(13, [394007]), (15, [432795]), (16, [513180]),
(176, [606888]), (19, [434499]), (165, [555037]),
(170, [576346]), (23, [514312]), (24, [235634]),
(141, [612478]), (26, [508050]), (26, [560624]),
(29, [385973]), (30, []), (31, []), (33, [306667]),
(36, [546225]), (48, []), (49, []), (54, []), (58, [])
]
data = utils.sorted_months_dict(dict_months)
self.assertEqual(
data,
OrderedDict(
[
(141, [612478]), (176, [606888]), (170, [576346]),
(26, [508050]), (165, [555037]), (36, [546225]),
(23, [514312]), (16, [513180]), (10, [455386]),
(19, [434499]), (15, [432795]), (13, [394007]),
(29, [385973]), (12, [371559]), (33, [306667]),
(11, [263049]), (24, [235634]), (30, []), (31, []),
(48, []), (49, []), (54, []), (58, [])
]
)
)
def test_months_sum_dict(self):
"""
        Test appending and summing time for every month.
"""
items = {
178:
{
datetime.date(2013, 9, 9):
{
'end': datetime.time(17, 14, 42),
'start': datetime.time(11, 43, 50)
}
},
179:
{
datetime.date(2013, 9, 12):
{
'end': datetime.time(18, 5, 24),
'start': datetime.time(16, 55, 24)
}
}
}
item = datetime.date(2013, 9, 9)
months = [[] for month in xrange(13)]
data = utils.months_sum_dict(2013, items, item, 178, months)
self.assertEqual(
data,
[
[], [], [], [], [], [], [], [], [], [19852], [], [], []
]
)
def test_user_validate(self):
"""
Test checking if user exist.
"""
months_sum = [
[], [], [], [], [], [], [550395], [632015],
[505118], [499105], [486939], [624356], [455386]
]
data = utils.user_validate(months_sum, 34654)
self.assertEqual(data, [])
data = utils.user_validate(months_sum, 141)
self.assertEqual(
data,
{
141: [
[], [], [], [], [], [], [550395], [632015],
[505118], [499105], [486939], [624356], [455386]
]
}
)
def suite():
"""
Default test suite.
"""
base_suite = unittest.TestSuite()
base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))
base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))
return base_suite
if __name__ == '__main__':
unittest.main()
| 1.882813 | 2 |
diccionario/diccionario/dictionary/models.py | ssvargass/en-senas | 1 | 12794923 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from pyuploadcare.dj.models import ImageField
from taggit_autosuggest.managers import TaggableManager
@python_2_unicode_compatible
class Word(models.Model):
title = models.CharField(max_length=255)
image = ImageField(blank=True, manual_crop="")
tags = TaggableManager()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
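

# Illustrative usage (assumption -- requires a configured Django project with
# migrations applied; shown only to document the intended flow):
#
#   word = Word(title='hola')
#   word.publish()              # stamps published_date and saves
#   word.tags.add('greeting')   # taggit manager; needs a saved instance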
| 1.234375 | 1 |
eval-ccs2019/benchmark.py | nibau/zkay | 0 | 12795051 | <reponame>nibau/zkay<gh_stars>0
#!/usr/bin/env python3
# usage ./benchmark.py [example_dir]
# (example_dir contains subdirectories with example sol/zkay and scenario files)
# requires installed memory-profiler and zkay packages
import os
import datetime
import sys
import shutil
clean=False
file_dir = os.path.realpath(os.path.dirname(__file__))
base_dir = os.path.join(file_dir, 'examples') if len(sys.argv) < 2 else os.path.realpath(sys.argv[1])
backends = ['dummy', 'ecdh-chaskey', 'ecdh-aes'] #, 'rsa-pkcs1.5', 'rsa-oaep'] # rsa consumes >100 GB hdd space
for backend in backends:
for dirname in os.listdir(base_dir):
p = os.path.join(base_dir, dirname)
if os.path.isdir(p):
file = None
for filename in os.listdir(p):
if filename.endswith(('.sol', '.zkay')):
file = os.path.join(p, filename)
break
if file is not None:
out_dir = os.path.join(p, f'out_{backend}')
if clean and os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir, exist_ok=True)
print(f'compiling {file}, at {datetime.datetime.utcnow()}')
os.system(f"mprof run --include-children --nopython -o '{out_dir}/mprof_compile.dat' zkay compile '{file}' --verbosity 0 --crypto-backend {backend} --opt-hash-threshold 0 -o '{out_dir}' --log --log-dir '{out_dir}'")
scenario_file = os.path.join(p, 'scenario.py')
if os.path.exists(scenario_file):
print(f'running {scenario_file}, at {datetime.datetime.utcnow()}')
os.system(f"mprof run --include-children --nopython -o '{out_dir}/mprof_run.dat' python '{scenario_file}' '{out_dir}'")
| 1.65625 | 2 |
images/views.py | Cyci25/Gallery | 0 | 12795179 | <gh_stars>0
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
import datetime as dt
# Create your views here.
def welcome(request):
return render(request, 'image.html')
def image(request, id):
try:
image = Image.objects.get(pk = id)
except DoesNotExist:
raise Http404()
return render(request, 'images.html', {"image": image})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
searched_images = image.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"picture": searched_images})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
| 1.4375 | 1 |
pnno/engine/processor.py | zjykzj/pnno | 3 | 12795307 | # -*- coding: utf-8 -*-
"""
@date: 2020/7/14 下午8:34
@file: processor.py
@author: zj
@description:
"""
from ..anno import build_anno
from ..util.logger import setup_logger
class Processor(object):
"""
The labeled data is processed to create training data with specified format
"""
def __init__(self, cfg):
self.parser = build_anno(cfg.ANNO.PARSER, cfg)
self.creator = build_anno(cfg.ANNO.CREATOR, cfg)
self.logger = setup_logger(__name__)
self.verbose = cfg.ANNO.VERBOSE
def process(self):
verbose = self.verbose
logger = self.logger
if verbose:
logger.info('Processing original data')
output_data = self.parser.process()
if verbose:
logger.info('Save data in specified format')
self.creator.save(output_data)
if verbose:
logger.info('Finish!!!')
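

# Illustrative wiring (assumption -- pnno builds `cfg` elsewhere, e.g. from its
# config module; the names here are hypothetical):
#
#   processor = Processor(cfg)
#   processor.process()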
| 1.703125 | 2 |
smarkets/tests/streaming_api/utils.py | smarkets/smk_python_sdk | 20 | 12795435 | <reponame>smarkets/smk_python_sdk
from __future__ import absolute_import, division, print_function, unicode_literals
from nose.tools import eq_
from smarkets.streaming_api.seto import OrderCreate, Payload, PAYLOAD_ORDER_CREATE
from smarkets.streaming_api.utils import set_payload_message
def test_set_payload_message():
payload = Payload()
assert payload.type != PAYLOAD_ORDER_CREATE
oc = OrderCreate(quantity=123456)
set_payload_message(payload, oc)
eq_(payload.type, PAYLOAD_ORDER_CREATE)
eq_(payload.order_create, oc)
| 1.070313 | 1 |
Python3/Exercises/UnluckyNumbers/unlucky_numbers.py | norbertosanchezdichi/TIL | 0 | 12795563 | for number in range(1, 21):
if number == 4 or number == 13:
state = 'UNLUCKY'
elif number % 2 == 0:
state = 'EVEN'
else:
state = 'ODD'
print(f'{number} is {state}!') | 2.15625 | 2 |
tests/test_gosubdag_relationships_i126.py | flying-sheep/goatools | 477 | 12795691 | #!/usr/bin/env python
"""Test that GoSubDag contains ancestors from only the user-specified relationships"""
# tests/test_gosubdag_relationships_i126.py
# goatools/gosubdag/gosubdag.py
# goatools/gosubdag/godag_rcnt.py
# goatools/gosubdag/godag_rcnt_init.py
# goatools/godag/go_tasks.py
# goatools/obo_parser.py
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved."
from os.path import join
from os import system
import sys
## import timeit
## import datetime
import collections as cx
from goatools.base import get_godag
from goatools.godag.consts import RELATIONSHIP_SET
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.test_data.wr_subobo import WrSubObo
from tests.utils import REPO
# pylint: disable=line-too-long,unused-variable
def test_gosubdag_relationships(wr_new_obo_subset=False):
"""Test that GoSubDag contains ancestors from only the user-specified relationships"""
# Leaf GO: viral triggering of virus induced gene silencing
goid_chosen = 'GO:0060150'
# Load GODag with all relationships
fin_obo = join(REPO, "tests/data/i126/viral_gene_silence.obo") # "go-basic.obo")
godag_r0 = get_godag(fin_obo, loading_bar=None)
godag_r1 = get_godag(fin_obo, loading_bar=None, optional_attrs=['relationship'])
file_sub = join(REPO, "tests/data/viral_gene_silence.obo")
# Get all GO terms above this low-level GO ID using all relationships
if wr_new_obo_subset:
_wr_sub_obo(file_sub, goid_chosen, godag_r1, fin_obo)
# RELATIONSHIPS: None
gosubdag_r0 = GoSubDag(set([goid_chosen]), godag_r0)
assert len(gosubdag_r0.rcntobj.go2ancestors[goid_chosen]) == 12
# RELATIONSHIPS: ALL
gosubdag_r1 = GoSubDag(set([goid_chosen]), godag_r1, relationships=True)
assert gosubdag_r1.relationships == RELATIONSHIP_SET
#### set(['part_of', 'regulates', 'positively_regulates', 'negatively_regulates'])
assert len(gosubdag_r1.rcntobj.go2ancestors[goid_chosen]) == 50
# RELATIONSHIPS: part_of
gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'part_of'})
assert gosubdag_rp.relationships == set(['part_of'])
rp_par = gosubdag_rp.rcntobj.go2ancestors[goid_chosen]
assert 'GO:0016441' not in gosubdag_rp.go2obj, '**FATAL: REGULATION TERM GoSubDag(part_of) go2obj'
assert 'GO:0016441' not in rp_par, '**FATAL: REGULATION TERM GoSubDag(part_of) go2parents'
# RELATIONSHIPS: regulates
gosubdag_rr = GoSubDag(set([goid_chosen]), godag_r1, relationships={'regulates'})
assert gosubdag_rr.relationships == set(['regulates'])
rp_par = gosubdag_rr.rcntobj.go2ancestors[goid_chosen]
# assert 'GO:0016441' not in gosubdag_rp.go2obj, '**FATAL: REGULATION TERM GoSubDag(part_of) go2obj'
# assert 'GO:0016441' not in rp_par, '**FATAL: REGULATION TERM GoSubDag(part_of) go2parents'
# RELATIONSHIPS: positively_regulates
gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'positively_regulates'})
assert gosubdag_rp.relationships == set(['positively_regulates'])
rp_par = gosubdag_rp.rcntobj.go2ancestors[goid_chosen]
# RELATIONSHIPS: negatively_regulates
gosubdag_rn = GoSubDag(set([goid_chosen]), godag_r1, relationships={'negatively_regulates'})
assert gosubdag_rn.relationships == set(['negatively_regulates'])
rp_par = gosubdag_rn.rcntobj.go2ancestors[goid_chosen]
# RELATIONSHIPS: regulates positively_regulates negatively_regulates
regs = {'positively_regulates', 'negatively_regulates'}
gosubdag_rnp = GoSubDag(set([goid_chosen]), godag_r1, relationships=regs)
assert gosubdag_rnp.relationships == regs
rp_par = gosubdag_rnp.rcntobj.go2ancestors[goid_chosen]
_run_baseline_r0(gosubdag_r0, gosubdag_r1)
# BASELINE r1: Test that GOTerm.get_all_upper() is the same as GoSubDag ancestors
for goid, term in gosubdag_r1.go2obj.items():
ancestors_r1 = gosubdag_r1.rcntobj.go2ancestors.get(goid, set())
assert ancestors_r1 == term.get_all_upper()
#### # Test that
#### gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'part_of'}, prt=sys.stdout)
#### for goid, dag_term in godag_r1.items():
#### if goid in gosubdag_r1.rcntobj.go2ancestors:
#### ancestors = gosubdag_rp.rcntobj.go2ancestors[goid]
#### sub_term = gosubdag_rp.go2obj[goid]
#### reldict = sub_term.relationship.items()
#### # print(goid)
#### # print('DAG', sorted(dag_term.get_all_upper()))
#### # print('SUB', sorted(sub_term.get_all_upper()))
#### # print('ANS', sorted(ancestors))
#### # for rel, pterms in cx.OrderedDict(reldict).items():
#### # print(rel, ' '.join(sorted(o.id for o in pterms)))
#### # print('')
#### print(gosubdag_rp.relationships)
#### #assert 'GO:0016441' not in gosubdag_rp.rcntobj.go2ancestors['GO:0060150']
#### assert 'GO:0016441' in gosubdag_r1.go2nt
#### assert 'GO:0010467' in gosubdag_r1.go2nt
def _run_baseline_r0(gosubdag_r0, gosubdag_r1):
"""BASELINE r0: Test that GOTerm.get_all_parents() == GoSubDag ancestors"""
r1_ancestors_more = set()
# Loop through r0 GO IDs
for goid, term in gosubdag_r0.go2obj.items():
ancestors_r0 = gosubdag_r0.rcntobj.go2ancestors.get(goid, set())
ancestors_r1 = gosubdag_r1.rcntobj.go2ancestors.get(goid, set())
assert ancestors_r0 == term.get_all_parents()
assert ancestors_r0.issubset(ancestors_r1)
if len(ancestors_r0) < len(ancestors_r1):
r1_ancestors_more.add(goid)
assert r1_ancestors_more
print('{N} r1 GO terms in GoSubDag have more ancestors than r0'.format(
N=len(r1_ancestors_more)))
# scripts/go_plot.py --go_file=i126_goids_baseline.txt -r --obo=tests/data/viral_gene_silence.obo -o i126_goids_baseline.png
fout_gos = 'i126_goids_baseline.txt'
with open(fout_gos, 'w') as prt:
prt.write('#cafffb {SRC_GO}\n'.format(SRC_GO=next(iter(gosubdag_r0.go_sources))))
_prt_goterms(r1_ancestors_more, gosubdag_r1.go2nt, prt)
print(' WROTE: {GOs}'.format(GOs=fout_gos))
def _prt_goterms(goids, go2nt, prt):
"""Print details of GO terms"""
fmt = ('#ffd1df {GO} # {NS} {dcnt:5} {childcnt:3} '
'L{level:02} D{depth:02} R{reldepth:02} {D1:5} {REL} {rel} {GO_name}\n')
nts = [nt for go, nt in go2nt.items() if go in goids]
for ntd in sorted(nts, key=lambda nt: nt.dcnt, reverse=True):
prt.write(fmt.format(**ntd._asdict()))
#cafffb GO:0060150
#ffd1df GO:0050794 # BP 8278 64 D03 R03 regulation of cellular process
#ffd1df GO:0019222 # BP 3382 20 D03 R03 regulation of metabolic process
#ffd1df GO:0048522 # BP 2417 65 D04 R04 positive regulation of cellular process
#ffd1df GO:0060255 # BP 2130 20 D04 R04 regulation of macromolecule metabolic process
#ffd1df GO:0010468 # BP 862 20 D05 R05 regulation of gene expression
#ffd1df GO:0060968 # BP 53 4 D06 R08 regulation of gene silencing
#ffd1df GO:0060147 # BP 24 4 D07 R09 regulation of posttranscriptional gene silencing
#ffd1df GO:0060148 # BP 8 3 D08 R10 positive regulation of posttranscriptional gene silencing
#ffd1df GO:0060150 # BP 0 0 D09 R11 viral triggering of virus induced gene silencing
# - Generate GO DAG subset for this test ---------------------------------------------------------
def _wr_sub_obo(fout_obo, goid_chosen, godag_r1, fin_obo):
"""Sub plot used for visualizing this test file's elements"""
# Load GO-DAG: Load optional 'relationship'
godag = {go:o for go, o in godag_r1.items() if go == o.item_id}
_prt_rtel_ctr(godag)
rels_all = set(['part_of', 'regulates', 'negatively_regulates', 'positively_regulates'])
goids_leaf_all = set(o.id for o in godag.values() if not o.children)
gosubdag_r1 = GoSubDag(goids_leaf_all, godag, relationships=True, prt=sys.stdout)
goids_src_r1_all = _get_leafs_w_relsinhier(rels_all, gosubdag_r1)
gosubdag_r1.prt_goids(goids_src_r1_all)
# Pick one of the GO IDs as a source for the subset DAG
gosubdag_viral = GoSubDag({goid_chosen}, godag, relationships=True, prt=sys.stdout)
goids_viral = set(gosubdag_viral.go2obj.keys())
with open(fout_obo, 'w') as prt:
WrSubObo.prt_goterms(fin_obo, goids_viral, prt)
print('{N} GO IDs WROTE: {OBO}'.format(N=len(goids_viral), OBO=fout_obo))
# Plot obo subset
pat_r1 = '{REPO}/scripts/go_plot.py {GO} -o {PNG} -r'
pat_r0 = '{REPO}/scripts/go_plot.py {GO} -o {PNG}'
system(pat_r1.format(REPO=REPO, PNG=fout_obo.replace('.obo', '_r1.png'), GO=goid_chosen))
system(pat_r0.format(REPO=REPO, PNG=fout_obo.replace('.obo', '_r0.png'), GO=goid_chosen))
def _get_leafs_w_relsinhier(rels_usr, gosubdag_r1):
"""Get GO IDs that have all relationships up their hierarchy."""
gos_r1_relsinhier = set()
goids_leaf = set(o.id for o in gosubdag_r1.go2obj.values() if not o.children)
for goid in goids_leaf:
go_parents = gosubdag_r1.rcntobj.go2ancestors[goid]
rels = set(k for p in go_parents for k in gosubdag_r1.go2obj[p].relationship.keys())
if rels == rels_usr:
gos_r1_relsinhier.add(goid)
return gos_r1_relsinhier
def _prt_rtel_ctr(godag):
"""Print the count of relationships."""
objs_r1_all = set(o for o in godag.values() if o.relationship.keys())
octr = cx.Counter(k for o in objs_r1_all for k in o.relationship.keys())
# objs_r1_sub = set(o.id for o in objs_r1_all if not rels_all.isdisjoint(o.relationship.keys()))
print('{N:6,} GO Terms have relationships.'.format(N=len(objs_r1_all)))
for key, cnt in octr.most_common():
print('{N:6,} {REL}'.format(N=cnt, REL=key))
# def _chk_child_parent(go2o_dag, go2o_sub):
# """Check the differences between the two go2obb dicts."""
# pass
if __name__ == '__main__':
test_gosubdag_relationships(len(sys.argv) != 1)
# Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved.
| 1.328125 | 1 |
lib/googlecloudsdk/command_lib/resource_manager/tag_arguments.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2 | 12795819 | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for defining CRM Tag arguments on a parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
def AddShortNameArgToParser(parser):
"""Adds positional argument to parser.
Args:
parser: ArgumentInterceptor, an argparse parser.
"""
parser.add_argument(
"short_name",
metavar="SHORT_NAME",
help=("User specified, friendly name of the TagKey or TagValue. The field"
" must be 1-63 characters, beginning and ending with an "
"alphanumeric character ([a-z0-9A-Z]) with dashes (-), "
"underscores ( _ ), dots (.), and alphanumerics between. "))
def AddParentArgToParser(parser, required=True, message=""):
"""Adds argument for the TagKey or TagValue's parent to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
required: Boolean, to enforce --parent as a required flag.
message: String, replacement help text for flag.
"""
parser.add_argument(
"--parent",
metavar="PARENT",
required=required,
help=message if message else ("Parent of the resource."))
def AddDescriptionArgToParser(parser):
"""Adds argument for the TagKey's or TagValue's description to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--description",
metavar="DESCRIPTION",
help=("User-assigned description of the TagKey or TagValue. "
"Must not exceed 256 characters."))
def AddPurposeArgToParser(parser):
"""Adds argument for the TagKey's purpose to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--purpose",
metavar="PURPOSE",
choices=["GCE_FIREWALL"],
help=("Purpose specifier of the TagKey that can only be set on creation. "
"Specifying this field adds additional validation from the policy "
"system that corresponds to the purpose."))
def AddPurposeDataArgToParser(parser):
"""Adds argument for the TagKey's purpose data to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--purpose-data",
type=arg_parsers.ArgDict(
spec={"network": str},
max_length=1,
),
help=("Purpose data of the TagKey that can only be set on creation. "
"This data is validated by the policy system that corresponds"
" to the purpose."))
def AddAsyncArgToParser(parser):
"""Adds async flag to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
base.ASYNC_FLAG.AddToParser(parser)
def AddResourceNameArgToParser(parser):
"""Adds resource name argument for the namespaced name or resource name to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"RESOURCE_NAME",
metavar="RESOURCE_NAME",
help=("Resource name or namespaced name. The resource name should "
"be in the form {resource_type}/{numeric_id}. The namespaced name "
"should be in the form {org_id}/{short_name} where short_name "
"must be 1-63 characters, beginning and ending with an "
"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores "
"( _ ), dots (.), and alphanumerics between."))
def AddForceArgToParser(parser):
"""Adds force argument to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--force", action="store_true", help=("Force argument to bypass checks."))
def AddPolicyFileArgToParser(parser):
"""Adds argument for the local Policy file to set.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"POLICY_FILE",
metavar="POLICY_FILE",
help=(
"Path to a local JSON or YAML formatted file containing a valid "
"policy. The output of the `get-iam-policy` command is a valid "
"file, as is any JSON or YAML file conforming to the structure of "
"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy)."))
def AddTagValueArgToParser(parser):
"""Adds the TagValue argument to the parser.
Args:
parser: ArgumentInterceptor, An argparse parser.
"""
parser.add_argument(
"--tag-value",
metavar="TAG_VALUE",
required=True,
help=("Tag value name or namespaced name. The name should "
"be in the form tagValues/{numeric_id}. The namespaced name "
"should be in the form {org_id}/{tag_key_short_name}/{short_name} "
"where short_name must be 1-63 characters, beginning and ending "
"with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), "
"underscores (_), dots (.), and alphanumerics between."))
def AddLocationArgToParser(parser, message):
"""Adds argument for the location.
Args:
parser: ArgumentInterceptor, An argparse parser.
message: String, help text for flag.
"""
parser.add_argument(
"--location", metavar="LOCATION", required=False, help=message)
| 1.304688 | 1 |
day_3_1.py | Nishant-Mishra/Advent_of_Code_2019 | 1 | 12795947 | #!/usr/bin/python3 -u
import sys
def puzzle(filename):
with open(filename, "r") as f:
path1 = f.readline()
path2 = f.readline()
path1_list_str = path1.strip("\n").split(",")
path2_list_str = path2.strip("\n").split(",")
# print(path1_list)
# print(path2_list)
# Get relative coords of path
path1_list = []
path2_list = []
for i in range(0, len(path1_list_str)):
path1_list.append(get_coord(path1_list_str[i]))
for i in range(0, len(path2_list_str)):
path2_list.append(get_coord(path2_list_str[i]))
# print(path1_list)
# print(path2_list)
# Get absolute coords of line segments
path1 = {"complete": [(0, 0)]}
for i in range(0, len(path1_list)):
if i:
path1["complete"].insert(i + 1, (path1["complete"][i][0] + path1_list[i][0], path1["complete"][i][1] + path1_list[i][1]))
else:
path1["complete"].insert(1, path1_list[0])
path2 = {"complete": [(0, 0)]}
for i in range(0, len(path2_list)):
if i:
path2["complete"].insert(i + 1, (path2["complete"][i][0] + path2_list[i][0], path2["complete"][i][1] + path2_list[i][1]))
else:
path2["complete"].insert(1, path2_list[0])
# Segregate vertical and horizontal lines
path1["vertical"] = []
path1["horizontal"] = []
for i in range(1, len(path1["complete"])):
# 'x' coord is same
if path1["complete"][i - 1][0] == path1["complete"][i][0]:
path1["vertical"].append((path1["complete"][i - 1], path1["complete"][i]))
elif path1["complete"][i - 1][1] == path1["complete"][i][1]:
path1["horizontal"].append((path1["complete"][i - 1], path1["complete"][i]))
path2["vertical"] = []
path2["horizontal"] = []
for i in range(1, len(path2["complete"])):
# 'x' coord is same
if path2["complete"][i - 1][0] == path2["complete"][i][0]:
path2["vertical"].append((path2["complete"][i - 1], path2["complete"][i]))
elif path2["complete"][i - 1][1] == path2["complete"][i][1]:
path2["horizontal"].append((path2["complete"][i - 1], path2["complete"][i]))
# print("%s\n" % path1["horizontal"])
# print("%s\n" % path1["vertical"])
# print("%s\n" % path2["horizontal"])
# print("%s\n" % path2["vertical"])
intersection_points_list = []
    # Check if a horizontal line of one path intersects a vertical line of the other, and vice versa
for h_seg in path1["horizontal"]:
for v_seg in path2["vertical"]:
intersection_point = check_intersection(h_seg, v_seg)
if intersection_point:
intersection_points_list.append(intersection_point)
for h_seg in path2["horizontal"]:
for v_seg in path1["vertical"]:
intersection_point = check_intersection(h_seg, v_seg)
if intersection_point:
intersection_points_list.append(intersection_point)
print(intersection_points_list)
dist = abs(intersection_points_list[0][0]) + abs(intersection_points_list[0][1])
for point in intersection_points_list[1:]:
if dist > (abs(point[0]) + abs(point[1])):
dist = abs(point[0]) + abs(point[1])
print("Shortest Dist: %d" % dist)
def get_coord(cmd):
if cmd[0] == "R":
return (int(cmd[1:]), 0)
elif cmd[0] == "L":
return (-int(cmd[1:]), 0)
elif cmd[0] == "U":
return (0, int(cmd[1:]))
elif cmd[0] == "D":
return (0, -int(cmd[1:]))
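
# Quick sanity check (illustrative, not part of the original script):
#   get_coord('R75') == (75, 0)   and   get_coord('D30') == (0, -30)
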
def check_intersection(horiz, vert):
x = vert[0][0]
y1 = vert[0][1]
y2 = vert[1][1]
w = horiz[0][1]
z1 = horiz[0][0]
z2 = horiz[1][0]
to_return = None
if (z1 < z2 and y1 < y2 and z1 <= x <= z2 and y1 <= w <= y2) or\
(z1 > z2 and y1 < y2 and z1 >= x >= z2 and y1 <= w <= y2) or\
(z1 < z2 and y1 > y2 and z1 <= x <= z2 and y1 >= w >= y2) or\
(z1 > z2 and y1 > y2 and z1 >= x >= z2 and y1 >= w >= y2) :
to_return = (x, w)
    if to_return:
        print("<< %s :: %s >> == %s" % (horiz, vert, (x, w)))
    return to_return
def get_all_points_on_path(c1, c2):
coord_list = []
if c1[0] == c2[0]:
if c1[1] < c2[1]:
for i in range(c1[1], c2[1] + 1):
coord_list.append((c1[0], i))
else:
for i in range(c2[1], c1[1] + 1):
coord_list.append((c1[0], i))
# coord_list.reverse()
elif c1[1] == c2[1]:
if c1[0] < c2[0]:
for i in range(c1[0], c2[0] + 1):
coord_list.append((i, c1[1]))
else:
for i in range(c2[0], c1[0] + 1):
coord_list.append((i, c1[1]))
# coord_list.reverse()
return coord_list
def main():
puzzle("input_day_3_1.txt")
# test()
def test():
p = get_all_points_on_path((123, 67), (123, 15))
p = get_all_points_on_path((-123, 67), (123, 67))
print(p)
if __name__ == "__main__":
main()
| 2.71875 | 3 |
utils/utils.py | toandaominh1997/ProductDetectionShopee | 0 | 12796075 | import torch
def get_state_dict(model):
if type(model) == torch.nn.DataParallel:
state_dict = model.module.state_dict()
else:
state_dict = model.state_dict()
return state_dict
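

# Illustrative usage (assumption -- this demo block is not in the original file):
if __name__ == '__main__':
    model = torch.nn.Linear(4, 2)
    wrapped = torch.nn.DataParallel(model)
    # Both calls return the underlying module's parameter dict.
    assert get_state_dict(model).keys() == get_state_dict(wrapped).keys()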
| 1.375 | 1 |
Informatik1/Midterms Prep/midterms hs19/count_keywords.py | Queentaker/uzh | 8 | 12796203 | <reponame>Queentaker/uzh<filename>Informatik1/Midterms Prep/midterms hs19/count_keywords.py
def count_keywords(path, keywords):
words = []
sol = dict()
with open(path) as file:
for line in file:
for word in line.split():
words.append(word.lower())
    for element in words:
        if element in keywords:
            if element in sol:
                sol[element] += 1
            else:
                sol[element] = 1
    return sol
print(count_keywords("/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt", ["forest", "the", "found"]))
print(count_keywords("/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt", ["black"]))
print(count_keywords("/Users/merterol/Desktop/uzhpython/uzh/Informatik1/MidtermsInfk/midterms hs19/text.txt", [])) | 2.453125 | 2 |
src/stock_logger.py | SteveZhengMe/TK-Gui-Assignment | 0 | 12796331 | <gh_stars>0
# This project is a college assignment.
# Purpose: practice Tk GUI programming and database connectivity
# Usage: the user can add stock records to an SQLite database, then search and list them
#
# Author: <NAME>
# Date: 2021-03-17
import tkinter as tk
from tkinter import ttk, messagebox, filedialog
from functools import partial
from datetime import datetime
import os
import sqlite3
from sqlite3 import Error
##
# Validate the input
class Validator:
# Return error message if the input is not a number (float)
# Return None if the input is valid
def isNumber(self,input):
errMsg = "Please input a number."
try:
if input=='NaN':
return errMsg
float(input)
except ValueError:
return errMsg
else:
return None
# Return error message if the input is blank
# Return None if the input is valid
def isEmpty(self, input):
errMsg = "Value required"
if input != "":
return None
else:
return errMsg
# Return error message if the input is not in a "yyyy-MM-dd" format
# Return None if the input is valid
def isDate(self, input):
errMsg = "Please input a date in yyyy-MM-dd format."
try:
datetime.strptime(input, "%Y-%m-%d")
except ValueError:
return errMsg
else:
return None
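
# Illustrative checks (assumption -- not part of the original file):
#   v = Validator()
#   v.isNumber('3.14')      -> None (valid)
#   v.isDate('2021-03-17')  -> None (valid)
#   v.isEmpty('')           -> 'Value required'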
##
# One label and one combobox
class LabelDDCombo(tk.Frame):
def __init__(self, parent, labelName="Label Name", entryState="normal", packSide="left", size=(0,0), margin=(0,0),ddItems=[],*args,**kw):
super().__init__(master=parent, *args, **kw)
# Create label and pack
self.label = tk.Label(self, text=labelName, font=("Courier",9), fg="#333", anchor="e")
if size[0] != None:
self.label.config(width=size[0])
self.label.pack(side=packSide, padx=(margin[0],0), pady=margin[1])
# Create input and pack
self.inputValue = tk.StringVar()
self.input = ttk.Combobox(self, textvariable = self.inputValue, state=entryState, values=ddItems)
self.input.current(0)
if size[1] != None:
self.input.config(width=size[1])
self.input.pack(side=packSide, padx=(0,margin[0]), pady=margin[1])
    # When the value is invalid, this handler will display an error message.
# The handler should be .config(text=XXX)
def setInputErrorHandler(self, handler):
self.errHandler = handler
    # The validator: a combobox only needs a non-empty selection, so check
    # isEmpty directly (this class defines no validatorArray of its own).
    def validator(self):
        validator = Validator()
        validationErrors = validator.isEmpty(self.getDDValue())
        if validationErrors is not None:
            # Output the error message to the "error handler"
            self.errHandler.config(text=self.label["text"] + " - " + validationErrors)
            return False
        return True
# When focus, focus the input box
def focus(self):
self.input.focus()
# Return the input value
def getDDValue(self):
return self.inputValue.get()
def setValue(self, valueIndex):
self.input.current(valueIndex)
##
# One label and one input box
class LabelInputCombo(tk.Frame):
def __init__(self, parent, labelName="Label Name", entryState="normal", packSide="left", size=(0,0), margin=(0,0),validateArray=[],*args,**kw):
super().__init__(master=parent, *args, **kw)
# validateArray = ["isNumber", "isEmpty"], means the value needs two validation
self.validatorArray = validateArray
# Create label and pack
self.label = tk.Label(self, text=labelName, font=("Courier",9), fg="#333", anchor="e")
if size[0] != None:
self.label.config(width=size[0])
self.label.pack(side=packSide, padx=(margin[0],0), pady=margin[1])
# Create input and pack
self.inputValue = tk.StringVar()
self.input = tk.Entry(self, textvariable=self.inputValue, state=entryState)
if size[1] != None:
self.input.config(width=size[1])
self.input.pack(side=packSide, padx=(0,margin[0]), pady=margin[1])
    # When the value is invalid, this handler will display an error message.
# The handler should be .config(text=XXX)
def setInputErrorHandler(self, handler):
self.errHandler = handler
# The validator. It will call validator class and loop
def validator(self):
self.errHandler.config(text="No Error")
validator = Validator()
for valRules in self.validatorArray:
            # getattr looks up the rule method by name; unlike eval() it cannot
            # break on quotes or execute arbitrary user input
            validationErrors = getattr(validator, valRules)(self.inputValue.get())
if validationErrors != None:
# Output the error message to "error handler"
self.errHandler.config(text=self.label["text"] + " - " + validationErrors)
self.input.delete(0,"end")
return False
return True
# When focus, focus the input box
def focus(self):
self.input.focus()
# Return the input value
def getInputValue(self):
return self.inputValue.get()
    def setValue(self, value):
        if self.input["state"].lower() == "disabled":
            # Temporarily enable the entry so the new text can be written
            self.input.config(state="normal")
            self.input.delete(0, "end")
            self.input.insert(0, value)
            self.input.config(state="disabled")
        else:
            self.input.delete(0, "end")
            self.input.insert(0, value)
# Table view
class TreeViewWithScrollBar(tk.Frame):
def __init__(self, parent, columnsAttr, tableRows=5, *args,**kw):
super().__init__(master=parent, *args, **kw)
columns = list(item["colName"] for item in columnsAttr)
self.treeview = ttk.Treeview(self, height=tableRows, show="headings", columns=columns)
for aColumn in columnsAttr:
self.treeview.column(aColumn["colName"], width=aColumn["width"], anchor=aColumn["anchor"])
self.treeview.heading(aColumn["colName"], text=aColumn["colName"])
treeScroll = ttk.Scrollbar(self, orient="vertical",command=self.treeview.yview)
self.treeview.grid(row=0,column=0)
treeScroll.grid(row=0,column=1,sticky="NSEW")
self.treeview.configure(yscrollcommand=treeScroll.set)
def addValues(self,valueArray):
self.treeview.insert('','end',values=valueArray)
def clearAll(self):
self.treeview.delete(*self.treeview.get_children())
def setValues(self, tupleArray):
if tupleArray is not None:
self.clearAll()
for row in tupleArray[0]:
self.addValues(row)
def getRecordsCount(self):
return len(self.treeview.get_children())
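
# Illustrative usage (assumption -- not part of the original file):
#   table = TreeViewWithScrollBar(parent, [
#       {"colName": "ID", "width": 40, "anchor": "center"},
#       {"colName": "Symbol", "width": 80, "anchor": "center"}], tableRows=10)
#   table.addValues((1, "NOK"))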
##
# A layout to group some elements.
# Support two layouts:
# Use "h" to pack horizontally
# Use "v" to pack vertically
class LayoutFrame(tk.Frame):
def __init__(self, parent, *args, **kw):
super().__init__(master=parent, *args, **kw)
def layout(self, layout, *items):
if items != None:
for item in items:
if layout == "v":
item.pack(side='top', pady=5)
else:
item.pack(side='left', padx=5)
return self
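
# Illustrative usage (assumption -- not part of the original file): pack three
# widgets left-to-right and place the row with grid():
#   row = LayoutFrame(parent).layout("h", widget1, widget2, widget3)
#   row.grid(row=0, column=0)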
############################# Above are the widgets; Below are the UI design ###########################
##
# "Activity Display" contains two buttons on the top: Summary and Activities
class ActivityDisplayWindow(tk.Frame):
summaryFrame = None
activitiesDataTableFrame = None
dbName = "stocks.db"
    def __init__(self, parent):
        super().__init__(master=parent)  # keep the tk.Frame base class initialized
        self.parent = parent
self.parent.resizable(False, False)
self.windowSelfConfig()
self.createWidgets()
def windowSelfConfig(self):
self.parent.geometry('400x600+20+20')
self.parent.title("Activities Display")
self.parent.protocol("WM_DELETE_WINDOW", self.onClose)
def onClose(self):
if messagebox.askokcancel("Quit", "Do you want to quit both two windows?"):
self.parent.destroy()
def createWidgets(self):
# self.parent.rowconfigure(0,weight=1)
self.parent.columnconfigure(0,weight=1)
topButtonsArea = LayoutFrame(self.parent)
self.summaryButton = tk.Button(topButtonsArea, text="Summary", command=partial(self.switchButtonOnClick,"summary"))
self.activitiesButton = tk.Button(topButtonsArea, text="Activities", command=partial(self.switchButtonOnClick,"Activities"))
topButtonsArea.layout("h",self.summaryButton, self.activitiesButton).grid(row=0,column=0,pady=10)
self.buildSummaryPage()
def buildSummaryPage(self):
if self.summaryFrame is None:
self.summaryFrame = LayoutFrame(self.parent)
self.uniqueStockSymbols = tk.StringVar()
self.oldestTransactionSummary = LabelInputCombo(self.summaryFrame, labelName="Oldest Transaction:", entryState="disabled", size=(22,22), margin=(2,2))
self.newestTransactionSummary = LabelInputCombo(self.summaryFrame, labelName="Newest Transaction:", entryState="disabled", size=(22,22), margin=(2,2))
            self.cheapestPriceSummary = LabelInputCombo(self.summaryFrame, labelName="Cheapest Price:", entryState="disabled", size=(22,22), margin=(2,2))
self.mostExpensivePriceSummary = LabelInputCombo(self.summaryFrame, labelName="Most Expensive Price:", entryState="disabled", size=(22,22), margin=(2,2))
self.mostTradedStockSummary = LabelInputCombo(self.summaryFrame, labelName="Most Traded Stock:", entryState="disabled", size=(22,22), margin=(2,2))
self.summaryFrame.layout("v",
tk.Label(self.summaryFrame, text="", font=("Arial", 14), anchor="w"),
tk.Label(self.summaryFrame, text="Unique Stock Symbols", font=("Arial", 14), anchor="w"),
tk.Listbox(self.summaryFrame, listvariable=self.uniqueStockSymbols),
tk.Label(self.summaryFrame, text="", font=("Arial", 14), anchor="w"),
tk.Label(self.summaryFrame, text="Summary", font=("Arial", 14), anchor="w"),
self.oldestTransactionSummary,
self.newestTransactionSummary,
                self.cheapestPriceSummary,
self.mostExpensivePriceSummary,
self.mostTradedStockSummary
)
self.summaryFrame.grid(row=1,column=0)
self.updateInfo()
def buildActivitiesPage(self):
if self.activitiesDataTableFrame is None:
self.activitiesDataTableFrame = TreeViewWithScrollBar(self.parent,[
{"colName":"ID","width":10,"anchor":"center"},
{"colName":"Date","width":100,"anchor":"center"},
{"colName":"Symbol","width":80,"anchor":"center"},
{"colName":"Transation","width":70,"anchor":"center"},
{"colName":"Quantity","width":70,"anchor":"center"},
{"colName":"Price$","width":60,"anchor":"center"}],tableRows=26)
self.activitiesDataTableFrame.grid(row=1,column=0)
self.updateInfo()
# Update the data from DB
def updateInfo(self):
dataController = DataController(self.dbName)
if self.summaryFrame is not None:
summaryResults = dataController.getSummaryInfo()
if summaryResults is not None:
tradeSymbols = summaryResults[0]
self.uniqueStockSymbols.set([x[0] for x in tradeSymbols])
                oldestTrade = summaryResults[1][0]
                self.oldestTransactionSummary.setValue("%s %s %s" % (oldestTrade[1],oldestTrade[3],oldestTrade[2]))
newestTrade = summaryResults[2][0]
self.newestTransactionSummary.setValue("%s %s %s" % (newestTrade[1],newestTrade[3],newestTrade[2]))
cheapestTrade = summaryResults[3][0]
                self.cheapestPriceSummary.setValue("$%0.2f %s %s" % (cheapestTrade[5],cheapestTrade[3],cheapestTrade[2]))
expensiveTrade = summaryResults[4][0]
self.mostExpensivePriceSummary.setValue("$%0.2f %s %s" % (expensiveTrade[5],expensiveTrade[3],expensiveTrade[2]))
mostTrade = summaryResults[5][0]
self.mostTradedStockSummary.setValue("%s (%d Transactions)" % (mostTrade[1],mostTrade[0]))
if self.activitiesDataTableFrame is not None:
self.activitiesDataTableFrame.setValues(dataController.listTransactions())
def switchButtonOnClick(self, activity):
if activity.lower() == "summary":
if self.activitiesDataTableFrame is not None:
self.activitiesDataTableFrame.grid_forget()
self.buildSummaryPage()
elif activity.lower() == "activities":
if self.summaryFrame is not None:
self.summaryFrame.grid_forget()
self.buildActivitiesPage()
##
# "Activity Entry" provides the data-entry form plus record/clear/search/export actions
class ActivityEntryWindow(tk.Frame):
# will be overwritten in class constructor
dbName = "stocks_test.db"
def __init__(self,parent, parentWindowClass):
self.parent = parent
self.parentClass = parentWindowClass
self.dbName = parentWindowClass.dbName
self.parent.resizable(False, False)
self.windowSelfConfig()
self.createWidgets()
def windowSelfConfig(self):
self.parent.geometry('400x600+450+20')
self.parent.title("Activity Entry")
self.parent.protocol("WM_DELETE_WINDOW", self.onClose)
# Destroy parent window
def onClose(self):
        if messagebox.askokcancel("Quit", "Do you want to quit both windows?"):
self.parentClass.parent.destroy()
def createWidgets(self):
self.errorMessageDisplay = tk.Label(self.parent, text="No Error", font=("Arial", 10), fg="red", anchor="w")
self.dataInputForm().pack(side="top", pady=(20,10))
self.buttons().pack(side="top", pady=(0,20))
tk.Label(self.parent, text="All Transactions", font=("Arial", 14), anchor="w").pack(side="top")
self.allTransactions = TreeViewWithScrollBar(self.parent,[
{"colName":"ID","width":10,"anchor":"center"},
{"colName":"Date","width":100,"anchor":"center"},
{"colName":"Symbol","width":80,"anchor":"center"},
{"colName":"Transation","width":70,"anchor":"center"},
{"colName":"Quantity","width":70,"anchor":"center"},
{"colName":"Price","width":60,"anchor":"center"}],tableRows=19)
self.allTransactions.pack(side="top", pady=(10,0), fill="both")
self.errorMessageDisplay.pack(side="bottom", fill="x")
self.updateTransactions()
def dataInputForm(self):
dataInputFrame = LayoutFrame(self.parent)
self.dateInput = LabelInputCombo(dataInputFrame, labelName="Date", validateArray=["isDate", "isEmpty"], size=(5,10), packSide="top", margin=(1,1))
self.dateInput.setInputErrorHandler(self.errorMessageDisplay)
self.symbolInput = LabelInputCombo(dataInputFrame, labelName="Symbol", validateArray=["isEmpty"], size=(6,6), packSide="top", margin=(2,2))
self.symbolInput.setInputErrorHandler(self.errorMessageDisplay)
        self.transactionInput = LabelDDCombo(dataInputFrame, labelName="Transaction", ddItems=["","buy","sell"], size=(10,5),entryState="readonly",packSide="top", margin=(2,2))
        self.transactionInput.setInputErrorHandler(self.errorMessageDisplay)
self.quantityInput = LabelInputCombo(dataInputFrame, labelName="Quantity", validateArray=["isNumber", "isEmpty"], size=(8,8), packSide="top", margin=(2,2))
self.quantityInput.setInputErrorHandler(self.errorMessageDisplay)
self.priceInput = LabelInputCombo(dataInputFrame, labelName="Price", validateArray=["isNumber", "isEmpty"], size=(5,6), packSide="top", margin=(2,2))
self.priceInput.setInputErrorHandler(self.errorMessageDisplay)
dataInputFrame.layout('h',
self.dateInput,
self.symbolInput,
            self.transactionInput,
self.quantityInput,
self.priceInput
)
return dataInputFrame
def buttons(self):
buttonsFrame = LayoutFrame(self.parent)
recordButton = tk.Button(buttonsFrame, text="Record", command=self.recordOnClick)
clearButton = tk.Button(buttonsFrame, text="Clear", command=self.clearOnClick)
        searchButton = tk.Button(buttonsFrame, text="Search", command=self.searchOnClick)
exportButton = tk.Button(buttonsFrame, text="Export", command=self.exportOnClick)
buttonsFrame.layout('h', recordButton, clearButton, searchButton, exportButton)
return buttonsFrame
def updateTransactions(self):
self.allTransactions.setValues(DataController(self.dbName).listTransactions())
def generateParametersDict(self):
queryDict = {}
if self.dateInput.getInputValue() != "" and self.dateInput.validator():
queryDict["transaction_date"] = self.dateInput.getInputValue()
if self.symbolInput.getInputValue() != "" and self.symbolInput.validator():
queryDict["symbol"] = self.symbolInput.getInputValue()
        if self.transactionInput.getDDValue() != "":
            queryDict["transaction_direction"] = self.transactionInput.getDDValue()
if self.quantityInput.getInputValue() != "" and self.quantityInput.validator():
queryDict["Quantity"] = self.quantityInput.getInputValue()
if self.priceInput.getInputValue() != "" and self.priceInput.validator():
queryDict["price"] = self.priceInput.getInputValue()
return queryDict
def recordOnClick(self):
inputDict = self.generateParametersDict()
        # 5 means every one of the five fields was filled in and validated
if len(inputDict) == 5:
if DataController(self.dbName).addTransaction(inputDict["transaction_date"],inputDict["symbol"],inputDict["transaction_direction"],inputDict["Quantity"],inputDict["price"]):
self.updateTransactions()
self.parentClass.updateInfo()
self.clearOnClick()
self.errorMessageDisplay.config(text="Insert Successfully")
else:
self.errorMessageDisplay.config(text="Insert Fail.")
else:
self.errorMessageDisplay.config(text="Please complete all input items")
def clearOnClick(self):
self.dateInput.setValue("")
self.symbolInput.setValue("")
        self.transactionInput.setValue(0)
self.quantityInput.setValue("")
self.priceInput.setValue("")
self.errorMessageDisplay.config(text="All inputs are cleared")
def searchOnClick(self):
self.allTransactions.setValues(DataController(self.dbName).listTransactions(self.generateParametersDict()))
self.errorMessageDisplay.config(text=" %d records returned" % self.allTransactions.getRecordsCount())
def exportOnClick(self):
        destFile = filedialog.asksaveasfile(filetypes=[('Text Document', '*.txt')], defaultextension=".txt")
if destFile is not None:
exportResult = DataController(self.dbName).listTransactions()
if exportResult:
destFile.write("User Activity")
for record in exportResult[0]:
destFile.write("\n%d, %s, %s, %s, %d, %.2f" % record)
destFile.close()
self.errorMessageDisplay.config(text="Export Successfully")
################################# Above are UI design, below are database access code ########################
##
# Controller: Manipulate the data and return to View
class DataController:
def __init__(self, dataFile):
self.db = dataFile
if not os.path.exists(dataFile):
# Create Data
if not self.initializeDatabase(withData = True):
raise Exception("Database Initialize Error")
# get all information in one connection
def getSummaryInfo(self):
isSuccess, dataResult = self.runSql([
'select distinct symbol from stocks',
'select * from stocks order by transaction_date asc limit 1',
'select * from stocks order by transaction_date desc limit 1',
'select * from stocks order by price asc limit 1',
'select * from stocks order by price desc limit 1',
'select count(id) as trade_times, symbol from stocks group by symbol order by trade_times desc limit 1'
])
if isSuccess:
return dataResult
return None
def listTransactions(self, paramDict={}):
queryParam = []
for item, value in paramDict.items():
if type(value) is str:
queryParam.append(item + "='" + value + "'")
else:
queryParam.append(item + "=" + str(value))
where = ""
if len(queryParam) > 0:
where = "where " + " and ".join(queryParam)
# TODO: put it in debug log
#print('select * from stocks ' + where + ' order by transaction_date asc')
isSuccess, dataResult = self.runSql([
'select * from stocks ' + where + ' order by transaction_date asc'
])
if isSuccess:
return dataResult
return None
def addTransaction(self, transDate,symbol,trans,quantity,price):
isSuccess, dataResult = self.runSql(
["insert into stocks (transaction_date,symbol,transaction_direction,Quantity,price) values (?,?,?,?,?)"],
[(transDate, symbol, trans, quantity, price)]
)
return isSuccess
# Run sql, support batch
# return 1: True/False for update/delete/insert
# return 2: fetch data for select
def runSql(self, sqlStatementArray, sqlStatementParamArray=[]):
conn = None
if len(sqlStatementParamArray) > 0:
if len(sqlStatementArray) != len(sqlStatementParamArray):
return False,[]
fetchResult = []
try:
conn = sqlite3.connect(self.db)
needCommit = False
for i in range(len(sqlStatementArray)):
if len(sqlStatementParamArray) > 0:
queryResults = conn.execute(sqlStatementArray[i], sqlStatementParamArray[i])
else:
queryResults = conn.execute(sqlStatementArray[i])
if sqlStatementArray[i].strip().lower().startswith("select"):
fetchResult.append(queryResults.fetchall())
else:
needCommit = True
if needCommit:
conn.commit()
        except sqlite3.Error as e:
# TODO: Log the error
print(e)
return False, []
else:
return True, fetchResult
finally:
if conn:
conn.close()
# Create Table and initialize Data
    # Transaction Date: yyyy-MM-dd
# Stock Symbol: MSFT
# Transaction: Buy/Sell
# Quantity: 100
    # Transaction Price: 12.34
def initializeDatabase(self, withData = False):
if self.runSql(['''CREATE TABLE stocks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
transaction_date DATE,
symbol text,
transaction_direction text,
Quantity INTEGER,
price REAL
)'''])[0]:
return self.runSql(
["insert into stocks (transaction_date,symbol,transaction_direction,Quantity,price) values (?,?,?,?,?)" for x in range(10)],
[
('2020-01-01', 'AAPL', 'buy', 100, 12.3),
('2020-02-01', 'MSFT', 'buy', 80, 8.3),
('2020-03-01', 'AAPL', 'sell', 80, 10.3),
('2020-04-01', 'MSFT', 'sell', 80, 10.4),
('2020-05-01', 'AAPL', 'sell', 100, 9.3),
('2020-06-01', 'AAPL', 'buy', 100, 14.3),
('2020-07-01', 'MSFT', 'buy', 100, 16.3),
('2020-08-01', 'AAPL', 'buy', 100, 6.3),
('2020-09-01', 'MSFT', 'sell', 80, 10.3),
('2020-10-01', 'AAPL', 'sell', 80, 11.3)
]
)[0]
return False
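# Illustrative sketch (not part of the original file): runSql returns a
# (success, results) pair in which results holds one fetchall() list per
# SELECT statement, in order. The database filename below is an assumption;
# constructing the controller creates and seeds the file if it is missing.
def _demo_run_sql():
    controller = DataController("stocks_demo.db")
    ok, results = controller.runSql(["select count(id) from stocks"])
    if ok:
        print("row count:", results[0][0][0])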
if __name__ == "__main__":
activityDisplayWindow = tk.Tk()
displayWindowClass = ActivityDisplayWindow(activityDisplayWindow)
activityEntryWindow = tk.Toplevel(activityDisplayWindow)
ActivityEntryWindow(activityEntryWindow, displayWindowClass)
activityDisplayWindow.mainloop() | 2.390625 | 2 |
OneEncoder_MultiDecoders/models.py | Ali-Sahili/Background-Subtraction-Unsupervised-Learning | 5 | 12796459 |
import torch
import torch.nn as nn
from torch.autograd import Variable
from Param import nc, nz, device
class Model512(nn.Module):
def __init__(self,nz=nz,nef=8,ngf=8,nc=nc):
super(Model512, self).__init__()
self.nz=nz
self.nc=nc
## Encoder Part ##
self.encode = nn.Sequential(
# input is (nc) x 512 x 512
nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef),
nn.LeakyReLU(0.2, inplace=True),
# state size is (nef) x 256 x 256
nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*2) x 128 x 128
nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*4) x 64 x 64
nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*8) x 32 x 32
nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*16) x 16 x 16
nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 32),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*32) x 8 x 8
nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*64) x 4 x 4
nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(nef * 128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True),
nn.Sigmoid()
)
## #####
## Decoder Part ##
self.decode3 = nn.Sequential(
nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 128),
nn.ReLU(True),
# size ngf*128 x2 x2
nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 64),
nn.ReLU(True),
# size ngf*64 x4 x4
nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 32),
nn.ReLU(True),
# size ngf*32 x8 x8
nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
# state size. (ngf*16) x 16 x16
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 32 x 32
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True))
# state size. (ngf*4) x 64 x 64
self.conv_layer128 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode2 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True))
# state size. (ngf*2) x 128 x 128
self.conv_layer256 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 256 x 256
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
#nn.Sigmoid() # for VAE
# state size. (nc) x 512 x 512
)
self.output_layer = nn.Tanh() #nn.Sigmoid()
def forward(self, input):
x = self.encode(input)
x = self.decode3(x)
out128 = self.output_layer(self.conv_layer128(x))
x = self.decode2(x)
out256 = self.output_layer(self.conv_layer256(x))
out512 = self.decode1(x)
return out128, out256, out512
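# Minimal usage sketch (not part of the original file): the encoder expects
# (N, nc, 512, 512) inputs and the three decoder heads return reconstructions
# at 128, 256 and 512 pixels respectively.
def _demo_model512():
    model = Model512()
    x = torch.randn(1, nc, 512, 512)
    out128, out256, out512 = model(x)
    print(out128.shape, out256.shape, out512.shape)
    # -> (1, nc, 128, 128), (1, nc, 256, 256), (1, nc, 512, 512)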
""" VAE with three losses at three scales of the decoder """
class VAE_Model512(nn.Module):
def __init__(self,nz=nz,ngf=8,nef=8,nc=3):
super(VAE_Model512, self).__init__()
self.nz=nz
self.nc=nc
## Encoder Part ##
self.encode = nn.Sequential(
# input is (nc) x 512 x 512
nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef),
nn.LeakyReLU(0.2, inplace=True),
# state size is (nef) x 256 x 256
nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*2) x 128 x 128
nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*4) x 64 x 64
nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*8) x 32 x 32
nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*16) x 16 x 16
nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 32),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*32) x 8 x 8
nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*64) x 4 x 4
nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(nef * 128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True),
nn.Sigmoid()
)
## #####
## Decoder Part ##
self.decode3 = nn.Sequential(
nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 128),
nn.ReLU(True),
# size ngf*128 x2 x2
nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 64),
nn.ReLU(True),
# size ngf*64 x4 x4
nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 32),
nn.ReLU(True),
# size ngf*32 x8 x8
nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
# state size. (ngf*16) x 16 x16
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 32 x 32
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True))
# state size. (ngf*4) x 64 x 64
self.conv_layer128 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode2 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True))
# state size. (ngf*2) x 128 x 128
self.conv_layer256 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 256 x 256
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
#nn.Sigmoid() # for VAE
# state size. (nc) x 512 x 512
)
self.output_layer = nn.Tanh() #nn.Sigmoid()
self.fc1 = nn.Linear(nz, 64)
self.fc2 = nn.Linear(nz, 64)
self.fc3 = nn.Linear(64, nz)
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.FloatTensor(std.size()).normal_().to(device)
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, input):
b_size = input.shape[0]
        x = self.encode(input).view(b_size, self.nz)
mu = self.fc1(x) #fc1
logvar = self.fc2(x) #fc2
z = self.reparametrize(mu, logvar)
z = self.fc3(z).reshape(-1, self.nz, 1, 1) #fc3
#del x
x = self.decode3(z)
out128 = self.output_layer(self.conv_layer128(x))
x = self.decode2(x)
out256 = self.output_layer(self.conv_layer256(x))
out512 = self.decode1(x)
return out128, out256, out512, mu, logvar
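# Hedged training sketch (not part of the original file): one plausible way
# to combine the three reconstruction terms with the KL divergence. The MSE
# criterion, the interpolation calls and the unweighted sum are assumptions,
# not the authors' published recipe.
def _demo_vae_loss(model, x512):
    import torch.nn.functional as F
    out128, out256, out512, mu, logvar = model(x512)
    x256 = F.interpolate(x512, size=256)
    x128 = F.interpolate(x512, size=128)
    rec = (F.mse_loss(out128, x128)
           + F.mse_loss(out256, x256)
           + F.mse_loss(out512, x512))
    kld = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
    return rec + kld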
| 1.632813 | 2 |
deprecated/drivers/sub8_videoray_m5_thruster/nodes/thruster_driver.py | ericgorday/SubjuGator | 27 | 12796587 | #!/usr/bin/env python
import numpy as np
import copy
import rospy
import rospkg
import rosparam
import threading
import argparse
from geometry_msgs.msg import Vector3
from std_msgs.msg import Header, Float64
from sub8_msgs.msg import Thrust, ThrusterStatus
from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point
from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster
from sub8_thruster_comm import thruster_comm_factory
from ros_alarms import AlarmBroadcaster, AlarmListener
lock = threading.Lock()
class BusVoltageMonitor(object):
    '''
    Class that estimates sub8's thruster bus voltage.
    As of May 2017 this is just a simple rolling average over a constant-width
    sliding window. The add_reading and get_voltage_estimate methods are kept
    so that smarter filtering can be dropped in later.
    '''
VMAX = 50 # volts
VMIN = 0 # volts
class VoltageReading(object):
def __init__(self, voltage, time):
self.v = voltage
self.t = time
def __init__(self, window_duration):
'''
window_duration - float (amount of seconds for which to keep a reading in the buffer)
'''
self.bus_voltage_alarm = AlarmBroadcaster("bus-voltage")
self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1)
self.warn_voltage = rospy.get_param("/battery/warn_voltage", 44.5)
self.kill_voltage = rospy.get_param("/battery/kill_voltage", 44.0)
self.last_estimate_time = rospy.Time.now()
self.WINDOW_DURATION = rospy.Duration(window_duration)
self.ESTIMATION_PERIOD = rospy.Duration(0.2)
self.cached_severity = 0
self.buffer = []
def add_reading(self, voltage, time):
''' Adds voltage readings to buffer '''
voltage = float(voltage)
# Only add if it makes sense (the M5's will give nonsense feedback at times)
        if self.VMIN <= voltage <= self.VMAX:
self.buffer.append(self.VoltageReading(voltage, time))
self.prune_buffer()
# check bus voltage if enough time has passed
if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD:
self.check_bus_voltage()
def prune_buffer(self):
''' Removes readings older than the window_duration from buffer '''
for reading in self.buffer:
age = rospy.Time.now() - reading.t
if age > self.WINDOW_DURATION:
self.buffer.remove(reading)
def get_voltage_estimate(self):
''' Returns average voltage in buffer '''
voltages = []
if len(self.buffer) == 0:
return None
for r in self.buffer:
voltages.append(r.v)
return np.mean(voltages)
def check_bus_voltage(self):
''' Publishes bus_voltage estimate and raises alarm if necessary '''
bus_voltage = self.get_voltage_estimate()
if bus_voltage is None:
return
self.bus_voltage_pub.publish(Float64(bus_voltage))
severity = None
if bus_voltage < self.warn_voltage:
severity = 3
if bus_voltage < self.kill_voltage:
severity = 5
if severity is not None and self.cached_severity != severity:
self.bus_voltage_alarm.raise_alarm(
problem_description='Bus voltage has fallen to {}'.format(bus_voltage),
parameters={'bus_voltage': bus_voltage},
severity=severity
)
self.cached_severity = severity
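# Illustrative sketch (not part of the original driver): feeding raw voltage
# readings into the monitor. A running ROS node is required because the
# monitor publishes estimates and may raise the bus-voltage alarm.
def _demo_bus_voltage_monitor():
    monitor = BusVoltageMonitor(window_duration=30.0)
    monitor.add_reading(46.2, rospy.Time.now())  # out-of-range values are dropped
    print(monitor.get_voltage_estimate())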
class ThrusterDriver(object):
_dropped_timeout = 1.0 # s
_window_duration = 30.0 # s
def __init__(self, ports_layout, thruster_definitions):
'''Thruster driver, an object for commanding all of the sub's thrusters
- Gather configuration data and make it available to other nodes
- Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters
- Track a thrust_dict, which maps thruster names to the appropriate port
- Given a command message, route that command to the appropriate port/thruster
- Send a thruster status message describing the status of the particular thruster
'''
        # Resolve the node name here, after rospy.init_node() has run; doing it
        # at class-definition time would return '/unnamed'.
        self._NODE_NAME = rospy.get_name()
        self.failed_thrusters = set()  # This is only determined by comms
self.deactivated_thrusters = set() # These will not come back online even if comms are good (user managed)
# Alarms
self.thruster_out_alarm = AlarmBroadcaster("thruster-out")
AlarmListener("thruster-out", self.check_alarm_status, call_when_raised=False) # Prevent outside interference
# Create ThrusterPort objects in a dict indexed by port name
self.load_thruster_ports(ports_layout, thruster_definitions)
# Feedback on thrusters (thruster mapper blocks until it can use this service)
self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info)
self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10)
for name in self.thruster_to_port_map.keys()}
# These alarms require this service to be available before things will work
rospy.wait_for_service("update_thruster_layout")
self.update_thruster_out_alarm()
# Bus voltage
self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration)
# Command thrusters
self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1)
# To programmatically deactivate thrusters
self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster)
self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster)
@thread_lock(lock)
def load_thruster_ports(self, ports_layout, thruster_definitions):
''' Loads a dictionary ThrusterPort objects '''
self.ports = {} # ThrusterPort objects
self.thruster_to_port_map = {} # node_id to ThrusterPort
rospack = rospkg.RosPack()
self.make_fake = rospy.get_param('simulate', False)
if self.make_fake:
rospy.logwarn("Running fake thrusters for simulation, based on parameter '/simulate'")
# Instantiate thruster comms port
for port_info in ports_layout:
port_name = port_info['port']
self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake)
# Add the thrusters to the thruster dict and configure if present
for thruster_name in port_info['thruster_names']:
self.thruster_to_port_map[thruster_name] = port_info['port']
if thruster_name not in self.ports[port_name].online_thruster_names:
rospy.logerr("ThrusterDriver: {} IS MISSING!".format(thruster_name))
else:
rospy.loginfo("ThrusterDriver: {} registered".format(thruster_name))
# Set firmware settings
port = self.ports[port_name]
node_id = thruster_definitions[thruster_name]['node_id']
config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' +
thruster_name + '.yaml')
rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name,
config_path))
port.set_registers_from_dict(node_id=node_id,
reg_dict=rosparam.load_file(config_path)[0][0])
port.reboot_thruster(node_id) # Necessary for some settings to take effect
def get_thruster_info(self, srv):
''' Get the thruster info for a particular thruster name '''
query_name = srv.thruster_name
info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name]
thruster_info = ThrusterInfoResponse(
node_id=info.node_id,
min_force=info.thrust_bounds[0],
max_force=info.thrust_bounds[1],
position=numpy_to_point(info.position),
direction=Vector3(*info.direction)
)
return thruster_info
def check_alarm_status(self, alarm):
# If someone else cleared this alarm, we need to make sure to raise it again
if not alarm.raised and alarm.node_name != self._NODE_NAME:
self.update_thruster_out_alarm()
def update_thruster_out_alarm(self):
'''
Raises or clears the thruster out alarm
Updates the 'offline_thruster_names' parameter accordingly
Sets the severity to the number of failed thrusters (clipped at 5)
'''
offline_names = list(self.failed_thrusters)
if len(self.failed_thrusters) > 0:
self.thruster_out_alarm.raise_alarm(
node_name=self._NODE_NAME,
parameters={'offline_thruster_names': offline_names},
severity=int(np.clip(len(self.failed_thrusters), 1, 5)))
else:
self.thruster_out_alarm.clear_alarm(
node_name=self._NODE_NAME,
parameters={'offline_thruster_names': offline_names})
@thread_lock(lock)
def command_thruster(self, name, thrust):
'''
        Issue a force command (in Newtons) to a named thruster
Example names are BLR, FLH, etc.
Raises RuntimeError if a thrust value outside of the configured thrust bounds is commanded
Raises UnavailableThrusterException if a thruster that is offline is commanded a non-zero thrust
'''
port_name = self.thruster_to_port_map[name]
target_port = self.ports[port_name]
thruster_model = target_port.thruster_info[name]
if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]:
rospy.logwarn('Tried to command thrust ({}) outside of physical thrust bounds ({})'.format(
thrust, thruster_model.thrust_bounds))
if name in self.failed_thrusters:
if not np.isclose(thrust, 0):
rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name + ')')
effort = target_port.thruster_info[name].get_effort_from_thrust(thrust)
# We immediately get thruster_status back
thruster_status = target_port.command_thruster(name, effort)
# Keep track of thrusters going online or offline
offline_on_port = target_port.get_offline_thruster_names()
for offline in offline_on_port:
if offline not in self.failed_thrusters:
self.failed_thrusters.add(offline) # Thruster went offline
for failed in copy.deepcopy(self.failed_thrusters):
if (failed in target_port.get_declared_thruster_names() and
failed not in offline_on_port and
failed not in self.deactivated_thrusters):
self.failed_thrusters.remove(failed) # Thruster came online
# Don't try to do anything if the thruster status is bad
if thruster_status is None:
return
message_contents = [
'rpm',
'bus_v',
'bus_i',
'temp',
'fault',
'command_tx_count',
'status_rx_count',
'command_latency_avg'
]
message_keyword_args = {key: thruster_status[key] for key in message_contents}
power = thruster_status['bus_v'] * thruster_status['bus_i']
self.status_publishers[name].publish(
ThrusterStatus(
header=Header(stamp=rospy.Time.now()),
name=name,
node_id=thruster_model.node_id,
power=power,
effort=effort,
thrust=thrust,
**message_keyword_args
)
)
# Will publish bus_voltage and raise alarm if necessary
self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now())
# Undervolt/overvolt faults are unreliable (might not still be true - David)
if message_keyword_args['fault'] > 2:
fault_codes = {
(1 << 0): 'UNDERVOLT',
(1 << 1): 'OVERRVOLT',
(1 << 2): 'OVERCURRENT',
(1 << 3): 'OVERTEMP',
(1 << 4): 'STALL',
(1 << 5): 'STALL_WARN',
}
fault = int(message_keyword_args['fault'])
faults = []
for code, fault_name in fault_codes.items():
if code & fault != 0:
faults.append(fault_name)
rospy.logwarn("Thruster: {} has entered fault with status {}".format(name, message_keyword_args))
rospy.logwarn("Fault causes are: {}".format(faults))
return
def thrust_cb(self, msg):
'''
Callback for receiving thrust commands
These messages contain a list of instructions, one for each thruster
        If there are any updates to the list of failed thrusters, it will raise an alarm
'''
failed_before = {x for x in self.failed_thrusters}
for thrust_cmd in list(msg.thruster_commands):
self.command_thruster(thrust_cmd.name, thrust_cmd.thrust)
# Raise or clear 'thruster-out' alarm
        if self.failed_thrusters != failed_before:
            rospy.logdebug('Failed thrusters: %s', self.failed_thrusters)
self.update_thruster_out_alarm()
def stop(self):
''' Commands 0 thrust to all thrusters '''
for port in self.ports.values():
for thruster_name in port.online_thruster_names.copy():
self.command_thruster(thruster_name, 0.0)
def fail_thruster(self, srv):
''' Makes a thruster unavailable for thrust allocation '''
# So that thrust is not allocated to the thruster
self.failed_thrusters.add(srv.thruster_name)
# So that it won't come back online even if comms are good
self.deactivated_thrusters.add(srv.thruster_name)
# So that thruster_mapper updates the B-matrix
self.update_thruster_out_alarm()
return {}
def unfail_thruster(self, srv):
''' Undoes effect of self.fail_thruster '''
self.failed_thrusters.remove(srv.thruster_name)
self.deactivated_thrusters.remove(srv.thruster_name)
self.update_thruster_out_alarm()
return {}
if __name__ == '__main__':
PKG = 'sub8_videoray_m5_thruster'
usage_msg = "Interface to Sub8's VideoRay M5 thrusters"
desc_msg = "Specify a path to the configuration.json file containing the thrust calibration data"
parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)
args = parser.parse_args(rospy.myargv()[1:])
rospy.init_node('videoray_m5_thruster_driver')
layout_parameter = '/thruster_layout'
rospy.loginfo("Thruster Driver waiting for parameter, {}".format(layout_parameter))
thruster_layout = wait_for_param(layout_parameter)
if thruster_layout is None:
raise IOError('/thruster_layout rosparam needs to be set before launching the thruster driver')
thruster_driver = ThrusterDriver(thruster_layout['thruster_ports'], thruster_layout['thrusters'])
rospy.spin()
| 1.851563 | 2 |
pipescaler/processors/threshold_processor.py | KarlTDebiec/PipeScaler | 1 | 12796715 | <reponame>KarlTDebiec/PipeScaler<gh_stars>1-10
#!/usr/bin/env python
# pipescaler/processors/threshold_processor.py
#
# Copyright (C) 2020-2021 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license.
from __future__ import annotations
from argparse import ArgumentParser
from inspect import cleandoc
from typing import Any, no_type_check
import numba as nb
import numpy as np
from PIL import Image
from pipescaler.core import Processor, validate_image
class ThresholdProcessor(Processor):
"""Converts image to black and white using threshold, optionally denoising."""
def __init__(
self, threshold: int = 128, denoise: bool = False, **kwargs: Any
) -> None:
super().__init__(**kwargs)
# Store configuration
self.threshold = threshold
self.denoise = denoise
def process_file(self, infile: str, outfile: str) -> None:
# Read image
input_image = validate_image(infile, "L")
# Process image
output_image = input_image.point(lambda p: p > self.threshold and 255)
if self.denoise:
output_data = np.array(output_image)
self.denoise_data(output_data)
output_image = Image.fromarray(output_data)
output_image = output_image.convert("L")
# Write image
output_image.save(outfile)
@classmethod
def construct_argparser(cls, **kwargs: Any) -> ArgumentParser:
"""
Constructs argument parser.
Args:
kwargs (Any): Additional keyword arguments
Returns:
parser (ArgumentParser): Argument parser
"""
description = kwargs.pop("description", cleandoc(cls.__doc__))
parser = super().construct_argparser(description=description, **kwargs)
parser.add_argument(
"--threshold",
default=128,
type=int,
help="threshold differentiating black and white (0-255, default: "
"%(default)s)",
)
        parser.add_argument(
            "--denoise",
            action="store_true",
            help="flip the color of pixels that have fewer than three "
            "same-colored neighbors in their 3x3 neighborhood",
        )
return parser
@no_type_check
@staticmethod
@nb.jit(nopython=True, nogil=True, cache=True, fastmath=True)
def denoise_data(data: np.ndarray) -> None:
for x in range(1, data.shape[1] - 1):
for y in range(1, data.shape[0] - 1):
slc = data[y - 1 : y + 2, x - 1 : x + 2]
if data[y, x] == 0:
if (slc == 0).sum() < 4:
data[y, x] = 255
else:
if (slc == 255).sum() < 4:
data[y, x] = 0
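# Minimal sketch (not part of the original module): denoise_data flips any
# interior black/white pixel with fewer than four same-colored pixels in its
# 3x3 neighborhood (itself included), which removes isolated speckles.
def _demo_denoise():
    data = np.full((5, 5), 255, dtype=np.uint8)
    data[2, 2] = 0  # a lone black pixel surrounded by white
    ThresholdProcessor.denoise_data(data)
    assert data[2, 2] == 255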
if __name__ == "__main__":
ThresholdProcessor.main()
| 2.09375 | 2 |
filtering_posts/models.py | Unkorunk/filtering-posts | 0 | 12796843 | from django.db import models
import datetime
class Region(models.Model):
name = models.CharField(max_length=200)
class University(models.Model):
address = models.CharField(max_length=255)
affilation_name = models.CharField(max_length=255)
author_count = models.IntegerField(default=0)
city = models.CharField(max_length=200)
country = models.CharField(max_length=200)
date_created = models.DateField()
document_count = models.IntegerField(default=0)
eid = models.CharField(max_length=200)
identifier = models.CharField(max_length=200)
org_domain = models.CharField(max_length=200)
org_type = models.CharField(max_length=200)
org_url = models.CharField(max_length=200)
postal_code = models.CharField(max_length=200)
scopus_affiliation_link = models.CharField(max_length=200)
search_link = models.CharField(max_length=200)
self_link = models.CharField(max_length=200)
state = models.ForeignKey(Region, on_delete=models.CASCADE)
url = models.CharField(max_length=200)
lat = models.FloatField(default=0.0)
lon = models.FloatField(default=0.0)
class Author(models.Model):
affilation_current = models.ForeignKey(University, on_delete=models.CASCADE)
citation_count = models.IntegerField(default=0)
cited_by_count = models.IntegerField(default=0)
coauthor_count = models.IntegerField(default=0)
coauthor_link = models.CharField(max_length=255)
date_created = models.DateField()
document_count = models.IntegerField(default=0)
eid = models.CharField(max_length=200)
given_name = models.CharField(max_length=200)
h_index = models.CharField(max_length=100)
identifier = models.CharField(max_length=100)
indexed_name = models.CharField(max_length=100)
initials = models.CharField(max_length=100)
orc_id = models.CharField(max_length=100)
publication_range = models.CharField(max_length=100)
scopus_author_link = models.CharField(max_length=255)
search_link = models.CharField(max_length=255)
self_link = models.CharField(max_length=255)
status = models.CharField(max_length=100)
surname = models.CharField(max_length=100)
url = models.CharField(max_length=255)
school_name = models.CharField(max_length=255, default='')
russian_fullname = models.CharField(max_length=255, default='')
job_category = models.CharField(max_length=255, default='')
job_position = models.CharField(max_length=255, default='')
job_unit = models.CharField(max_length=255, default='')
job_parent_unit = models.CharField(max_length=255, default='')
job_rate = models.CharField(max_length=255, default='0.0')
type_employment = models.CharField(max_length=255, default='')
date_birth = models.DateField(default=datetime.date(1900, 1, 1))
last_degree = models.CharField(max_length=255, default='')
phd = models.BooleanField(default=False)
last_academic_title = models.CharField(max_length=255, default='')
relevant = models.BooleanField(default=False)
class Journal(models.Model):
sourcetitle = models.CharField(max_length=255)
abbreviation = models.CharField(max_length=200)
type_journal = models.CharField(max_length=100)
issn = models.CharField(max_length=100)
source_id = models.IntegerField(null=True)
cnt_publications = models.IntegerField(default=0)
class Document(models.Model):
class Meta:
db_table = 'api_document'
eid = models.CharField(max_length=200)
doi = models.CharField(max_length=200)
pii = models.CharField(max_length=200, default="-1")
pubmed_id = models.CharField(max_length=200)
title = models.CharField(max_length=255)
subtype = models.CharField(max_length=200)
# subtype_description = models.CharField(max_length=200)
creator = models.ForeignKey(Author, on_delete=models.CASCADE)
author_count = models.IntegerField(default=0)
cover_date = models.DateField()
cover_display_date = models.CharField(max_length=200)
publication_name = models.CharField(max_length=255)
issn = models.ForeignKey(Journal, on_delete=models.CASCADE)
source_id = models.CharField(max_length=200)
eIssn = models.CharField(max_length=200)
aggregation_type = models.CharField(max_length=200)
volume = models.CharField(max_length=100, default="0")
issue_identifier = models.CharField(max_length=200)
article_number = models.CharField(max_length=200)
page_range = models.CharField(max_length=200, default="-1")
description = models.TextField()
authkeywords = models.TextField()
citedby_count = models.IntegerField(default=0)
openaccess = models.IntegerField(default=0)
fund_acr = models.CharField(max_length=200)
fund_no = models.CharField(max_length=200)
fund_sponsor = models.CharField(max_length=200)
citation_by_year = models.TextField(default="")
citation_by_year_with_self = models.TextField(default="")
class Subject(models.Model):
name = models.CharField(max_length=200)
full_name = models.CharField(max_length=255)
class DocumentSubject(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0)
id_sub = models.ForeignKey(Subject, on_delete=models.CASCADE, default=0)
class AuthorJournal(models.Model):
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0)
id_journal = models.ForeignKey(Journal, on_delete=models.CASCADE, default=0)
class AuthorUniversity(models.Model):
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
class DocumentAuthorUniversity(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0, null=True)
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0, null=True)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0, null=True)
class AuthorSubject(models.Model):
id_author = models.ForeignKey(Author, on_delete=models.CASCADE)
id_sub = models.ForeignKey(Subject, on_delete=models.CASCADE)
class DocumentUniversityAffiliations(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0, null=True)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0, null=True)
class Rankings(models.Model):
name = models.CharField(max_length=255)
class UniversityRankPlace(models.Model):
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
id_ranking = models.ForeignKey(Rankings, on_delete=models.CASCADE, default=0)
year = models.IntegerField(default=0)
place = models.CharField(max_length=255, default="")
class UniversityRankCriteria(models.Model):
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
id_ranking = models.ForeignKey(Rankings, on_delete=models.CASCADE, default=0)
criteria = models.CharField(max_length=255, default="")
score = models.FloatField(default=0.0)
class DateCitationCount(models.Model):
date = models.DateField(auto_now=True)
citation_count = models.IntegerField(default=0)
self_citation_count = models.IntegerField(default=0)
| 1.1875 | 1 |
bliss/urls.py | jugovich/teresajugovich | 0 | 12796971 | <reponame>jugovich/teresajugovich
from django.conf.urls import include, url
from dolove import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
admin.autodiscover()
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'about$', views.about, name='about'),
url(r'doula_description$', views.doula_description, name='doula_description'),
url(r'doula_services$', views.doula_services, name='doula_services'),
url(r'photo_gallery$', views.photo_gallery, name='photo_gallery'),
url(r'photo_price$', views.photo_price, name='photo_price'),
url(r'yoga_class$', views.yoga_class, name='yoga_class'),
url(r'yoga_locations$', views.yoga_locations, name='yoga_locations'),
url(r'yoga_schedule$', views.yoga_schedule, name='yoga_schedule'),
url(r'yoga_price$', views.yoga_price, name='yoga_price'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 0.765625 | 1 |
utils_nlp/models/gensen/utils.py | gohanlon/nlp | 4,407 | 12797099 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Minibatching utilities."""
import itertools
import operator
import os
import pickle
import numpy as np
import torch
from sklearn.utils import shuffle
from torch.autograd import Variable
# Python 2 used itertools.izip here; in Python 3 the built-in zip suffices.
class DataIterator(object):
"""Data Iterator."""
@staticmethod
def _trim_vocab(vocab, vocab_size):
"""Discard start, end, pad and unk tokens if already present.
Args:
vocab(list): Vocabulary.
vocab_size(int): The size of the vocabulary.
Returns:
            word2id(dict): Word to index mapping.
            id2word(dict): Index to word mapping.
"""
if "<s>" in vocab:
del vocab["<s>"]
if "<pad>" in vocab:
del vocab["<pad>"]
if "</s>" in vocab:
del vocab["</s>"]
if "<unk>" in vocab:
del vocab["<unk>"]
word2id = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
id2word = {0: "<s>", 1: "<pad>", 2: "</s>", 3: "<unk>"}
sorted_word2id = sorted(
vocab.items(), key=operator.itemgetter(1), reverse=True
)
if vocab_size != -1:
sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]
else:
sorted_words = [x[0] for x in sorted_word2id]
for ind, word in enumerate(sorted_words):
word2id[word] = ind + 4
for ind, word in enumerate(sorted_words):
id2word[ind + 4] = word
return word2id, id2word
def construct_vocab(
self, sentences, vocab_size, lowercase=False, charlevel=False
):
"""Create vocabulary.
Args:
sentences(list): The list of sentences.
vocab_size(int): The size of vocabulary.
            lowercase(bool): If True, lowercase the sentences.
            charlevel(bool): If True, treat each character as a token instead of splitting on whitespace.
Returns:
            word2id(dict): Word to index mapping.
            id2word(dict): Index to word mapping.
"""
vocab = {}
for sentence in sentences:
if isinstance(sentence, str):
if lowercase:
sentence = sentence.lower()
if not charlevel:
sentence = sentence.split()
for word in sentence:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
word2id, id2word = self._trim_vocab(vocab, vocab_size)
return word2id, id2word
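# Usage sketch (not part of the original module): the four special tokens
# always occupy ids 0-3, so the most frequent corpus words start at id 4.
def _demo_construct_vocab():
    iterator = DataIterator()
    word2id, id2word = iterator.construct_vocab(
        ["the cat sat", "the dog sat"], vocab_size=2, lowercase=True
    )
    print(word2id["<pad>"], id2word[4], id2word[5])  # 1, plus the two top words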
class BufferedDataIterator(DataIterator):
"""Multi Parallel corpus data iterator."""
def __init__(
self,
src,
trg,
src_vocab_size,
trg_vocab_size,
tasknames,
save_dir,
buffer_size=1e6,
lowercase=False,
seed=0,
):
"""Initialize params.
Args:
            src(list): list of source data file paths.
            trg(list): list of target data file paths.
src_vocab_size(int): The size of source vocab.
trg_vocab_size(int): The size of target vocab.
tasknames(list): The list of task names.
save_dir(str): The saving dir.
buffer_size(float): Buffer size.
lowercase(bool): if lowercase the data.
"""
self.seed = seed
self.fname_src = src
self.fname_trg = trg
self.src_vocab_size = src_vocab_size
self.trg_vocab_size = trg_vocab_size
self.tasknames = tasknames
self.save_dir = save_dir
self.buffer_size = buffer_size
self.lowercase = lowercase
# Open a list of file pointers to all the files.
self.f_src = [
open(fname, "r", encoding="utf-8") for fname in self.fname_src
]
self.f_trg = [
open(fname, "r", encoding="utf-8") for fname in self.fname_trg
]
# Initialize dictionaries that contain sentences & word mapping dicts
self.src = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_src))
]
self.trg = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_trg))
]
self.build_vocab()
"""Reset file pointers to the start after reading the file to
build vocabularies."""
for idx in range(len(self.src)):
self._reset_filepointer(idx)
for idx in range(len(self.src)):
self.fetch_buffer(idx)
def _reset_filepointer(self, idx):
"""Reset file pointer.
Args:
idx(int): Index used to reset file pointer.
"""
self.f_src[idx] = open(self.fname_src[idx], "r", encoding="utf-8")
self.f_trg[idx] = open(self.fname_trg[idx], "r", encoding="utf-8")
def fetch_buffer(self, idx, reset=True):
"""Fetch sentences from the file into the buffer.
Args:
idx(int): Index used to fetch the sentences.
reset(bool): If need to reset the contents of the current buffer.
"""
# Reset the contents of the current buffer.
if reset:
self.src[idx]["data"] = []
self.trg[idx]["data"] = []
# Populate buffer
for src, trg in zip(self.f_src[idx], self.f_trg[idx]):
if len(self.src[idx]["data"]) == self.buffer_size:
break
if self.lowercase:
self.src[idx]["data"].append(src.lower().split())
self.trg[idx]["data"].append(trg.lower().split())
else:
self.src[idx]["data"].append(src.split())
self.trg[idx]["data"].append(trg.split())
# Sort sentences by decreasing length (hacky bucketing)
self.src[idx]["data"], self.trg[idx]["data"] = zip(
*sorted(
zip(self.src[idx]["data"], self.trg[idx]["data"]),
key=lambda x: len(x[0]),
reverse=True,
)
)
"""If buffer isn't full after reading the contents of the file,
cycle around. """
if len(self.src[idx]["data"]) < self.buffer_size:
assert len(self.src[idx]["data"]) == len(self.trg[idx]["data"])
# Cast things to list to avoid issue with calling .append above
self.src[idx]["data"] = list(self.src[idx]["data"])
self.trg[idx]["data"] = list(self.trg[idx]["data"])
self._reset_filepointer(idx)
self.fetch_buffer(idx, reset=False)
def build_vocab(self):
"""Build a memory efficient vocab."""
# Construct common source vocab.
# Check if save directory exists.
if not os.path.exists(self.save_dir):
raise ValueError("Could not find save dir : %s" % self.save_dir)
# Check if a cached vocab file exists.
if os.path.exists(os.path.join(self.save_dir, "src_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "src_vocab.pkl"), "rb")
)
word2id, id2word = vocab["word2id"], vocab["id2word"]
# If not, compute the vocab from scratch and store a cache.
else:
word2id, id2word = self.construct_vocab(
itertools.chain.from_iterable(self.f_src),
self.src_vocab_size,
self.lowercase,
)
pickle.dump(
{"word2id": word2id, "id2word": id2word},
open(os.path.join(self.save_dir, "src_vocab.pkl"), "wb"),
)
for corpus in self.src:
corpus["word2id"], corpus["id2word"] = word2id, id2word
# Do the same for the target vocabulary.
if os.path.exists(os.path.join(self.save_dir, "trg_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "rb")
)
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = (
vocab[self.tasknames[idx]]["word2id"],
vocab[self.tasknames[idx]]["id2word"],
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
else:
trg_vocab_dump = {}
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = self.construct_vocab(
fname, self.trg_vocab_size, self.lowercase
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
trg_vocab_dump[self.tasknames[idx]] = {}
trg_vocab_dump[self.tasknames[idx]]["word2id"] = word2id
trg_vocab_dump[self.tasknames[idx]]["id2word"] = id2word
pickle.dump(
trg_vocab_dump,
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "wb"),
)
def shuffle_dataset(self, idx):
"""Shuffle current buffer."""
self.src[idx]["data"], self.trg[idx]["data"] = shuffle(
self.src[idx]["data"],
self.trg[idx]["data"],
random_state=self.seed,
)
def get_parallel_minibatch(
self, corpus_idx, index, batch_size, max_len_src, max_len_trg
):
"""Prepare minibatch.
Args:
corpus_idx(int): Corpus Index.
            index(int): Start index of the minibatch.
            batch_size(int): Batch Size.
            max_len_src(int): Max length for source.
            max_len_trg(int): Max length for target.
Returns: minibatch of src-trg pairs(dict).
"""
src_lines = [
["<s>"] + line[: max_len_src - 2] + ["</s>"]
for line in self.src[corpus_idx]["data"][
index : index + batch_size
]
]
trg_lines = [
["<s>"] + line[: max_len_trg - 2] + ["</s>"]
for line in self.trg[corpus_idx]["data"][
index : index + batch_size
]
]
"""Sort sentences by decreasing length within a minibatch for
`torch.nn.utils.packed_padded_sequence`"""
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
# Map words to indices
input_lines_src = [
[
self.src[corpus_idx]["word2id"][w]
if w in self.src[corpus_idx]["word2id"]
else self.src[corpus_idx]["word2id"]["<unk>"]
for w in line
]
+ [self.src[corpus_idx]["word2id"]["<pad>"]]
* (max_src_len - len(line))
for line in sorted_src_lines
]
input_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[:-1]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[1:]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
# Cast lists to torch tensors
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
sorted_src_lens = (
            Variable(torch.LongTensor(sorted_src_lens), requires_grad=False)
.squeeze()
.cuda()
)
# Return minibatch of src-trg pairs
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}
class NLIIterator(DataIterator):
"""Data iterator for tokenized NLI datasets."""
def __init__(
self, train, dev, test, vocab_size, lowercase=True, vocab=None, seed=0
):
"""Initialize params.
        Each of train/dev/test is a tab-separated file of the form
        premise \t hypothesis \t label.
        Args:
            train(str): Path to the training dataset.
            dev(str): Path to the validation dataset.
            test(str): Path to the test dataset.
            vocab_size(int): The size of the vocabulary.
            lowercase(bool): If True, lowercase the dataset.
            vocab(str): Optional path to a pickled vocabulary.
"""
self.seed = seed
self.train = train
self.dev = dev
self.test = test
self.vocab_size = vocab_size
self.lowercase = lowercase
self.vocab = vocab
self.train_lines = [
line.strip().lower().split("\t")
for line in open(self.train, encoding="utf-8")
]
self.dev_lines = [
line.strip().lower().split("\t")
for line in open(self.dev, encoding="utf-8")
]
self.test_lines = [
line.strip().lower().split("\t")
for line in open(self.test, encoding="utf-8")
]
if self.vocab is not None:
# binary mode doesn't take an encoding argument
self.vocab = pickle.load(open(self.vocab, "rb"))
self.word2id = self.vocab["word2id"]
self.id2word = self.vocab["id2word"]
self.vocab_size = len(self.word2id)
else:
self.word2id, self.id2word = self.construct_vocab(
[x[0] for x in self.train_lines]
+ [x[1] for x in self.train_lines],
self.vocab_size,
lowercase=self.lowercase,
)
# Label text to class mapping.
self.text2label = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.shuffle_dataset()
def shuffle_dataset(self):
"""Shuffle training data."""
self.train_lines = shuffle(self.train_lines, random_state=self.seed)
def get_parallel_minibatch(self, index, batch_size, sent_type="train"):
"""Prepare minibatch.
Args:
            index(int): Start index of the minibatch.
batch_size(int): Batch size.
sent_type(str): Type of dataset.
Returns:
dict for batch training.
"""
if sent_type == "train":
lines = self.train_lines
elif sent_type == "dev":
lines = self.dev_lines
else:
lines = self.test_lines
sent1 = [
["<s>"] + line[0].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
sent2 = [
["<s>"] + line[1].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
labels = [
self.text2label[line[2]]
for line in lines[index : index + batch_size]
]
sent1_lens = [len(line) for line in sent1]
sorted_sent1_indices = np.argsort(sent1_lens)[::-1]
sorted_sent1_lines = [sent1[idx] for idx in sorted_sent1_indices]
rev_sent1 = np.argsort(sorted_sent1_indices)
sent2_lens = [len(line) for line in sent2]
sorted_sent2_indices = np.argsort(sent2_lens)[::-1]
sorted_sent2_lines = [sent2[idx] for idx in sorted_sent2_indices]
rev_sent2 = np.argsort(sorted_sent2_indices)
sorted_sent1_lens = [len(line) for line in sorted_sent1_lines]
sorted_sent2_lens = [len(line) for line in sorted_sent2_lines]
max_sent1_len = max(sorted_sent1_lens)
max_sent2_len = max(sorted_sent2_lens)
sent1 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent1_len - len(line))
for line in sorted_sent1_lines
]
sent2 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent2_len - len(line))
for line in sorted_sent2_lines
]
sent1 = Variable(torch.LongTensor(sent1)).cuda()
sent2 = Variable(torch.LongTensor(sent2)).cuda()
labels = Variable(torch.LongTensor(labels)).cuda()
sent1_lens = (
Variable(torch.LongTensor(sorted_sent1_lens), requires_grad=False)
.squeeze()
.cuda()
)
sent2_lens = (
Variable(torch.LongTensor(sorted_sent2_lens), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent1 = (
Variable(torch.LongTensor(rev_sent1), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent2 = (
Variable(torch.LongTensor(rev_sent2), requires_grad=False)
.squeeze()
.cuda()
)
return {
"sent1": sent1,
"sent2": sent2,
"sent1_lens": sent1_lens,
"sent2_lens": sent2_lens,
"rev_sent1": rev_sent1,
"rev_sent2": rev_sent2,
"labels": labels,
"type": "nli",
}
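# A minimal usage sketch (not part of the original source). The file paths
# and batch size below are illustrative placeholders, and a CUDA device is
# assumed because get_parallel_minibatch moves every tensor to the GPU:
#
#     iterator = NLIIterator(
#         train="snli_train.txt",  # lines: premise \t hypothesis \t label
#         dev="snli_dev.txt",
#         test="snli_test.txt",
#         vocab_size=30000,
#     )
#     batch = iterator.get_parallel_minibatch(0, batch_size=32)
#     # batch["sent1"] / batch["sent2"] are padded LongTensors sorted by
#     # length; batch["rev_sent1"] / batch["rev_sent2"] undo the sorting so
#     # premise/hypothesis pairs can be re-aligned after encoding.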
def get_validation_minibatch(
src, trg, index, batch_size, src_word2id, trg_word2id
):
"""Prepare minibatch.
Args:
src(list): source data.
trg(list): target data.
        index(int): Start index of the minibatch within the dataset.
        batch_size(int): batch size.
        src_word2id(dict): Word-to-index mapping for the source language.
        trg_word2id(dict): Word-to-index mapping for the target language.
Returns:
Dict for seq2seq model.
"""
src_lines = [
["<s>"] + line + ["</s>"] for line in src[index : index + batch_size]
]
trg_lines = [
["<s>"] + line + ["</s>"] for line in trg[index : index + batch_size]
]
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
input_lines_src = [
        [src_word2id[w] if w in src_word2id else src_word2id["<unk>"] for w in line]
+ [src_word2id["<pad>"]] * (max_src_len - len(line))
for line in sorted_src_lines
]
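    # Teacher forcing: the decoder input drops each line's final token
    # (line[:-1]) while the training target drops the leading "<s>"
    # (line[1:]), so the model learns to predict token t+1 from tokens <= t.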
input_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[:-1]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[1:]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
    # For PyTorch 0.4
with torch.no_grad():
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
# sorted_src_lens = Variable(
# torch.LongTensor(sorted_src_lens)
# ).squeeze().cuda()
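    # .view(len(...)) is used below instead of .squeeze() so a batch of
    # size 1 stays a 1-D tensor rather than collapsing to a 0-d scalar.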
sorted_src_lens = (
Variable(torch.LongTensor(sorted_src_lens))
.view(len(sorted_src_lens))
.cuda()
)
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}
def compute_validation_loss(
config, model, train_iterator, criterion, task_idx, lowercase=False
):
"""Compute validation loss for a task.
Args:
        config(dict): Configuration dictionary.
        model(MultitaskModel): The multitask model being evaluated.
        train_iterator(BufferedDataIterator): Multi-parallel corpus data iterator.
        criterion(nn.CrossEntropyLoss): Loss criterion for the seq2seq output.
        task_idx(int): Task index.
        lowercase(bool): Whether to lowercase the data.
    Returns:
        float: Mean validation loss over all minibatches.
"""
val_src = config["data"]["paths"][task_idx]["val_src"]
val_trg = config["data"]["paths"][task_idx]["val_trg"]
if lowercase:
val_src = [
line.strip().lower().split()
for line in open(val_src, "r", encoding="utf-8")
]
val_trg = [
line.strip().lower().split()
for line in open(val_trg, "r", encoding="utf-8")
]
else:
val_src = [
line.strip().split()
for line in open(val_src, "r", encoding="utf-8")
]
val_trg = [
line.strip().split()
for line in open(val_trg, "r", encoding="utf-8")
]
batch_size = config["training"]["batch_size"]
losses = []
for j in range(0, len(val_src), batch_size):
minibatch = get_validation_minibatch(
val_src,
val_trg,
j,
batch_size,
train_iterator.src[task_idx]["word2id"],
train_iterator.trg[task_idx]["word2id"],
)
decoder_logit = model(minibatch, task_idx)
loss = criterion(
decoder_logit.contiguous().view(-1, decoder_logit.size(2)),
minibatch["output_trg"].contiguous().view(-1),
)
# losses.append(loss.data[0])
losses.append(loss.item())
return np.mean(losses)
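# Illustrative placement in a training loop (not from the original source;
# the loop variables below are assumptions):
#
#     for step in range(num_updates):
#         ...  # one multi-task training update
#         if step % eval_every == 0:
#             val_loss = compute_validation_loss(
#                 config, model, train_iterator, criterion,
#                 task_idx=0, lowercase=True,
#             )
#             print("task 0 validation loss: %.4f" % val_loss)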
# Original source: https://github.com/Maluuba/gensen
| 2.1875 | 2 |
Hashtable/1124. Longest Well-Performing Interval.py | viewv/leetcode | 2 | 12797227 | from typing import List


class Solution:
    def longestWPI(self, hours: List[int]) -> int:
        # Map each day to +1 (tiring: more than 8 hours) or -1, so the task
        # becomes: find the longest subarray whose sum is strictly positive.
        # res records the first index at which each prefix-sum value occurs.
        res = dict()
        s = 0
        ans = 0
        for i, c in enumerate(hours):
            s += 1 if c > 8 else -1
            if s > 0:
                # The entire prefix [0..i] is a well-performing interval.
                ans = i + 1
            if s not in res:
                res[s] = i
            if s - 1 in res:
                # A positive-sum subarray ending at i starts right after the
                # earliest index where the prefix sum was s - 1.
                ans = max(ans, i - res[s - 1])
        return ans