Dataset schema:

  max_stars_repo_path   string   (length 3 to 269)
  max_stars_repo_name   string   (length 4 to 119)
  max_stars_count       int64    (0 to 191k)
  id                    string   (length 1 to 7)
  content               string   (length 6 to 1.05M)
  score                 float64  (0.23 to 5.13)
  int_score             int64    (0 to 5)
max_stars_repo_path: testing/tests/001-main/002-createrepository.py
max_stars_repo_name: darobin/critic
max_stars_count: 1
id: 12799651
content:

import time

def check_repository(document):
    rows = document.findAll("tr", attrs=testing.expect.with_class("repository"))
    testing.expect.check(1, len(rows))

    def check_cell(row, class_name, expected_string, inline_element_type=None):
        cells = row.findAll("td", attrs=testing.expect.with_class(class_name))
        testing.expect.check(1, len(cells))
        if inline_element_type:
            testing.expect.check(1, len(cells[0].findAll(inline_element_type)))
            string = cells[0].findAll("i")[0].string
        else:
            string = cells[0].string
        if string is None:
            string = ""
        testing.expect.check(expected_string, string)

    check_cell(rows[0], "name", "critic")
    check_cell(rows[0], "location", "http://%s/critic.git" % instance.hostname)
    check_cell(rows[0], "upstream", "&nbsp;")

    rows = document.findAll("tr", attrs=testing.expect.with_class("details"))
    testing.expect.check(1, len(rows))

    tables = rows[0].findAll("table", attrs=testing.expect.with_class("trackedbranches"))
    testing.expect.check(1, len(tables))

    # Would like to use 'tables[0].findAll()' here, but BeautifulSoup apparently
    # doesn't parse nested tables correctly, so these rows aren't actually part
    # of the 'trackedbranches' table according to it.
    rows = document.findAll("tr", attrs=testing.expect.with_class("branch"))
    testing.expect.check(2, len(rows))

    check_cell(rows[0], "localname", "Tags", inline_element_type="i")
    check_cell(rows[0], "remote", repository.url)
    check_cell(rows[0], "remotename", "N/A", inline_element_type="i")
    check_cell(rows[0], "enabled", "Yes")
    check_cell(rows[0], "users", "")

    check_cell(rows[1], "localname", "master")
    check_cell(rows[1], "remote", repository.url)
    check_cell(rows[1], "remotename", "master")
    check_cell(rows[1], "enabled", "Yes")
    check_cell(rows[1], "users", "")

with frontend.signin():
    # Check that this URL isn't handled already. We're using it later to detect
    # that the repository has been created and the tracked branch fetched, and
    # if it's already handled for some reason, that check won't be reliable.
    frontend.page("critic/master", expected_http_status=404)

    frontend.operation(
        "addrepository",
        data={"name": "critic",
              "path": "critic",
              "remote": {"url": repository.url,
                         "branch": "master"}})

    # If it hasn't happened after 30 seconds, something must be wrong.
    deadline = time.time() + 30
    finished = False

    while not finished and time.time() < deadline:
        # The frontend.page() function returns None if the HTTP status was
        # 404, and a BeautifulSoup object if it was 200.
        if frontend.page("critic/master", expected_http_status=[200, 404]) is None:
            time.sleep(0.5)
            while True:
                mail = mailbox.pop(accept=testing.mailbox.with_subject("^branchtracker.log: "))
                if not mail:
                    break
                logger.error("Administrator message: %s\n > %s"
                             % (mail.header("Subject"), "\n > ".join(mail.lines)))
                raise testing.TestFailure
        else:
            finished = True

    if not finished:
        logger.error("Repository main branch ('refs/heads/master') not fetched after 30 seconds.")
        raise testing.TestFailure

    # Check that /repositories still loads correctly now that there's a
    # repository in the system.
    frontend.page(
        "repositories",
        expect={"document_title": testing.expect.document_title(u"Repositories"),
                "content_title": testing.expect.paleyellow_title(0, u"Repositories"),
                "repository": check_repository})

    frontend.operation(
        "addrepository",
        data={"name": "a" * 65,
              "path": "validpath2"},
        expect={"status": "failure",
                "code": "invalidshortname"})

    frontend.operation(
        "addrepository",
        data={"name": "",
              "path": "validpath1"},
        expect={"status": "failure",
                "code": "invalidshortname"})

score: 2.40625
int_score: 2
max_stars_repo_path: Dataset/Leetcode/train/102/63.py
max_stars_repo_name: kkcookies99/UAST
max_stars_count: 0
id: 12799652
content:

class Solution:
    def XXX(self, root: TreeNode) -> List[List[int]]:
        queue = []
        queue.insert(0, root)
        res = []
        if not root:
            return []
        while queue:
            n = len(queue)
            layer = []
            for i in range(n):
                temp = queue.pop()
                print(temp.val)
                if temp.left:
                    queue.insert(0, temp.left)
                if temp.right:
                    queue.insert(0, temp.right)
                layer.append(temp.val)
            res.append(layer)
        return res

score: 3.328125
int_score: 3
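A quick way to sanity-check the level-order solution above is to build a small tree and run it. The TreeNode and List definitions below are assumptions (LeetCode normally injects them) and must exist before the Solution class is defined for its annotations to resolve; they are not part of the original file.

from typing import List

class TreeNode:  # assumed LeetCode-style node; not in the original snippet
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Tree [3, 9, 20, null, null, 15, 7], traversed level by level.
root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
print(Solution().XXX(root))  # [[3], [9, 20], [15, 7]] (plus the per-node debug prints)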
max_stars_repo_path: src/5_expression/GSE64913.py
max_stars_repo_name: reemagit/flowcentrality
max_stars_count: 2
id: 12799653
content:

#!/usr/bin/env python3
"""
Generate GSE64913
"""

__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"

import logging
import GEOparse
import argparse
import pandas as pd
from funcs import utils
from os.path import join
import numpy as np

#def append_postfix(filename,postfix):
#    return "{0}_{2}.{1}".format(*filename.rsplit('.', 1) + postfix)

def main(args):
    logging.basicConfig(level=logging.INFO,
                        format='%(module)s:%(levelname)s:%(asctime)s:%(message)s',
                        handlers=[logging.FileHandler("../logs/report.log")])
    logging.info(args)

    utils.create_dir_if_not_exist(args.out_expr_dir)
    utils.create_dir_if_not_exist(join(args.out_expr_dir, 'raw'))
    utils.create_dir_if_not_exist(join(args.out_expr_dir, 'processed'))

    gse = GEOparse.get_GEO(geo='GSE64913', destdir=join(args.out_expr_dir, 'raw'))

    annotated = gse.pivot_and_annotate('VALUE', gse.gpls['GPL570'], 'ENTREZ_GENE_ID')
    annotated2 = annotated[~pd.isnull(annotated.ENTREZ_GENE_ID)]
    annotated2 = annotated2.loc[~annotated2.isnull().values.all(axis=1)]
    annotated2['ENTREZ_GENE_ID'] = annotated2.ENTREZ_GENE_ID.str.split('///').str[0].astype(int)
    annotated2 = annotated2.set_index('ENTREZ_GENE_ID')

    classes = {}
    classes['healthy_cae'] = ['diagnosis: Healthy', 'cell type: Central airway epithelium']
    classes['healthy_pae'] = ['diagnosis: Healthy', 'cell type: Peripheral airway epithelium']
    classes['asthma_cae'] = ['diagnosis: Severe Asthmatic', 'cell type: Central airway epithelium']
    classes['asthma_pae'] = ['diagnosis: Severe Asthmatic', 'cell type: Peripheral airway epithelium']
    logging.info(classes)

    gsms = {cls: [gsm for gsm in gse.gsms
                  if gse.gsms[gsm].metadata['characteristics_ch1'][1] == classes[cls][0]
                  and gse.gsms[gsm].metadata['characteristics_ch1'][5] == classes[cls][1]]
            for cls in classes}
    logging.info(' '.join(['{} GSM:{}'.format(cls, len(gsms[cls])) for cls in classes]))

    utils.create_dir_if_not_exist(args.out_expr_dir)
    utils.write_expr(join(args.out_expr_dir, 'processed', 'expr.tsv'), annotated2)
    for cls in classes:
        utils.write_text(join(args.out_expr_dir, 'processed', '{}_gsms.txt'.format(cls)), gsms[cls])

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process GSE64913')
    parser.add_argument('out_expr_dir', type=str,
                        help='Output directory for expression data file and GSM lists')
    args = parser.parse_args()
    main(args)

score: 2.265625
int_score: 2
max_stars_repo_path: test.py
max_stars_repo_name: cooool/doublejian
max_stars_count: 0
id: 12799654
content:

<reponame>cooool/doublejian
def main():
    print ("hello")
    return 0

if __name__ == '__main__':
    main()

score: 1.6875
int_score: 2
max_stars_repo_path: backends/redis.py
max_stars_repo_name: iliadmitriev/auth-api
max_stars_count: 3
id: 12799655
content:

<reponame>iliadmitriev/auth-api<filename>backends/redis.py<gh_stars>1-10
import aioredis

async def init_redis(app):
    app['redis'] = aioredis.from_url(
        app['redis_location'],
    )

async def close_redis(app):
    await app['redis'].close()

def setup_redis(app, redis_location):
    app['redis_location'] = redis_location
    app.on_startup.append(init_redis)
    app.on_cleanup.append(close_redis)

async def get_redis_key(redis, key):
    async with redis.client() as conn:
        val = await conn.get(key)
        return val

async def set_redis_key(redis, key, value, expire=None):
    async with redis.client() as conn:
        if expire is None:
            res = await conn.set(key, value)
        else:
            res = await conn.set(key, value, ex=expire)
        return res

score: 2.375
int_score: 2
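The on_startup/on_cleanup hooks match aiohttp's application signals, so wiring these helpers up looks roughly like the sketch below. It assumes an aiohttp app and a Redis instance at localhost; none of this is in the original file.

from aiohttp import web

app = web.Application()
setup_redis(app, 'redis://localhost:6379/0')  # init_redis/close_redis now run automatically

async def handler(request):
    await set_redis_key(request.app['redis'], 'session:42', 'alice', expire=3600)
    val = await get_redis_key(request.app['redis'], 'session:42')  # bytes or None
    return web.Response(text=repr(val))

app.router.add_get('/', handler)
web.run_app(app)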
max_stars_repo_path: utils/BaseFlags.py
max_stars_repo_name: cvsubmittemp/BraVL
max_stars_count: 0
id: 12799656
content:

<reponame>cvsubmittemp/BraVL
import os
import argparse
import torch
import scipy.io as sio

parser = argparse.ArgumentParser()

# TRAINING
parser.add_argument('--batch_size', type=int, default=512, help="batch size for training")
parser.add_argument('--initial_learning_rate', type=float, default=0.0001, help="starting learning rate")
parser.add_argument('--beta_1', type=float, default=0.9, help="default beta_1 val for adam")
parser.add_argument('--beta_2', type=float, default=0.999, help="default beta_2 val for adam")
parser.add_argument('--start_epoch', type=int, default=0, help="flag to set the starting epoch for training")
parser.add_argument('--end_epoch', type=int, default=100, help="flag to indicate the final epoch of training")

# DATA DEPENDENT
parser.add_argument('--class_dim', type=int, default=32, help="dimension of common factor latent space")

# SAVE and LOAD
parser.add_argument('--mm_vae_save', type=str, default='mm_vae', help="model save for vae_bimodal")
parser.add_argument('--load_saved', type=bool, default=False, help="flag to indicate if a saved model will be loaded")

# DIRECTORIES
# experiments
parser.add_argument('--dir_experiment', type=str, default='./logs', help="directory to save logs in")
parser.add_argument('--dataname', type=str, default='DIR-Wiki', help="dataset")
parser.add_argument('--sbj', type=str, default='sub-03', help="fmri subject")
parser.add_argument('--roi', type=str, default='LVC_HVC_IT', help="ROI")
parser.add_argument('--text_model', type=str, default='GPTNeo', help="text embedding model")
parser.add_argument('--image_model', type=str, default='pytorch/repvgg_b3g4', help="image embedding model")
parser.add_argument('--test_type', type=str, default='zsl', help='normal or zsl')
parser.add_argument('--aug_type', type=str, default='image_text', help='no_aug, image_text, image_only, text_only')

# multimodal
parser.add_argument('--method', type=str, default='joint_elbo', help='choose method for training the model')
parser.add_argument('--modality_jsd', type=bool, default=False, help="modality_jsd")
parser.add_argument('--modality_poe', type=bool, default=False, help="modality_poe")
parser.add_argument('--modality_moe', type=bool, default=False, help="modality_moe")
parser.add_argument('--joint_elbo', type=bool, default=False, help="modality_moe")
parser.add_argument('--poe_unimodal_elbos', type=bool, default=True, help="unimodal_klds")
parser.add_argument('--factorized_representation', action='store_true', default=False, help="factorized_representation")

# LOSS TERM WEIGHTS
parser.add_argument('--beta', type=float, default=0.0, help="default initial weight of sum of weighted divergence terms")
parser.add_argument('--beta_style', type=float, default=1.0, help="default weight of sum of weighted style divergence terms")
parser.add_argument('--beta_content', type=float, default=1.0, help="default weight of sum of weighted content divergence terms")
parser.add_argument('--lambda1', type=float, default=0.001, help="default weight of intra_mi terms")
parser.add_argument('--lambda2', type=float, default=0.001, help="default weight of inter_mi terms")

FLAGS = parser.parse_args()

data_dir_root = os.path.join('./data', FLAGS.dataname)
brain_dir = os.path.join(data_dir_root, 'brain_feature', FLAGS.roi, FLAGS.sbj)
image_dir_train = os.path.join(data_dir_root, 'visual_feature/ImageNetTraining', FLAGS.image_model + '-PCA', FLAGS.sbj)
text_dir_train = os.path.join(data_dir_root, 'textual_feature/ImageNetTraining/text', FLAGS.text_model, FLAGS.sbj)

train_brain = sio.loadmat(os.path.join(brain_dir, 'fmri_train_data.mat'))['data'].astype('double')
train_image = sio.loadmat(os.path.join(image_dir_train, 'feat_pca_train.mat'))['data'].astype('double')  # [:,0:3000]
train_text = sio.loadmat(os.path.join(text_dir_train, 'text_feat_train.mat'))['data'].astype('double')

train_brain = torch.from_numpy(train_brain)
train_image = torch.from_numpy(train_image)
train_text = torch.from_numpy(train_text)

dim_brain = train_brain.shape[1]
dim_image = train_image.shape[1]
dim_text = train_text.shape[1]

parser.add_argument('--m1_dim', type=int, default=dim_brain, help="dimension of modality brain")
parser.add_argument('--m2_dim', type=int, default=dim_image, help="dimension of modality image")
parser.add_argument('--m3_dim', type=int, default=dim_text, help="dimension of modality text")
parser.add_argument('--data_dir_root', type=str, default=data_dir_root, help="data dir")

FLAGS = parser.parse_args()
print(FLAGS)

score: 2.203125
int_score: 2
max_stars_repo_path: mcpipy/dragoncurve.py
max_stars_repo_name: wangtt03/raspberryjammod
max_stars_count: 338
id: 12799657
content:

#
# Code by <NAME> and under the MIT license
#

from mineturtle import *
import lsystem

t = Turtle()
t.pendelay(0)
t.turtle(None)
t.penblock(block.BRICK_BLOCK)
# ensure angles are always integral multiples of 90 degrees
t.gridalign()

rules = {'X': 'X+YF+', 'Y': '-FX-Y'}

def go():
    # draw a wall segment with a door
    t.pendown()
    t.penblock(block.BRICK_BLOCK)
    t.startface()
    for i in range(4):
        t.go(4)
        t.pitch(90)
    t.endface()
    t.penup()
    t.go(2)
    t.pendown()
    t.penblock(block.AIR)
    t.pitch(90)
    t.go(1)
    t.penup()
    t.pitch(180)
    t.go(1)
    t.pitch(90)
    t.go(2)

dictionary = {
    '+': lambda: t.yaw(90),
    '-': lambda: t.yaw(-90),
    'F': lambda: go()
}

lsystem.lsystem('FX', rules, dictionary, 14)

score: 3.0625
int_score: 3
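The heavy lifting is done by lsystem.lsystem, which is not shown in the record; conceptually it expands the axiom 'FX' through the rewrite rules and then executes one callback per symbol. A standalone sketch of that expansion step (a hypothetical helper, independent of the mineturtle and lsystem modules):

def expand(axiom, rules, iterations):
    # Rewrite every symbol in parallel on each pass; symbols without a rule are kept.
    s = axiom
    for _ in range(iterations):
        s = ''.join(rules.get(ch, ch) for ch in s)
    return s

print(expand('FX', {'X': 'X+YF+', 'Y': '-FX-Y'}, 2))  # FX+YF++-FX-YF+
# Each 'F' draws one wall segment (go()), '+' turns 90 degrees one way, '-' the other.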
max_stars_repo_path: server/app/tests/users/test_endpoints.py
max_stars_repo_name: josenava/meal-calendar
max_stars_count: 0
id: 12799658
content:

import pytest
from fastapi.testclient import TestClient

@pytest.mark.integration
@pytest.mark.usefixtures("test_db_session")
class TestSignupEndpoint:
    def test_signup_returns_200(self, client: TestClient):
        response = client.post(
            "/users/signup",
            json={
                "email": "<EMAIL>",
                "password": "<PASSWORD>"
            }
        )
        assert response.status_code == 201

    def test_signup_existing_user_returns_422(self, client: TestClient):
        response = client.post(
            "/users/signup",
            json={
                "email": "<EMAIL>",
                "password": "<PASSWORD>"
            }
        )
        assert response.status_code == 201

        response_2 = client.post(
            "/users/signup",
            json={
                "email": "<EMAIL>",
                "password": "<PASSWORD>"
            }
        )
        assert response_2.status_code == 422

score: 2.484375
int_score: 2
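Both tests rely on a client fixture that is not defined in this file. A minimal conftest.py sketch of what it could look like; the app.main import path is hypothetical, not taken from the original repository:

import pytest
from fastapi.testclient import TestClient

from app.main import app  # hypothetical location of the FastAPI instance

@pytest.fixture
def client():
    # TestClient wraps the ASGI app so requests run in-process, no server needed.
    return TestClient(app)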
max_stars_repo_path: queries/charQuery.py
max_stars_repo_name: Kadantte/AniPy-Bot
max_stars_count: 11
id: 12799659
content:

def searchChar():
    query = '''
    query ($search: String) {
        Character(search: $search) {
            siteUrl
            name {
                full
            }
            media(perPage: 1) {
                nodes {
                    title {
                        romaji
                        english
                    }
                    siteUrl
                }
            }
            image {
                large
            }
            description(asHtml: true)
        }
    }
    '''
    return query

score: 1.820313
int_score: 2
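The query shape (Character, siteUrl, media.nodes) matches the AniList GraphQL schema, so a plausible way to use it is to POST it with a $search variable. This is a sketch; the endpoint usage and the character name are assumptions, not part of the original file:

import requests

resp = requests.post(
    "https://graphql.anilist.co",
    json={"query": searchChar(), "variables": {"search": "Luffy"}},
    timeout=10,
)
resp.raise_for_status()
print(resp.json()["data"]["Character"]["siteUrl"])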
max_stars_repo_path: scripts/fitnessMutations/annotate_mutations.py
max_stars_repo_name: felixhorns/BCellSelection
max_stars_count: 3
id: 12799660
content:

<gh_stars>1-10
import sys

import numpy as np
import pandas as pd
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio import SeqIO, Align, AlignIO, Phylo
from itertools import izip

def load_tree(f):
    t = Phylo.read(f, 'newick')
    t.root_with_outgroup("germline")
    t.get_nonterminals()[0].branch_length = 0.0
    # t.ladderize(reverse=True)
    return t

def load_aln(infile):
    aln = Align.MultipleSeqAlignment([])
    aln_dict = {}
    with open(infile, 'r') as f:
        for seq_record in SeqIO.parse(f, 'fasta'):
            aln.append(seq_record)
            aln_dict[seq_record.id] = str(seq_record.seq)
    return aln, aln_dict

def get_parent(tree, child_clade):
    node_path = tree.get_path(child_clade)
    return node_path[-2]

def str_diffs(X, Y):
    diffs = []
    for i, (x, y) in enumerate(izip(X, Y)):
        if x != y:
            d = [i, x, y]
            diffs.append(d)
    return diffs

def get_mutations(T, aln_dict):
    """ Get mutations on each branch of the tree """
    header = ["name", "parent_name", "position", "base_before", "base_after"]
    df = pd.DataFrame(columns=header)
    i = 0
    for clade in T.find_clades():
        if clade.name in [None, "germline", "2_"]:
            continue
        parent = get_parent(T, clade)
        seq_parent = aln_dict[parent.name]
        seq_clade = aln_dict[clade.name]
        diffs = str_diffs(seq_parent, seq_clade)
        for diff in diffs:
            position, base_before, base_after = tuple(diff)
            features = [clade.name, parent.name, position, base_before, base_after]
            df.loc[i] = features
            i += 1
    return df

def find_frame(s):
    # Finds longest ORF
    s = s.replace("-", "")
    seq1 = Seq(s, generic_dna).translate()  # translate in every frame
    seq2 = Seq(s[1:], generic_dna).translate()
    seq3 = Seq(s[2:], generic_dna).translate()
    L_seq1 = max([len(x) for x in seq1.split("*")])  # find longest ORF in each frame
    L_seq2 = max([len(x) for x in seq2.split("*")])
    L_seq3 = max([len(x) for x in seq3.split("*")])
    Ls = [L_seq1, L_seq2, L_seq3]
    L_max = max(Ls)  # get longest ORF among all frames
    frames_max = [i for i, x in enumerate(Ls) if x == L_max]  # get frame of longest ORF
    if len(frames_max) > 1:
        print "Warning: more than one reading frame had max length ORF"
    return frames_max[0]

def annotate_coding(df_mutations, aln_dict):
    """ Annotate each mutation as either nonsynonymous or synonymous """
    coding_status = []
    for i, row in df_mutations.iterrows():
        seq_parent = aln_dict[row["parent_name"]]  # get parent sequence
        seq_mutated = list(seq_parent)
        seq_mutated[int(row["position"])] = row["base_after"]  # introduce mutation
        seq_mutated = "".join(seq_mutated)
        seq_parent = seq_parent.replace("-", "")  # collapse gaps
        seq_mutated = seq_mutated.replace("-", "")
        AA_parent = Seq(seq_parent, generic_dna).translate()  # translate
        AA_mutated = Seq(seq_mutated, generic_dna).translate()
        if AA_parent != AA_mutated:  # compare AA before and after mutation
            coding_status.append("N")
        else:
            coding_status.append("S")
    df_mutations["coding_status"] = coding_status
    return df_mutations

def map_positions(s, positions):
    """ Maps positions in an ungapped sequence to corresponding positions in a gapped sequence """
    # count number of gaps before each position
    counter = 0
    gaps = []
    for i, x in enumerate(s):
        if x == "-":
            counter += 1
        gaps.append(counter)
    # transform boundaries to corresponding positions in new sequence
    positions_transformed = []
    for x in positions:
        my_gaps = gaps[x]
        x_transformed = x + my_gaps
        positions_transformed.append(x_transformed)
    return positions_transformed

def annotate_regions(df_mutations, aln_dict, df_seqs):
    """ Annotate region of each mutation (CDR/FWR) """
    # get one sequence
    sequence_uids = [x for x in aln_dict.keys() if "_" not in x]
    my_sequence_uid = int(sequence_uids[0])
    s = aln_dict[str(my_sequence_uid)]
    # transform positions of region boundaries to corresponding positions in gapped alignment
    fields = ["FWR1_start", "CDR1_start", "FWR2_start", "CDR2_start",
              "FWR3_start", "CDR3_start", "FWR4_start", "C_start"]
    boundaries_ungapped = df_seqs.loc[my_sequence_uid][fields]
    boundaries_ungapped = np.array(boundaries_ungapped) - 1  # transform to zero-indexed positions
    boundaries_ungapped[-1] -= 1  # decrement C region boundary (end of sequence) to fit within array
    boundaries_gapped = map_positions(s, boundaries_ungapped)
    # boundaries_gapped = np.array(boundaries_gapped) - 1  # not used anymore (we do transform earlier)
    boundaries_gapped[0] = 0
    boundaries_gapped[-1] += 1
    # map mutations to regions using boundaries
    labels = ["FWR1", "CDR1", "FWR2", "CDR2", "FWR3", "CDR3", "FWR4"]
    regions = pd.cut(df_mutations["position"], boundaries_gapped,
                     include_lowest=True, right=False, labels=labels)
    df_mutations["region"] = regions
    return df_mutations

if __name__ == "__main__":

    infile_aln = sys.argv[1]
    infile_fitness_tree = sys.argv[2]
    outfile = sys.argv[3]

    print infile_fitness_tree
    print infile_aln

    infile_df_seqs = "/local10G/rfhorns/Bcell/flu_highres/figures/v5/data/FitnessMutations.df_seqs_raw.csv"
    df_seqs = pd.read_csv(infile_df_seqs, header=0, index_col=0)

    aln, aln_dict = load_aln(infile_aln)
    fitness_tree = load_tree(infile_fitness_tree)

    df_mutations = get_mutations(fitness_tree, aln_dict)
    df_mutations = annotate_coding(df_mutations, aln_dict)
    df_mutations = annotate_regions(df_mutations, aln_dict, df_seqs)

    df_mutations.to_csv(outfile)

    print "Done!!"

score: 2.59375
int_score: 3
max_stars_repo_path: import/a.py
max_stars_repo_name: abos5/pythontutor
max_stars_count: 0
id: 12799661
content:

<reponame>abos5/pythontutor<gh_stars>0
print "a before b"
import b
print "a after import b"

_once = 0

def dofoo():
    global _once
    _once += 1
    if _once > 1:
        print "what the hell are u thinking?"
    print "a very important function that can only exec once"

dofoo()
print "complete a"

score: 3.046875
int_score: 3
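The demo only makes sense alongside the b module it imports, which is not included in the record. A hypothetical b.py for a circular-import tutorial would mirror a.py, showing that each module body executes only once per process even when the two import each other:

print "b before a"
import a
print "b after import a"
print "complete b"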
max_stars_repo_path: src/telliot_core/queries/query.py
max_stars_repo_name: tellor-io/pytelliot
max_stars_count: 2
id: 12799662
content:

""" Oracle Query Module

"""
import json

from clamfig import Serializable
from web3 import Web3

from telliot_core.dtypes.value_type import ValueType


class OracleQuery(Serializable):
    """Oracle Query

    An OracleQuery specifies how to pose a question to the Tellor Oracle
    and how to format/interpret the response.

    The OracleQuery class serves as the base class for all Queries, and
    implements default behaviors. Each subclass corresponds to a unique
    Query Type supported by the TellorX network.

    All public attributes of an OracleQuery represent an input that can
    be used to customize the query.

    The base class provides:

    - Calculation of the contents of the `data` field to include with the
      `TellorX.Oracle.tipQuery()` contract call.

    - Calculation of the `id` field to include with the
      `TellorX.Oracle.tipQuery()` and `TellorX.Oracle.submitValue()`
      contract calls.
    """

    @property
    def descriptor(self) -> str:
        """Get the query descriptor string.

        The Query descriptor is a unique string representation of the query.
        The descriptor is required for users to specify the query to TellorX
        through the ``TellorX.Oracle.tipQuery()`` contract call.
        """
        state = self.get_state()
        jstr = json.dumps(state, separators=(",", ":"))
        return jstr

    @property
    def value_type(self) -> ValueType:
        """Returns the ValueType expected by the current Query configuration.

        The value type defines the required data type/structure of the
        ``value`` submitted to the contract through
        ``TellorX.Oracle.submitValue()``.

        This method must be overridden by subclasses.
        """
        pass

    @property
    def query_data(self) -> bytes:
        """Returns the ``data`` field for use in the
        ``TellorX.Oracle.tipQuery()`` contract call.
        """
        return self.descriptor.encode("utf-8")

    @property
    def query_id(self) -> bytes:
        """Returns the query ``id`` for use with the
        ``TellorX.Oracle.tipQuery()`` and ``TellorX.Oracle.submitValue()``
        contract calls.
        """
        return bytes(Web3.keccak(self.query_data))

score: 2.9375
int_score: 3
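The relationship between the three properties is easy to check by hand: query_data is just the UTF-8 bytes of the descriptor, and query_id is their keccak-256 hash. A worked sketch; the descriptor string below is made up for illustration, not a real TellorX query:

from web3 import Web3

descriptor = '{"type":"SpotPrice","asset":"eth","currency":"usd"}'  # hypothetical
query_data = descriptor.encode("utf-8")          # what query_data would return
query_id = bytes(Web3.keccak(query_data))        # what query_id would return
print(query_id.hex())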
max_stars_repo_path: gpu_tasker/email_settings_sample.py
max_stars_repo_name: cnstark/awesome_gpu_scheduler
max_stars_count: 35
id: 12799663
content:

<reponame>cnstark/awesome_gpu_scheduler
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_USE_SSL = True
EMAIL_USE_LOCALTIME = True
DEFAULT_FROM_EMAIL = 'GPUTasker<{}>'.format(EMAIL_HOST_USER)
SERVER_EMAIL = EMAIL_HOST_USER

score: 1.453125
int_score: 1
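Once this sample module is wired into the Django settings (and real credentials are filled in), the standard mail API routes through smtp.163.com. A minimal sketch; the subject, body, and recipient address are placeholders:

from django.core.mail import send_mail

send_mail('GPUTasker: job finished', 'Task 42 completed.',
          DEFAULT_FROM_EMAIL, ['user@example.com'])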
max_stars_repo_path: debug.py
max_stars_repo_name: Epsilon-Lee/OpenNMT-V1
max_stars_count: 7
id: 12799664
content:

<filename>debug.py
import torch
import torch.nn as nn
import onmt
from onmt.BleuCal import fetch_data
import sys

if torch.cuda.is_available():
    torch.cuda.set_device(3)

checkpoint = torch.load('../Models/V1_IWSLT_Models/de2en_30k_bz64_bc5_bleu_26.06_e24.pt')
opt = checkpoint['opt']
# del(checkpoint)
opt.cuda = True

srcData, references = fetch_data('IWSLT/test.de.small.tok', 'IWSLT/test.en.small.tok')

encoder = onmt.Models.Encoder(opt, checkpoint['dicts']['src'])
decoder = onmt.Models.Decoder(opt, checkpoint['dicts']['tgt'])
model = onmt.Models.NMTModel(encoder, decoder)
model.load_state_dict(checkpoint['model'])

generator = nn.Sequential(
    nn.Linear(opt.rnn_size, checkpoint['dicts']['tgt'].size()),
    nn.LogSoftmax())
model.generator = generator
model.cuda()

opt.model = '../Models/V1_IWSLT_Models/de2en_30k_bz64_bc5_bleu_26.06_e24.pt'
translator = onmt.Translator(opt, model, checkpoint['dicts']['src'], checkpoint['dicts']['tgt'])

srcBatch, tgtBatch, candidate = [], [], []
lenSrcData = len(srcData)

for i, line in enumerate(srcData):
    sys.stdout.write('\r')
    sys.stdout.write("%s" % (str(i) + ' of ' + str(lenSrcData)))
    sys.stdout.flush()
    srcTokens = line.split()
    srcBatch += [srcTokens]
    if (i + 1) % opt.trans_batch_size == 0:
        predBatch, _, _ = translator.translate(srcBatch, tgtBatch)
        print 'predBatch:', len(predBatch)
        for b in range(len(predBatch)):
            candidate += [" ".join(predBatch[b][0]) + '\n']
        srcBatch = []
    elif (i + 1) == lenSrcData:
        predBatch, _, _ = translator.translate(srcBatch, tgtBatch)
        print 'predBatch:', len(predBatch)
        for b in range(len(predBatch)):
            candidate += [" ".join(predBatch[b][0]) + '\n']
        srcBatch = []
    else:
        continue

print 'candidate length:', len(candidate)
print 'reference length', len(references[0])

score: 1.90625
int_score: 2
max_stars_repo_path: test_pytest/test_unit/test_database.py
max_stars_repo_name: hat-open/hat-syslog
max_stars_count: 1
id: 12799665
content:

<reponame>hat-open/hat-syslog<filename>test_pytest/test_unit/test_database.py
import datetime
import os
import socket

import pytest

from hat.syslog.server import common
import hat.syslog.server.database

pytestmark = pytest.mark.asyncio


@pytest.fixture
def db_path(tmp_path):
    return tmp_path / 'syslog.db'


@pytest.fixture
def timestamp():
    dt = datetime.datetime.now(tz=datetime.timezone.utc)
    return dt.timestamp()


@pytest.fixture
def create_msg(timestamp):
    counter = 0

    def create_msg(facility=common.Facility.USER,
                   severity=common.Severity.ERROR,
                   hostname=socket.gethostname(),
                   app_name=pytest.__file__,
                   procid=os.getpid()):
        nonlocal counter
        counter += 1
        return common.Msg(facility=facility,
                          severity=severity,
                          version=1,
                          timestamp=timestamp,
                          hostname=hostname,
                          app_name=app_name,
                          procid=str(procid),
                          msgid='test_syslog.backend',
                          data="",
                          msg=f'message no {counter}')

    return create_msg


async def test_create(db_path):
    assert not db_path.exists()

    db = await hat.syslog.server.database.create_database(db_path, False)
    assert db_path.exists()

    await db.async_close()
    assert db_path.exists()


async def test_add_msgs(db_path, timestamp, create_msg):
    db = await hat.syslog.server.database.create_database(db_path, False)

    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id is None
    assert last_id is None

    msgs = []
    entries = await db.add_msgs(msgs)
    assert entries == []

    msgs = [create_msg() for i in range(10)]
    entries = await db.add_msgs([(timestamp, msg) for msg in msgs])
    assert len(entries) == len(msgs)
    assert [entry.msg for entry in entries] == msgs

    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id == entries[0].id
    assert last_id == entries[-1].id

    await db.async_close()

    db = await hat.syslog.server.database.create_database(db_path, False)

    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id == entries[0].id
    assert last_id == entries[-1].id

    await db.async_close()


async def test_delete(db_path, timestamp, create_msg):
    db = await hat.syslog.server.database.create_database(db_path, False)

    msgs = [create_msg() for i in range(10)]
    entries = await db.add_msgs([(timestamp, msg) for msg in msgs])

    await db.delete(entries[0].id)
    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id == entries[0].id
    assert last_id == entries[-1].id

    await db.delete(entries[-1].id)
    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id == entries[-1].id
    assert last_id == entries[-1].id

    msgs = [create_msg() for i in range(10)]
    new_entries = await db.add_msgs([(timestamp, msg) for msg in msgs])
    entries = [entries[-1], *new_entries]
    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id == entries[0].id
    assert last_id == entries[-1].id

    await db.delete(entries[-1].id + 1)
    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id is None
    assert last_id is None

    msgs = [create_msg() for i in range(10)]
    entries = await db.add_msgs([(timestamp, msg) for msg in msgs])
    first_id = await db.get_first_id()
    last_id = await db.get_last_id()
    assert first_id == entries[0].id
    assert last_id == entries[-1].id

    await db.async_close()

score: 2.09375
int_score: 2
max_stars_repo_path: web_scraping/beautifulsoup/bs4_sample2.py
max_stars_repo_name: manual123/Nacho-Jupyter-Notebooks
max_stars_count: 2
id: 12799666
content:

<filename>web_scraping/beautifulsoup/bs4_sample2.py
from pprint import pprint
import re

from bs4 import BeautifulSoup

html_content = open('bs_sample.html')
# http://dl.dropbox.com/u/49962071/blog/python/resource/bs_sample.html
soup = BeautifulSoup(html_content)  # making soap

for tag in soup.find_all(re.compile("^p")):  # find all tag start with p
    print tag.name

for tag in soup.find_all(re.compile("t")):  # find all tag contains t
    print tag.name

for tag in soup.find_all(True):  # find all tag
    print tag.name

pprint(soup.find_all('a'))  # find all a tag
print 20*"++"
pprint(soup.find_all(["a", "b"]))  # find multiple tag

def has_class_but_no_id(tag):
    return tag.has_key('class') and not tag.has_key('id')

pprint(soup.find_all(has_class_but_no_id))  # pass a function to find_all
pprint(soup.find_all(text=re.compile("sisters")))  # find all tag content contains key 'sisters'
print 20*"++"
pprint(soup.find_all(href=re.compile("my_url")))  # all links contains key "my_url"
pprint(soup.find_all(id=True))  # all links has id
pprint(soup.find_all(class_=True))  # all links has class

def has_six_characters(css_class):
    return css_class is not None and len(css_class) == 7

pprint(soup.find_all(class_=has_six_characters))  # find all class name contains 7 characters
pprint(soup.find_all("a", "sister"))  # find all a tag have class named 'sister'
pprint(soup.find_all("a", re.compile("sister")))  # find all a tag have class named contains 'sister'
print 20*"++"
pprint(soup.find_all(href=re.compile("elsie"), id='link1'))  # url name contains elsie and have id = link1
pprint(soup.find_all(attrs={'href': re.compile("elsie"), 'id': 'link1'}))  # url name contains elsie and have id = link1
pprint(soup.find_all("a", limit=2))  # use limit on findall
pprint(soup.html.find_all("title", recursive=True))  # use recursive on findall

score: 3.375
int_score: 3
max_stars_repo_path: python/busmap/fetch.py
max_stars_repo_name: ehabkost/busmap
max_stars_count: 4
id: 12799667
content:

<reponame>ehabkost/busmap<filename>python/busmap/fetch.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import horarios, linhas, env, dias

def get_linha_hor(idhor, nome):
    c = env.db.cursor()

    # look for id horario
    r = c.select_onerow('linhas', ['id'], 'idhor=%s', [idhor])
    if r:
        c.close()
        return r[0]

    # not found. look for a similar name, but with no idhor set
    r = c.select_onerow('linhas', ['id'], 'idhor is null and nome=%s', [nome])
    if r:
        id = r[0]
        # found. set idhor
        c.execute('update linhas set idhor=%s where id=%s', [idhor, id])
        c.close()
        return id

    # not found. insert a new record
    c.insert_one('linhas', idhor=idhor, nome=nome)
    id = c.lastrowid
    c.close()
    return id

def get_ponto_hor(nome):
    c = env.db.cursor()
    r = c.select_onerow('pontos', ['id'], 'nome=%s', nome)
    if r:
        c.close()
        return r[0]

    # not found
    c.insert_one('pontos', nome=nome)
    id = c.lastrowid
    c.close()
    return id

def fetch_horarios(idhor, nome):
    c = env.db.cursor()
    idlinha = get_linha_hor(idhor, nome)

    #TODO: check if this really works
    c.execute('start transaction')
    try:
        c.execute('delete from hs, h \
                   using horsets hs, horarios h \
                   where hs.idlinha=%s and h.idset=hs.id', [idlinha])
        html = horarios.get_horarios_html(idhor)
        for pto, dia, apartir, horas in horarios.parse_hor_html(html):
            print 'ponto: %s, dias: %s' % (pto, dia)
            idponto = get_ponto_hor(pto)
            d = dias.id_dias(dia)
            c.insert_one('horsets', idlinha=idlinha, idponto=idponto, dia=d, apartir=apartir)
            idset = c.lastrowid
            for sp, h in horas:
                c.insert_one('horarios', idset=idset, hora=h, special=sp)
    except:
        c.execute('rollback')
    else:
        c.execute('commit')
    c.close()

def fetch_hor_all():
    for cod, nome in horarios.lista_linhas():
        print 'Fetching %s:%s' % (cod, nome)
        fetch_horarios(cod, nome)

if __name__ == '__main__':
    #fetch_horarios('022', u'INTER 2 (Horário)')
    fetch_hor_all()

score: 2.546875
int_score: 3
max_stars_repo_path: facenet.py
max_stars_repo_name: ndoo/libfreenect2-facial-recognition
max_stars_count: 0
id: 12799668
content:

<reponame>ndoo/libfreenect2-facial-recognition<filename>facenet.py<gh_stars>0
import argparse
import cv2
import numpy as np
import sys

from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame

ap = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ap.add_argument("-s", "--depth-smooth", type=int, default=20,
                help="Number of samples for moving average of nearest point")
ap.add_argument("-r", "--depth-range", type=int, default=80,
                help="Range to clip from nearest object, in millimeters")
ap.add_argument("-m", "--ir-min", type=int, default=1024,
                help="IR minimum value clip, out of a maximum value of 65535")
ap.add_argument("-M", "--ir-max", type=int, default=32768,
                help="IR maximum value clip, out of a maximum value of 65535")
args = vars(ap.parse_args())

cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
anterior = 0

try:
    from pylibfreenect2 import CudaPacketPipeline
    pipeline = CudaPacketPipeline()
except:
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
print("Packet pipeline:", type(pipeline).__name__)

fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)

serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline=pipeline)

types = FrameType.Color | FrameType.Ir | FrameType.Depth
listener = SyncMultiFrameListener(types)

# Register listeners
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)

# Start streams
device.startStreams(rgb=True, depth=True)

# Initialize buffer for moving average of nearest value
nearest_buffer = np.empty(args["depth_smooth"])
nearest_buffer[:] = np.NaN

# Iterate acquiring frames
while True:
    frames = listener.waitForNewFrame()

    depth = frames["depth"].asarray(np.float32)
    color = frames["color"].asarray()

    # Flip invalid depth value (0) to maximum value, to clean up blown-out patches at infinity
    depth[depth == 0] = np.amax(depth)

    # Apply clip on infrared image
    ir = np.uint8(
        (np.clip(frames["ir"].asarray(), args["ir_min"], args["ir_max"]) - args["ir_min"] - 1)
        / ((args["ir_max"] - args["ir_min"]) / 256)
    )
    #ir = np.uint8(frames["ir"].asarray() / 256)

    faces = faceCascade.detectMultiScale(ir, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.imshow("Face IR", cv2.resize(cv2.equalizeHist(ir[y:y+h, x:x+w]), (800, 800)))

        face_depth = depth[y:y+h, x:x+w]

        # Clip noise around nearest value by taking 10th lowest value
        nearest = np.partition(face_depth, 10, None)[9]

        # Determine nearest value by updating buffer and taking the average
        # Needed to combat flickering due to depth noise
        #nearest_buffer[:-1] = nearest_buffer[1:]
        #nearest_buffer[-1] = nearest
        #nearest = np.average(nearest_buffer)

        # Apply clip from nearest on depth image
        face_depth = np.clip(face_depth, nearest, nearest + args["depth_range"])
        face_depth -= nearest
        face_depth /= args["depth_range"]

        cv2.imshow("Face Depth", cv2.resize(face_depth, (800, 800)))

    listener.release(frames)

    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break

device.stop()
device.close()

sys.exit(0)

score: 2.171875
int_score: 2
max_stars_repo_path: src/unicon/plugins/nxos/mds/statemachine.py
max_stars_repo_name: nielsvanhooy/unicon.plugins
max_stars_count: 18
id: 12799669
content:

<gh_stars>10-100
__author__ = "<NAME> <<EMAIL>>"

from unicon.plugins.nxos.statemachine import NxosSingleRpStateMachine
from unicon.plugins.nxos.mds.patterns import NxosMdsPatterns
from unicon.statemachine import State, Path

patterns = NxosMdsPatterns()


class NxosMdsSingleRpStateMachine(NxosSingleRpStateMachine):
    def create(self):
        super().create()
        self.remove_path('enable', 'shell')
        self.remove_path('shell', 'enable')
        self.remove_state('shell')

        shell = State('shell', patterns.shell_prompt)
        tie = State('tie', patterns.tie_prompt)
        enable = self.get_state('enable')
        self.add_state(shell)
        self.add_state(tie)

        enable_to_shell = Path(enable, shell, 'bash', None)
        shell_to_enable = Path(shell, enable, 'exit', None)
        enable_to_tie = Path(enable, tie, 'san-ext-tuner', None)
        tie_to_enable = Path(tie, enable, 'end', None)

        # Add State and Path to State Machine
        self.add_path(enable_to_shell)
        self.add_path(shell_to_enable)
        self.add_path(enable_to_tie)
        self.add_path(tie_to_enable)


class NxosMdsDualRpStateMachine(NxosMdsSingleRpStateMachine):
    def create(self):
        super().create()

score: 1.96875
int_score: 2
max_stars_repo_path: cardice/__init__.py
max_stars_repo_name: ogrisel/cardice
max_stars_count: 2
id: 12799670
content:

"""Compute Cloud setup with SaltStack and Apache Libcloud"""

__version__ = '0.1.0-git'

if __name__ == '__main__':
    from cardice.commandline import main
    main()

score: 1.070313
int_score: 1
max_stars_repo_path: boo/okved/__init__.py
max_stars_repo_name: vishalbelsare/boo
max_stars_count: 14
id: 12799671
content:

<filename>boo/okved/__init__.py
from .okved import all_codes_v2, name_v2

score: 1.03125
int_score: 1
max_stars_repo_path: genestack_client/genestack_exceptions.py
max_stars_repo_name: genestack/python-client
max_stars_count: 2
id: 12799672
content:

# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from future import standard_library
standard_library.install_aliases()
from builtins import *
from urllib.error import URLError

MASTER_BRANCH = 'https://github.com/genestack/python-client/archive/master.zip'
PYPI_PACKAGE = 'genestack-client'


class GenestackBaseException(Exception):
    """
    Base class for Genestack exceptions.

    Use it to catch all exceptions raised explicitly by Genestack Python Client.
    """
    pass


class GenestackException(GenestackBaseException):
    """
    Client-side exception class.

    Raise its instances (instead of :py:class:`~exceptions.Exception`)
    if anything is wrong on client side.
    """
    pass


class GenestackServerException(GenestackException):
    """
    Server-side exception class.

    Raised when Genestack server returns an error response
    (error message generated by Genestack Java code, not an HTTP error).
    """

    def __init__(self, message, path, post_data, debug=False, stack_trace=None):
        """
        :param message: exception message
        :type message: str
        :param path: path after server URL of connection.
        :type path: str
        :param post_data: POST data (file or dict)
        :type debug: bool
        :param debug: flag if stack trace should be printed
        :param stack_trace: server stack trace
        :type stack_trace: str
        """
        message = (message.decode('utf-8', 'ignore')
                   if isinstance(message, bytes) else message)
        GenestackException.__init__(self, message, path, post_data, debug, stack_trace)
        self.message = message
        self.debug = debug
        self.stack_trace = stack_trace
        self.path = path
        self.post_data = post_data

    def __str__(self):
        if isinstance(self.post_data, dict):
            message = 'Got error "%s" at call of method "%s" of "%s"' % (
                self.message,
                self.post_data.get('method', '<unknown>'),
                self.path
            )
        else:
            # upload file
            message = 'Got error "%s" at call of "%s"' % (
                self.message,
                self.path
            )
        if self.stack_trace:
            if self.debug:
                message += '\nStacktrace from server is:\n%s' % self.stack_trace
            else:
                message += '\nEnable debug option to retrieve traceback'
        return message


class GenestackResponseError(GenestackBaseException, URLError):
    """
    Wrapper for HTTP response errors.

    Extends :py:class:`urllib2.URLError` for backward compatibility.
    """
    def __init__(self, reason):
        self.args = reason,
        self.reason = reason

    def __str__(self):
        return '<urlopen error %s>' % self.reason


class GenestackConnectionFailure(GenestackBaseException, URLError):
    """
    Wrapper for server connection failures.

    Extends :py:class:`urllib2.URLError` for backward compatibility.
    """
    def __init__(self, message):
        self.message = "<connection failed %s>" % message

    def __str__(self):
        return self.message


class GenestackAuthenticationException(GenestackException):
    """
    Exception thrown on an authentication error response from server.
    """
    pass


class GenestackVersionException(GenestackException):
    """
    Exception thrown if server requires a newer version of Python Client.
    """

    def __init__(self, current_version, required_version=None):
        """
        :param current_version: current version
        :type current_version: distutils.version.StrictVersion
        :param required_version: minimum required version
        :type required_version: distutils.version.StrictVersion
        """
        if required_version:
            package = MASTER_BRANCH if required_version.prerelease else PYPI_PACKAGE
            message = (
                'Your Genestack Client version "{current_version}" is too old, '
                'at least "{required_version}" is required.\n'
            ).format(current_version=current_version, required_version=required_version)
        else:
            package = PYPI_PACKAGE
            message = 'Cannot get required version from server.\n'
        message += (
            'You can update client with the following command:\n'
            '    pip install {package} --upgrade'
        ).format(package=package)
        super(GenestackVersionException, self).__init__(message)

score: 2.359375
int_score: 2
max_stars_repo_path: roll_dice.py
max_stars_repo_name: sarveshbhatnagar/CompetetiveProgramming
max_stars_count: 0
id: 12799673
content:

<gh_stars>0
from collections import defaultdict


class Solution:
    def rollDice(self, arr):
        map_val = {
            6: 1,
            1: 6,
            2: 4,
            4: 2,
            3: 5,
            5: 3
        }
        count_elem = defaultdict(int)
        showFace = None
        max_count = 0
        for elem in arr:
            count_elem[elem] += 1
            if count_elem[elem] > max_count:
                max_count = count_elem[elem]
                showFace = elem
        return len(arr) - count_elem[showFace] + count_elem[map_val[showFace]]


# def number_of_rotations(dice: List[int]) -> int:
#     return min(
#         sum(0 if d == v else 1 if d + v != 7 else 2 for d in dice)
#         for v in range(1, 7)
#     )

# N = [6, 6, 1]
N = [6, 1, 5, 4]
print(Solution().rollDice(N))

score: 2.984375
int_score: 3
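The return expression counts one rotation for every die not already showing showFace, plus one extra for each die showing the opposite face (looked up in map_val), since opposite faces take two rotations. A worked check for the sample input, not part of the original file:

# N = [6, 1, 5, 4]: all counts are 1, so showFace stays 6 (the first maximum seen).
# rotations = len(N) - count[6] + count[map_val[6]] = 4 - 1 + count[1] = 4
# (the dice showing 5 and 4 cost one rotation each; the die showing 1, opposite of 6, costs two)
assert Solution().rollDice([6, 1, 5, 4]) == 4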
max_stars_repo_path: python/conversations.py
max_stars_repo_name: grcanosa/renfe-checker
max_stars_count: 7
id: 12799674
content:

"""
"""
from enum import Enum
import logging

from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import ConversationHandler
from telegramcalendarkeyboard import telegramcalendar
from telegramcalendarkeyboard import telegramoptions

from texts import texts as TEXTS
from texts import keyboards as KEYBOARDS

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.DEBUG)
logger = logging.getLogger(__name__)


class BotOptions(Enum):
    ADD_QUERY = 1
    DEL_QUERY = 2
    DO_QUERY = 3


class ConvStates(Enum):
    OPTION = 1
    STATION = 2
    DATE = 3
    NUMERIC_OPTION = 4


class RenfeBotConversations:

    class Conversation:
        def __init__(self, userid):
            self._userid = userid
            self.reset()

        def reset(self):
            self._option = 0
            self._origin = None
            self._dest = None
            self._date = None
            self._data = None

    def __init__(self, renfebot):
        self._conversations = {}
        self._RB = renfebot

    def _start_conv_for_user(self, userid):
        if userid not in self._conversations:
            self._conversations[userid] = self.Conversation(userid)
        self._conversations[userid].reset()

    def handler_start(self, bot, update):
        ret_code = 0
        userid = update.message.from_user.id
        username = update.message.from_user.first_name
        if update.message.from_user.last_name is not None:
            username += " " + update.message.from_user.last_name
        auth = self._RB._DB.get_user_auth(userid, username)
        if auth == 0:  # Not authorized
            logger.debug("NOT AUTHORIZED USER")
            update.message.reply_text(TEXTS["NOT_AUTH_REPLY"].format(username=username),
                                      reply_markup=ReplyKeyboardRemove())
            self._RB.ask_admin_for_access(bot, userid, username)
            ret_code = ConversationHandler.END
        else:  # Authorized
            logger.debug("AUTHORIZED USER")
            self._start_conv_for_user(userid)
            update.message.reply_text(TEXTS["OPTION_SELECTION"],
                                      reply_markup=ReplyKeyboardMarkup(KEYBOARDS["MAIN_OPTIONS"]),
                                      one_time_keyboard=True)
            ret_code = ConvStates.OPTION
        return ret_code

    def handler_cancel(self, bot, update):
        return ConversationHandler.END

    def handler_option(self, bot, update):
        userid = update.message.from_user.id
        ret_code = 0
        if update.message.text == TEXTS["MAIN_OP_DO_QUERY"]:
            ret_code = self._h_op_do_query(userid, bot, update)
        elif update.message.text == TEXTS["MAIN_OP_ADD_QUERY"]:
            ret_code = self._h_op_add_query(userid, bot, update)
        elif update.message.text == TEXTS["MAIN_OP_DEL_QUERY"]:
            ret_code = self._h_op_del_query(userid, bot, update)
        elif update.message.text == TEXTS["MAIN_OP_CHECK_QUERY"]:
            ret_code = self._h_op_check_queries(userid, bot, update)
        else:
            update.message.reply_text(TEXTS["MAIN_OP_UNKNOWN"])
            ret_code = ConversationHandler.END
        return ret_code

    def _h_op_do_query(self, userid, bot, update):
        self._conversations[userid]._option = BotOptions.DO_QUERY
        update.message.reply_text(TEXTS["DO_ONETIME_QUERY"])
        update.message.reply_text(TEXTS["SELECT_ORIGIN_STATION"],
                                  reply_markup=ReplyKeyboardMarkup(KEYBOARDS["STATIONS"],
                                                                   one_time_keyboard=True))
        return ConvStates.STATION

    def _h_op_add_query(self, userid, bot, update):
        self._conversations[userid]._option = BotOptions.ADD_QUERY
        update.message.reply_text(TEXTS["ADD_PERIODIC_QUERY"])
        update.message.reply_text(TEXTS["SELECT_ORIGIN_STATION"],
                                  reply_markup=ReplyKeyboardMarkup(KEYBOARDS["STATIONS"],
                                                                   one_time_keyboard=True))
        return ConvStates.STATION

    def _h_op_del_query(self, userid, bot, update):
        self._conversations[userid]._option = BotOptions.DEL_QUERY
        user_queries = self._RB._DB.get_user_queries(userid)
        ret_code = 0
        if len(user_queries) == 0:
            update.message.reply_text(TEXTS["NO_QUERIES_FOR_USERID"])
            ret_code = ConversationHandler.END
        else:
            options = []
            for q in user_queries:
                options.append(TEXTS["QUERY_IN_DB"].format(
                    origin=q["origin"],
                    destination=q["destination"],
                    date=self._RB._DB.timestamp_to_date(q["date"])))
            bot.send_message(chat_id=userid,
                             text=TEXTS["SELECT_TRIP_TO_DETELE"],
                             reply_markup=telegramoptions.create_options_keyboard(options, TEXTS["CANCEL"]))
            self._conversations[userid]._data = user_queries
            ret_code = ConvStates.NUMERIC_OPTION
        return ret_code

    def _h_op_check_queries(self, userid, bot, update):
        user_queries = self._RB._DB.get_user_queries(userid)
        if len(user_queries) == 0:
            update.message.reply_text(TEXTS["NO_QUERIES_FOR_USERID"])
        else:
            update.message.reply_text(TEXTS["QUERIES_FOR_USERID"])
            for q in user_queries:
                update.message.reply_text(TEXTS["QUERY_IN_DB"].format(
                    origin=q["origin"],
                    destination=q["destination"],
                    date=self._RB._DB.timestamp_to_date(q["date"])))
        update.message.reply_text(TEXTS["END_MESSAGE"], reply_markup=ReplyKeyboardRemove())
        return ConversationHandler.END

    def handler_numeric_option(self, bot, update):
        logger.debug("Processing numeric option")
        userid = update.callback_query.from_user.id
        user_queries = self._conversations[userid]._data
        selected, query_index = telegramoptions.process_option_selection(bot, update)
        if not selected:
            logger.debug("Nothing selected")
            bot.send_message(chat_id=userid,
                             text=TEXTS["DB_QUERY_NOT_REMOVED"],
                             reply_markup=ReplyKeyboardRemove())
            return ConversationHandler.END
        else:
            logger.debug("Deleting query with index " + str(query_index))
            if len(user_queries) > query_index:
                query = user_queries[query_index]
                if self._RB._DB.remove_periodic_query(query["userid"], query["origin"],
                                                      query["destination"], query["date"]):
                    bot.send_message(chat_id=userid,
                                     text=TEXTS["DB_QUERY_REMOVED"],
                                     reply_markup=ReplyKeyboardRemove())
                else:
                    bot.send_message(chat_id=userid,
                                     text=TEXTS["DB_QUERY_NOT_PRESENT"],
                                     reply_markup=ReplyKeyboardRemove())
            return ConversationHandler.END

    def handler_date(self, bot, update):
        logger.debug("Processing date")
        selected, date = telegramcalendar.process_calendar_selection(bot, update)
        if not selected:
            logger.debug("Not selected")
            return ConvStates.DATE
        else:
            logger.debug("selected")
            userid = update.callback_query.from_user.id
            conv = self._conversations[userid]
            conv._date = date.strftime("%d/%m/%Y")
            logger.debug("Date is " + conv._date)
            bot.send_message(chat_id=userid,
                             text=TEXTS["SELECTED_DATA"].format(origin=conv._origin,
                                                                destination=conv._dest,
                                                                date=conv._date))
            if conv._option == BotOptions.ADD_QUERY:
                res = self._RB._DB.add_periodic_query(userid, conv._origin, conv._dest, conv._date)
                bot.send_message(chat_id=userid, text=res[1])
            elif conv._option == BotOptions.DO_QUERY:
                bot.send_message(chat_id=userid, text=TEXTS["WAIT_FOR_TRAINS"])
                res = self._RB._RF.check_trip(conv._origin, conv._dest, conv._date)
                self._RB.send_query_results_to_user(bot, userid, res,
                                                    conv._origin, conv._dest, conv._date)
            else:
                logger.error("Problem, no other option should lead HERE!")
            return ConversationHandler.END

    def handler_station(self, bot, update):
        logger.debug("Setting Station")
        userid = update.message.from_user.id
        if self._conversations[userid]._origin is None:
            logger.debug("Origin Station")
            self._conversations[userid]._origin = update.message.text.upper()
            update.message.reply_text(TEXTS["SELECT_DESTINATION_STATION"],
                                      reply_markup=ReplyKeyboardMarkup(KEYBOARDS["STATIONS"],
                                                                       one_time_keyboard=True))
            return ConvStates.STATION
        else:
            logger.debug("Destination Station")
            self._conversations[userid]._dest = update.message.text.upper()
            bot.send_message(chat_id=userid,
                             text=TEXTS["SELECTED_TRIP"].format(
                                 origin=self._conversations[userid]._origin,
                                 destination=self._conversations[userid]._dest),
                             reply_markup=ReplyKeyboardRemove())
            bot.send_message(chat_id=userid,
                             text=TEXTS["SELECT_TRIP_DATE"],
                             reply_markup=telegramcalendar.create_calendar())
            return ConvStates.DATE

score: 2.390625
int_score: 2
max_stars_repo_path: FullStackTests.py
max_stars_repo_name: stefanos-86/Remember
max_stars_count: 3
id: 12799675
content:

<reponame>stefanos-86/Remember<gh_stars>1-10
# Test rig to do tests on the completed machinery.
# Runs GDB on one (or all) test core files and compares the test result with what
# is stored in the <test case name>_result.txt file(s).
# This too relies on a convention on the file names linking test cases, expected results,
# executables and core files.
import argparse
import difflib
import os
import re
import subprocess

import TestSuite as ts

LOGS_FOR_TESTS = "FullStackTest.log"


def parse_command_line():
    parser = argparse.ArgumentParser(description='Utility to quickly run the full stack test cases. '
                                                 'Run CreateTestCores.py first. '
                                                 'Stick to the file naming conventions. '
                                                 'Test cases are listed in the CMakeList.txt in'
                                                 ' ' + ts.TEST_PROGRAMS_FOLDER,
                                     epilog="(C) 2017 - Stefano",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-t", "--test_case",
                        help="Runs only the specified test case. Runs everything if not provided (None).",
                        required=False)
    return parser.parse_args()


def find_core_file(test_name):
    """Walks the test directory to detect a core file with the same name as the test case."""
    for subdir, dirs, files in os.walk(ts.CORES_FOLDER):
        for file in files:
            if file.startswith(test_name):
                return os.path.join(subdir, file)
    return None


def assert_result(test_name):
    """Compares the expected and actual results. Prints the diff in case of error, nothing otherwise."""
    expected_result_file = os.path.join(ts.TEST_PROGRAMS_FOLDER, test_name + "_result.dot")
    with open(expected_result_file) as f:
        expected = f.readlines()
    with open(os.path.abspath("RememberOutput.dot")) as f:
        actual = f.readlines()

    expected = generic_addresses(expected)
    actual = generic_addresses(actual)

    changes = difflib.unified_diff(expected, actual,
                                   fromfile='expected_result_file',
                                   tofile='RESULT_FROM_TEST',
                                   lineterm='')
    result_lines = []
    for line in changes:
        result_lines.append(line)

    if len(result_lines) > 0:
        print " #################### FAIL ####################"
        for line in result_lines:
            print line,
        print " ##############################################"


def generic_addresses(dot_content):
    """The stack changes every time the core files are recreated.
    Should I commit the core files and the binaries in the repo?
    Or should I "wildcard" the addresses? Or abandon this way of testing?
    ...I wish I knew. For now, I replace the addresses with "generics"."""
    address_translation = {}
    address = re.compile("(0x[0-9a-f]+)")
    address_counter = 0
    filtered_result = []
    for line in dot_content:
        parts = address.split(line)
        filtered_line = ""
        for part in parts:
            if re.match(address, part):
                replacement = address_translation.get(part)
                if replacement is None:
                    replacement = "0xAddress_" + str(address_counter)
                    address_translation[part] = replacement
                    address_counter += 1
                # Append the (new or cached) replacement for every matched
                # address; the flattened original appended it only for new ones.
                filtered_line += replacement
            else:
                filtered_line += part
        filtered_result.append(filtered_line)
    return filtered_result


def run_test(test_name):
    """Calls gdb with our driver and compares the result with the expected output."""
    core_file = find_core_file(test_name)
    if core_file is None:
        raise Exception("No core file for " + test_name)

    executable_file = os.path.join(ts.TEST_PROGRAMS_FOLDER, test_name)
    subprocess.call(["python", "remember.py", core_file, executable_file,
                     "-l", LOGS_FOR_TESTS,
                     "-o", "GraphFromLastTest.svg"])
    assert_result(test_name)


if __name__ == "__main__":
    args = parse_command_line()

    if args.test_case is None:
        test_list = ts.find_test_cases()
    else:
        test_list = [args.test_case]

    print "Tests to run " + str(test_list)
    for test_case in test_list:
        run_test(test_case)

score: 2.71875
int_score: 3
max_stars_repo_path: models/room.py
max_stars_repo_name: Kaezon/BoredRPG
max_stars_count: 0
id: 12799676
content:

""" Module containing the Room class """
from collections import namedtuple

from entity import Entity

"""
Exit is a namedtuple which describes a unidirectional connection between rooms.

Properties:
    direction: Short name string
    destination: Room reference
"""
Exit = namedtuple("Exit", ['direction', 'destination'])


class Room(Entity):
    """
    Class representing locations in the game world.

    Properties:
        exits: List of Exit namedtuples
    """
    def __init__(self, name, short_description, full_description=None,
                 exits=None, meta=None):
        # Entity is assumed to take (name, location, short_description,
        # full_description, meta); a Room itself has no containing location.
        # The original called super() without __init__ and with an undefined
        # `location` name, so this is a repaired best guess.
        super().__init__(name, None, short_description, full_description,
                         meta if meta is not None else {})
        self._exits = exits if exits is not None else []

    @property
    def exits(self):
        """ Return the list of exits. """
        return self._exits

    @property
    def short_description(self):
        """
        Return the short description of this room.
        Exits are appended to the end.
        """
        description = self._short_description + "\n\nThe exits are "
        for exit in self._exits:
            description += "{}, ".format(exit.direction)
        return description

    @property
    def full_description(self):
        """
        Return the full description of this room.
        Exits are appended to the end.
        """
        description = self._full_description + "\n\nThe exits are "
        for exit in self._exits:
            description += "{}, ".format(exit.direction)
        return description

score: 3.8125
int_score: 4
max_stars_repo_path: main.py
max_stars_repo_name: jsleb333/quadboost
max_stars_count: 1
id: 12799677
content:

import torch
import logging

from quadboost import QuadBoostMHCR
from quadboost.label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder
from quadboost.weak_learner import *
from quadboost.callbacks import *
from quadboost.datasets import MNISTDataset
from quadboost.utils import parse, timed
from quadboost.data_preprocessing.data_augmentation import extend_mnist
from quadboost.weak_learner.random_convolution import plot_images


@timed
@parse
def main(m=60_000, val=10_000, da=0, dataset='mnist', center=True, reduce=True,
         encodings='onehot', wl='rccridge', max_round=1000, patience=1000,
         resume=0, n_jobs=1, max_n_leaves=4, n_filters=10, fs=11, fsh=0,
         locality=4, init_filters='from_bank', bank_ratio=.05, fn='c', seed=42,
         nl='maxpool', maxpool=3, device='cpu', degrees=.0, scale=.0, shear=.0,
         margin=2, nt=1):

    if seed:
        torch.manual_seed(seed)
        np.random.seed(seed)

    ### Data loading
    mnist = MNISTDataset.load(dataset + '.pkl')
    (Xtr, Ytr), (X_val, Y_val), (Xts, Yts) = mnist.get_train_valid_test(
        valid=val, center=False, reduce=False, shuffle=seed)
    Xtr, Ytr = Xtr[:m], Ytr[:m]
    if da:
        logging.info(f'Adding {da} examples with data augmentation.')
        Xtr, Ytr = extend_mnist(Xtr, Ytr, N=da, degrees=degrees,
                                scale=(1-scale, 1/(1-scale)), shear=shear)
    mnist.fit_scaler(Xtr, center=center, reduce=reduce)
    Xtr, Ytr = mnist.transform_data(Xtr.reshape(Xtr.shape[0], -1), Ytr)
    X_val, Y_val = mnist.transform_data(X_val.reshape(X_val.shape[0], -1), Y_val)
    Xts, Yts = mnist.transform_data(Xts.reshape(Xts.shape[0], -1), Yts)

    logging.info(f'Loaded dataset: {dataset} (center: {center}, reduce: {reduce})')
    logging.info(f'Number of examples - train: {len(Xtr)}, valid: {len(X_val)}, test: {len(Xts)}')

    ### Choice of encoder
    if encodings == 'onehot':
        encoder = OneHotEncoder(Ytr)
    elif encodings == 'allpairs':
        encoder = AllPairsEncoder(Ytr)
    else:
        encoder = LabelEncoder.load_encodings(encodings)
        if all(label.isdigit() for label in encoder.labels_encoding):
            encoder = LabelEncoder({int(label): encoding
                                    for label, encoding in encoder.labels_encoding.items()})
    logging.info(f'Encoding: {encodings}')

    filename = f'd={dataset}-e={encodings}-wl={wl}'

    ### Choice of weak learner
    kwargs = {}
    if wl in ['ds', 'decision-stump']:
        weak_learner = MulticlassDecisionStump()
        kwargs = dict(zip(('sorted_X', 'sorted_X_idx'), weak_learner.sort_data(Xtr)))
        kwargs['n_jobs'] = n_jobs
    elif wl in ['dt', 'decision-tree']:
        weak_learner = MulticlassDecisionTree(max_n_leaves=max_n_leaves)
        kwargs = dict(zip(('sorted_X', 'sorted_X_idx'), weak_learner.sort_data(Xtr)))
        kwargs['n_jobs'] = n_jobs
        filename += f'{max_n_leaves}'
    elif wl == 'ridge':
        weak_learner = WLThresholdedRidge(threshold=.5)
    elif wl.startswith('rcc') or wl.startswith('rlc'):
        if device.startswith('cuda'):
            Xtr = RandomConvolution.format_data(Xtr).to(device=device)
            X_val = RandomConvolution.format_data(X_val).to(device=device)
            Xts = RandomConvolution.format_data(Xts).to(device=device)

        filename += f'-nf={n_filters}-fs={fs}'
        if fsh:
            filename += f'_to_{fsh}'
        if wl.startswith('rlc'):
            filename += f'-loc={locality}'

        activation = None
        if 'maxpool' in nl:
            filename += f'-maxpool{maxpool}'
        if 'relu' in nl:
            filename += f'-relu'
            activation = torch.nn.functional.relu
        elif 'sigmoid' in nl:
            filename += f'-sigmoid'
            activation = torch.sigmoid

        filename += f'-{init_filters}'
        if degrees:
            filename += f'-deg={degrees}'
        if scale:
            filename += f'-scale={scale}'
            scale = (1-scale, 1/(1-scale))
        else:
            scale = None
        if shear:
            filename += f'-shear={shear}'
        else:
            shear = None

        filter_bank = None
        if init_filters == 'from_bank':
            if 0 < bank_ratio < 1:
                bank_size = int(m * bank_ratio)
                filter_bank = Xtr[:bank_size]
                Xtr, Ytr = Xtr[bank_size:], Ytr[bank_size:]
                logging.info(f'Bank size: {bank_size}')
            else:
                raise ValueError(f'Invalid bank_size {bank_size}.')
            filename += f'_br={bank_ratio}'
        elif init_filters == 'from_data':
            filter_bank = Xtr

        if fn:
            filename += f'_{fn}'
        f_proc = []
        if 'c' in fn:
            f_proc.append(center_weight)
        if 'n' in fn:
            f_proc.append(normalize_weight)
        if 'r' in fn:
            f_proc.append(reduce_weight)

        w_gen = WeightFromBankGenerator(filter_bank=filter_bank,
                                        filters_shape=(fs, fs),
                                        filters_shape_high=(fsh, fsh) if fsh else None,
                                        filter_processing=f_proc,
                                        margin=margin,
                                        degrees=degrees,
                                        scale=scale,
                                        shear=shear,
                                        )
        if wl.startswith('rcc'):
            filters = Filters(n_filters=n_filters,
                              weights_generator=w_gen,
                              activation=activation,
                              maxpool_shape=(nt, maxpool, maxpool))
        elif wl.startswith('rlc'):
            filters = LocalFilters(n_filters=n_filters,
                                   weights_generator=w_gen,
                                   locality=locality,
                                   maxpool_shape=(nt, maxpool, maxpool))
        if nt > 1:
            filename += f'-nt={nt}'

        if wl.endswith('ridge'):
            weak_learner = RandomConvolution(filters=filters, weak_learner=Ridge)
        if wl.endswith('ds'):
            weak_learner = RandomConvolution(filters=filters, weak_learner=MulticlassDecisionStump)
            kwargs['n_jobs'] = n_jobs
    else:
        raise ValueError(f'Invalid weak learner name: "{wl}".')

    logging.info(f'Weak learner: {type(weak_learner).__name__}')

    ### Callbacks
    ckpt = ModelCheckpoint(filename=filename + '-{round}.ckpt',
                           dirname='./results',
                           save_last=True)
    logger = CSVLogger(filename=filename + '-log.csv', dirname='./results/log')
    zero_risk = BreakOnZeroRiskCallback()
    callbacks = [ckpt,
                 logger,
                 zero_risk,
                 ]

    logging.info(f'Filename: {filename}')

    ### Fitting the model
    if not resume:
        logging.info(f'Beginning fit with max_round_number={max_round} and patience={patience}.')
        qb = QuadBoostMHCR(weak_learner, encoder=encoder)
        qb.fit(Xtr, Ytr, max_round_number=max_round, patience=patience,
               X_val=X_val, Y_val=Y_val,
               callbacks=callbacks,
               **kwargs)
    ### Or resume fitting a model
    else:
        logging.info(f'Resuming fit with max_round_number={max_round}.')
        qb = QuadBoostMHCR.load(f'results/{filename}-{resume}.ckpt')
        qb.resume_fit(Xtr, Ytr,
                      X_val=X_val, Y_val=Y_val,
                      max_round_number=max_round,
                      **kwargs)

    print(f'Best round recap:\nBoosting round {qb.best_round.step_number+1:03d} | '
          f'Train acc: {qb.best_round.train_acc:.3%} | '
          f'Valid acc: {qb.best_round.valid_acc:.3%} | '
          f'Risk: {qb.best_round.risk:.3f}')
    print(f'Test accuracy on best model: {qb.evaluate(Xts, Yts):.3%}')
    print(f'Test accuracy on last model: {qb.evaluate(Xts, Yts, mode="last"):.3%}')


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, style='{', format='[{levelname}] {message}')
    main()

score: 2.0625
int_score: 2
test/run/t418.py
timmartin/skulpt
2671
12799678
# lists
print "\nlists"
print min([1,2,3,4])
print min([2,1],[1,2],[1,1],[1,1,0])

# tuples
print "\ntuples"
print min((1,2,3,4))
print min((2,1),(1,2),(1,1),(1,1,0))

# dictionaries
print "\ndictionaries"
print min({1:2,3:4,5:6})
print min({1:6,3:4,5:2})
3.359375
3
devel/.private/hector_uav_msgs/lib/python2.7/dist-packages/hector_uav_msgs/srv/__init__.py
arijitnoobstar/UAVProjectileCatcher
10
12799679
from ._EnableMotors import *
1.21875
1
ServiceRelationExtraction/relationExtractService.py
black938/RelationExtractionProject
0
12799680
from concurrent import futures

import grpc

import relationExtractService_pb2
import relationExtractService_pb2_grpc
import tools


class relationExtractService(relationExtractService_pb2_grpc.relationExtractServiceServicer):
    def ExtractTriple(self, request, context):
        sentence = request.sentence
        triples = tools.extract_items(sentence)
        response = relationExtractService_pb2.relationExtractResponse()
        for triple in triples:
            data = response.triples.add()
            data.sub = triple[0]
            data.pred = triple[1]
            data.obj = triple[2]
        return response


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=8))
    relationExtractService_pb2_grpc.add_relationExtractServiceServicer_to_server(relationExtractService(), server)
    server.add_insecure_port("[::]:4232")
    server.start()
    server.wait_for_termination()


if __name__ == '__main__':
    serve()
2.453125
2
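For context, a minimal client sketch for the gRPC service above. grpc.insecure_channel is the standard grpc API; the stub class relationExtractServiceStub and the request message relationExtractRequest are inferred from the generated-module naming conventions and are assumptions, not confirmed by the .proto.

import grpc

import relationExtractService_pb2
import relationExtractService_pb2_grpc

# Stub and request names below are hypothetical, inferred from the servicer
# and response names in the module above.
channel = grpc.insecure_channel("localhost:4232")
stub = relationExtractService_pb2_grpc.relationExtractServiceStub(channel)
request = relationExtractService_pb2.relationExtractRequest(sentence="...")
response = stub.ExtractTriple(request)
for triple in response.triples:
    print(triple.sub, triple.pred, triple.obj)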
src/delete_network.py
chrisdxie/rice
16
12799681
import itertools
from collections import OrderedDict

import numpy as np
import cv2

import torch
import torch.nn as nn
from torch.nn import Sequential as Seq, Linear, ReLU
import torch.nn.functional as F
from torch_geometric.data import Data, Batch

from . import base_networks
from . import graph_construction as gc
from . import constants
from .util import utilities as util_


class DeleteNet(nn.Module):

    def __init__(self, config):
        super(DeleteNet, self).__init__()
        self.node_encoder = base_networks.NodeEncoder(config['node_encoder_config'])
        self.bg_fusion_module = base_networks.LinearEncoder(config['bg_fusion_module_config'])

    def forward(self, graph):
        """DeleteNet forward pass.

        Note: Assume that the graph contains the background node as the first node.

        Args:
            graph: a torch_geometric.Data instance with attributes:
                - rgb: a [N, 256, h, w] torch.FloatTensor of ResNet50+FPN rgb image features
                - depth: a [N, 3, h, w] torch.FloatTensor. XYZ image
                - mask: a [N, h, w] torch.FloatTensor of values in {0, 1}
                - orig_masks: a [N, H, W] torch.FloatTensor of values in {0, 1}. Original image size.
                - crop_indices: a [N, 4] torch.LongTensor. xmin, ymin, xmax, ymax.

        Returns:
            a [N] torch.FloatTensor of delete score logits. The first logit
                (background) is always low, so BG is never deleted.
        """
        encodings = self.node_encoder(graph)  # dictionary
        concat_features = torch.cat([encodings[key] for key in encodings], dim=1)  # [N, \sum_i d_i]

        bg_feature = concat_features[0:1]  # [1, \sum_i d_i]
        node_features = concat_features[1:]  # [N-1, \sum_i d_i]
        node_minus_bg_features = node_features - bg_feature  # [N-1, \sum_i d_i]

        node_delete_logits = self.bg_fusion_module(node_minus_bg_features)  # [N-1, 1]
        delete_logits = torch.cat([torch.ones((1, 1), device=constants.DEVICE) * -100,
                                   node_delete_logits], dim=0)
        return delete_logits[:, 0]


class DeleteNetWrapper(base_networks.NetworkWrapper):

    def setup(self):
        if 'deletenet_model' in self.config:
            self.model = self.config['deletenet_model']
        else:
            self.model = DeleteNet(self.config)
        self.model.to(self.device)

    def get_new_potential_masks(self, masks, fg_mask):
        """Compute new potential masks.

        See if any connected components of fg_mask _setminus_ mask can be
        considered as a new mask. Concatenate them to masks.

        Args:
            masks: a [N, H, W] torch.Tensor with values in {0, 1}.
            fg_mask: a [H, W] torch.Tensor with values in {0, 1}.

        Returns:
            a [N + delta, H, W] np.ndarray of new masks. delta = #new_masks.
        """
        occupied_mask = masks.sum(dim=0) > 0.5
        fg_mask = fg_mask.cpu().numpy().astype(np.uint8)
        fg_mask[occupied_mask.cpu().numpy()] = 0
        fg_mask = cv2.erode(fg_mask, np.ones((3, 3)), iterations=1)

        nc, components = cv2.connectedComponents(fg_mask, connectivity=8)
        components = torch.from_numpy(components).float().to(constants.DEVICE)

        for j in range(1, nc):
            mask = components == j
            component_size = mask.sum().float()
            if component_size > self.config['min_pixels_thresh']:
                masks = torch.cat([masks, mask[None].float()], dim=0)

        return masks

    def delete_scores(self, graph):
        """Compute delete scores for each node in the graph.

        Args:
            graph: a torch_geometric.Data instance

        Returns:
            a [N] torch.Tensor with values in [0, 1]
        """
        return torch.sigmoid(self.model(graph))
2.359375
2
util.py
CSI-Woo-Lab/PandaSchedulingModel
3
12799682
def get_util_range(num_proc):
    util = [str(x) for x in range(10, num_proc * 100, 10)]
    ret = []
    for x in util:
        if len(x) == 2:
            ret.append('0.' + x)
        else:
            ret.append(x[:len(x) - 2] + '.' + x[len(x) - 2:])
    return ret
2.765625
3
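For reference, a quick demonstration of what get_util_range produces; this follows directly from the code above (only the import path is assumed).

from util import get_util_range

# Utilizations run from 10 to num_proc*100 - 10 in steps of 10, rendered
# as strings with an implied division by 100.
print(get_util_range(1))  # ['0.10', '0.20', ..., '0.90']
print(get_util_range(2))  # ['0.10', ..., '0.90', '1.00', ..., '1.90']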
rss/subreddits.py
victorchen796/reddit-submission-scraper
0
12799683
from resources import get_subreddits, update_subreddits

"""
subreddits: {
    '<subreddit name>': {
        'phrases': [
            '<phrases>'
        ],
        'flairs': [
            '<flairs>'
        ],
        'include': <boolean>,
        'unflaired': <boolean>
    },
    ...
}
"""

subreddits = get_subreddits()


def list():
    return subreddits.keys()


def add(name):
    subreddits[name] = {
        'phrases': [],
        'flairs': [],
        'include': False,
        'unflaired': True
    }
    update_subreddits(subreddits)


def remove(name):
    del subreddits[name]
    update_subreddits(subreddits)


def clear():
    subreddits.clear()
    update_subreddits(subreddits)
2.78125
3
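A short usage sketch for the module above, assuming it is importable as `subreddits` and that resources.get_subreddits/update_subreddits persist a plain dict; both are assumptions about the unshown resources module.

import subreddits

subreddits.add('python')      # registers with empty phrase/flair filters
print(subreddits.list())      # dict_keys(['python', ...])
subreddits.remove('python')   # persists the removal via update_subreddits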
events/migrations/0006_auto_20150811_1213.py
Morozzzko/django-events
0
12799684
<filename>events/migrations/0006_auto_20150811_1213.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        ('events', '0005_auto_20150809_0203'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='team',
            name='members',
        ),
        migrations.AlterField(
            model_name='teammembership',
            name='team',
            field=models.ForeignKey(null=True, blank=True, to='events.Team'),
        ),
        migrations.AlterField(
            model_name='teammembership',
            name='user',
            field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
        ),
    ]
1.328125
1
tests/test_nested_sampling.py
LoLab-VU/Gleipnir
2
12799685
<filename>tests/test_nested_sampling.py
"""
Tests using an implementation of a 5-dimensional Gaussian problem and its
Nested Sampling via Gleipnir's built-in Nested Sampler.

Adapted from the DNest4 python gaussian example:
https://github.com/eggplantbren/DNest4/blob/master/python/examples/gaussian/gaussian.py
"""

import pytest
import numpy as np
from numpy import exp, log, pi
from scipy.stats import uniform
from scipy.special import erf
from gleipnir.sampled_parameter import SampledParameter
from gleipnir.nestedsampling import NestedSampling
from gleipnir.nestedsampling.samplers import MetropolisComponentWiseHardNSRejection
from gleipnir.nestedsampling.stopping_criterion import NumberOfIterations
import os
import glob

# Number of parameters to sample is 5
ndim = 5
# Set up the list of sampled parameters: the prior is Uniform(-5:5) --
# we are using a fixed uniform prior from scipy.stats
sampled_parameters = [SampledParameter(name=i, prior=uniform(loc=-5.0, scale=10.0)) for i in range(ndim)]
# Set the active point population size
population_size = 20
sampler = MetropolisComponentWiseHardNSRejection(iterations=10, tuning_cycles=1)
stopping_criterion = NumberOfIterations(120)


# Define the loglikelihood function
def loglikelihood(sampled_parameter_vector):
    const = -0.5*np.log(2*np.pi)
    return -0.5*np.sum(sampled_parameter_vector**2) + ndim * const


width = 10.0


def analytic_log_evidence(ndim, width):
    lZ = (ndim * np.log(erf(0.5*width/np.sqrt(2)))) - (ndim * np.log(width))
    return lZ


shared = {'NS': None}


def test_initialization():
    NS = NestedSampling(sampled_parameters=sampled_parameters,
                        loglikelihood=loglikelihood,
                        sampler=sampler,
                        population_size=population_size,
                        stopping_criterion=stopping_criterion)
    shared['NS'] = NS


def test_attributes():
    NS = shared['NS']
    sp = NS.sampled_parameters
    assert sp == sampled_parameters
    lnl = NS.loglikelihood
    spv = np.array([5., 5., 5., 5., 5.])
    assert lnl(spv) == loglikelihood(spv)
    pop = NS.population_size
    assert pop == population_size


def test_func_run():
    NS = shared['NS']
    log_evidence, log_evidence_error = NS.run(verbose=False)
    analytic = analytic_log_evidence(ndim, width)
    print(analytic, log_evidence)
    assert np.isclose(log_evidence, analytic, rtol=1.)
    shared['NS'] = NS


def test_properties():
    NS = shared['NS']
    analytic = analytic_log_evidence(ndim, width)
    lnZ = NS.log_evidence
    assert np.isclose(lnZ, analytic, rtol=1.)
    lnZ_err = NS.log_evidence_error
    Z = NS.evidence
    Z_err = NS.evidence_error
    H = NS.information


def test_func_posteriors():
    NS = shared['NS']
    posteriors = NS.posteriors()
    keys = list(posteriors.keys())
    assert len(keys) == len(sampled_parameters)


def test_func_akaike_ic():
    NS = shared['NS']
    aic = NS.akaike_ic()


def test_func_bayesian_ic():
    NS = shared['NS']
    bic = NS.bayesian_ic(n_data=5)


def test_func_deviance_ic():
    NS = shared['NS']
    dic = NS.deviance_ic()


if __name__ == '__main__':
    test_initialization()
    test_attributes()
    test_func_run()
    test_properties()
    test_func_posteriors()
    test_func_akaike_ic()
    test_func_bayesian_ic()
    test_func_deviance_ic()
2.546875
3
src/objects/variable.py
CameronWr/ModelTranslator
0
12799686
class Variable:
    var_type_dic = {'int': 0, 'bool': 1, 'boolean': 1, 'float': 2,
                    'double': 2, 'string': 3, 'str': 3}

    def __init__(self, var_type, var_name, var_value):
        self.var_type = var_type
        self.var_name = var_name
        self.var_value = var_value
        if var_value == "":
            self.set_default_value(var_type)
        else:
            self.var_value = var_value

    def set_default_value(self, var_type):
        var_type = var_type.lower()
        var_type_index = Variable.var_type_dic.get(var_type, None)
        if var_type_index == 0:
            self.var_value = 0
        elif var_type_index == 1:
            self.var_value = False
        elif var_type_index == 2:
            self.var_value = 0.0
        elif var_type_index == 3:
            self.var_value = ""
        else:
            print("Variable type error")

    def set_variable_type(self, type):
        self.var_type = type

    def set_variable_name(self, name):
        self.var_name = name

    def set_variable_value(self, value):
        self.var_value = value

    def get_variable_type(self):
        return self.var_type

    def get_variable_name(self):
        return self.var_name

    def get_variable_value(self):
        return self.var_value
3.4375
3
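A brief usage sketch for the class above; the import path is hypothetical (the file lives at src/objects/variable.py).

from variable import Variable

# An empty value triggers set_default_value, keyed on the type string.
count = Variable('int', 'count', '')
flag = Variable('Bool', 'flag', '')   # type lookup is case-insensitive
name = Variable('str', 'name', 'abc')
print(count.get_variable_value())  # 0
print(flag.get_variable_value())   # False
print(name.get_variable_value())   # 'abc'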
real_robots/generate_goals.py
GOAL-Robots/real_robots
35
12799687
# -*- coding: utf-8 -*-

"""Console script to generate goals for real_robots"""
import click
import numpy as np
from real_robots.envs import Goal
import gym
import math

basePosition = None

slow = False
render = False


def pairwise_distances(a):
    b = a.reshape(a.shape[0], 1, a.shape[1])
    return np.sqrt(np.einsum('ijk, ijk->ij', a-b, a-b))


def runEnv(env, max_t=1000):
    reward = 0
    done = False
    render = slow
    action = {'joint_command': np.zeros(9), 'render': render}
    objects = env.robot.used_objects[1:]

    positions = np.vstack([env.get_obj_pose(obj) for obj in objects])
    still = False
    stable = 0
    for t in range(max_t):
        old_positions = positions
        observation, reward, done, _ = env.step(action)
        positions = np.vstack([env.get_obj_pose(obj) for obj in objects])

        maxPosDiff = 0
        maxOrientDiff = 0
        for i, obj in enumerate(objects):
            posDiff = np.linalg.norm(old_positions[i][:3] - positions[i][:3])
            q1 = old_positions[i][3:]
            q2 = positions[i][3:]
            orientDiff = min(np.linalg.norm(q1 - q2), np.linalg.norm(q1+q2))
            maxPosDiff = max(maxPosDiff, posDiff)
            maxOrientDiff = max(maxOrientDiff, orientDiff)

        if maxPosDiff < 0.0001 and maxOrientDiff < 0.001 and t > 10:
            stable += 1
        else:
            stable = 0

        action['render'] = slow
        if stable > 19:
            action['render'] = True

        if stable > 20:
            still = True
            break

    pos_dict = {}
    for obj in objects:
        pos_dict[obj] = env.get_obj_pose(obj)

    print("Exiting environment after {} timesteps..".format(t))
    if not still:
        print("Failed because maxPosDiff:{:.6f},"
              "maxOrientDiff:{:.6f}".format(maxPosDiff, maxOrientDiff))

    return observation['retina'], pos_dict, not still, t, observation['mask']


class Position:
    def __init__(self, start_state=None, fixed_state=None, retina=None,
                 mask=None):
        self.start_state = start_state
        self.fixed_state = fixed_state
        self.retina = retina
        self.mask = mask


def generatePosition(env, obj, fixed=False, tablePlane=None):
    if tablePlane is None:
        min_x = -.25
        max_x = .25
    elif tablePlane:
        min_x = -.25
        max_x = .05
    else:
        min_x = .10
        max_x = .25

    min_y = -.45
    max_y = .45

    x = np.random.rand()*(max_x-min_x)+min_x
    y = np.random.rand()*(max_y-min_y)+min_y

    if x <= 0.05:
        z = 0.40
    else:
        z = 0.50

    if fixed:
        orientation = basePosition[obj][3:]
    else:
        orientation = (np.random.rand(3)*math.pi*2).tolist()
        orientation = env._p.getQuaternionFromEuler(orientation)

    pose = [x, y, z] + np.array(orientation).tolist()
    return pose


def generateRealPosition(env, startPositions):
    env.reset()
    runEnv(env)

    # Generate Images
    for obj in startPositions:
        pos = startPositions[obj]
        env.robot.object_bodies[obj].reset_pose(pos[:3], pos[3:])

    actual_image, actual_position, failed, it, mask = runEnv(env)

    return actual_image, actual_position, failed, it, mask


def checkMinSeparation(state):
    positions = np.vstack([state[obj][:3] for obj in state])
    if len(positions) > 1:
        distances = pairwise_distances(positions)
        clearance = distances[distances > 0].min()
    else:
        clearance = np.inf
    return clearance


def drawPosition(env, fixedOrientation=False, fixedObjects=[],
                 fixedPositions=None, minSeparation=0, objOnTable=None):
    failed = True
    while failed:
        # skip 1st object, i.e the table
        objects = env.robot.used_objects[1:]

        position = Position()
        startPositions = {}
        for obj in fixedObjects:
            startPositions[obj] = fixedPositions[obj]

        for obj in np.random.permutation(objects):
            if obj in fixedObjects:
                continue
            while True:
                table = None
                if objOnTable is not None:
                    if obj in objOnTable:
                        table = objOnTable[obj]
                startPose = generatePosition(env, obj, fixedOrientation,
                                             tablePlane=table)
                startPositions[obj] = startPose
                if len(startPositions) == 1:
                    break
                clearance = checkMinSeparation(startPositions)
                if clearance >= minSeparation:
                    break
                print("Failed minimum separation ({}), draw again {}.."
                      .format(clearance, obj))

        (a, p, f, it, m) = generateRealPosition(env, startPositions)
        actual_image = a
        actual_mask = m
        actual_position = p
        failed = f
        if failed:
            print("Failed image generation...")
            continue

        clearance = checkMinSeparation(actual_position)
        if clearance < minSeparation:
            failed = True
            print("Failed minimum separation ({}) after real generation, "
                  "draw again everything..".format(clearance))
            continue

        if fixedOrientation:
            for obj in objects:
                q1 = startPositions[obj][3:]
                q2 = actual_position[obj][3:]
                orientDiff = min(np.linalg.norm(q1 - q2),
                                 np.linalg.norm(q1+q2))
                # TODO CHECK This - we had to rise it many times
                failed = failed or orientDiff > 0.041
                if failed:
                    print("{} changed orientation by {}"
                          .format(obj, orientDiff))
                    break
                else:
                    print("{} kept orientation.".format(obj))
            if failed:
                print("Failed to keep orientation...")
                continue

        for obj in fixedObjects:
            posDiff = np.linalg.norm(startPositions[obj][:3] -
                                     actual_position[obj][:3])
            q1 = startPositions[obj][3:]
            q2 = actual_position[obj][3:]
            orientDiff = min(np.linalg.norm(q1 - q2), np.linalg.norm(q1+q2))
            failed = failed or posDiff > 0.002 or orientDiff > 0.041
            if failed:
                print("{} changed pos by {} and orientation by {}"
                      .format(obj, posDiff, orientDiff))
                print(startPositions[obj])
                print(actual_position[obj])
                break
        if failed:
            print("Failed to keep objects fixed...")
            continue

        position.start_state = startPositions
        position.fixed_state = actual_position
        position.retina = actual_image
        position.mask = actual_mask

        return position


def checkRepeatability(env, goals):
    maxDiffPos = 0
    maxDiffOr = 0
    for goal in goals:
        _, pos, failed, _, _ = generateRealPosition(env, goal.initial_state)
        objects = [o for o in goal.initial_state]
        p0 = np.vstack([goal.initial_state[o] for o in objects])
        p1 = np.vstack([pos[o] for o in objects])
        diffPos = np.linalg.norm(p1[:, :3]-p0[:, :3])
        diffOr = min(np.linalg.norm(p1[:, 3:]-p0[:, 3:]),
                     np.linalg.norm(p1[:, 3:]+p0[:, 3:]))
        maxDiffPos = max(maxDiffPos, diffPos)
        maxDiffOr = max(maxDiffOr, diffOr)
        print("Replicated diffPos:{} diffOr:{}".format(diffPos, diffOr))
        if failed:
            print("*****************FAILED************!!!!")
            return 1000000
    return maxDiffPos, maxDiffOr


def isOnShelf(obj, state):
    z = state[obj][2]
    if obj == 'cube' and z > 0.55 - 0.15:
        return True
    if obj == 'orange' and z > 0.55 - 0.15:
        return True
    if obj == 'tomato' and z > 0.55 - 0.15:
        return True
    if obj == 'mustard' and z > 0.545 - 0.15:
        return True
    return False


def isOnTable(obj, state):
    z = state[obj][2]
    if obj == 'cube' and z < 0.48 - 0.15:
        return True
    if obj == 'orange' and z < 0.48 - 0.15:
        return True
    if obj == 'tomato' and z < 0.49 - 0.15:
        return True
    if obj == 'mustard' and z < 0.48 - 0.15:
        return True
    return False


def generateGoalREAL2020(env, n_obj, goal_type, on_shelf=False,
                         min_start_goal_dist=0.1, min_objects_dist=0.05,
                         max_objects_dist=2):
    print("Generating GOAL..")
    objOnTable = None
    if not on_shelf:
        objects = env.robot.used_objects[1:]
        objOnTable = {}
        for obj in objects:
            objOnTable[obj] = True

    if goal_type == '3D':
        fixedOrientation = False
    else:
        fixedOrientation = True

    found = False
    while not(found):
        initial = drawPosition(env, fixedOrientation=fixedOrientation,
                               objOnTable=objOnTable,
                               minSeparation=min_objects_dist)
        found = True

        # checks whether at least two objects are close together as
        # specified in max_objects_dist
        if n_obj == 1:
            at_least_two_near_objects = True
        else:
            at_least_two_near_objects = False
            for obj1 in initial.fixed_state.keys():
                for obj2 in initial.fixed_state.keys():
                    if obj1 == obj2:
                        continue
                    if np.linalg.norm(initial.fixed_state[obj1][:3]-initial.fixed_state[obj2][:3]) <= max_objects_dist or goal_type != '3D' or len(initial.fixed_state.keys()) == 1:
                        at_least_two_near_objects = True
                        break
                if at_least_two_near_objects:
                    break

        # checks if at least one object is on the shelf
        at_least_one_on_shelf = False
        for obj in initial.fixed_state.keys():
            if isOnShelf(obj, initial.fixed_state) or goal_type == '2D':
                at_least_one_on_shelf = True
                break

    found = False
    while not(found):
        found = True
        final = drawPosition(env, fixedOrientation=fixedOrientation,
                             objOnTable=objOnTable,
                             minSeparation=min_objects_dist)

        # checks whether at least two objects are close together as specified
        # in max_objects_dist. This only if in the initial positions it is not true
        if not at_least_two_near_objects:
            found = False
            for obj1 in final.fixed_state.keys():
                for obj2 in final.fixed_state.keys():
                    if obj1 == obj2:
                        continue
                    if np.linalg.norm(final.fixed_state[obj1][:3]-final.fixed_state[obj2][:3]) <= max_objects_dist:
                        found = True
                        break
                if found:
                    break

        # checks if at least one object is on the shelf. This only if in the
        # initial positions it is not true
        if found and not at_least_one_on_shelf:
            found = False
            for obj in final.fixed_state.keys():
                if isOnShelf(obj, final.fixed_state):
                    found = True
                    break

        # checks if the distance between initial and final positions of the
        # objects is at least how much specified in min_start_goal_dist
        for obj in final.fixed_state.keys():
            if min_start_goal_dist > np.linalg.norm(final.fixed_state[obj][:2]-initial.fixed_state[obj][:2]):
                found = False
                break

    goal = Goal()
    goal.challenge = goal_type
    goal.subtype = str(n_obj)
    goal.initial_state = initial.fixed_state
    goal.final_state = final.fixed_state
    goal.retina_before = initial.retina
    goal.retina = final.retina
    goal.mask = final.mask
    print("SUCCESSFUL generation of GOAL {}!".format(goal_type))

    return goal


def visualizeGoalDistribution(all_goals, images=True):
    import matplotlib.pyplot as plt
    challenges = np.unique([goal.challenge for goal in all_goals])
    fig, axes = plt.subplots(max(2, len(challenges)), 3)
    for c, challenge in enumerate(challenges):
        goals = [goal for goal in all_goals if goal.challenge == challenge]
        if len(goals) > 0:
            if images:
                # Superimposed images view
                tomatos = sum([goal.mask == 2 for goal in goals])
                mustards = sum([goal.mask == 3 for goal in goals])
                cubes = sum([goal.mask == 4 for goal in goals])
                axes[c, 0].imshow(tomatos, cmap='gray')
                axes[c, 1].imshow(mustards, cmap='gray')
                axes[c, 2].imshow(cubes, cmap='gray')
            else:
                # Positions scatter view
                for i, o in enumerate(goals[0].final_state.keys()):
                    positions = np.vstack([goal.final_state[o] for goal in goals])
                    axes[c, i].set_title("{} {}".format(o, challenge))
                    axes[c, i].hist2d(positions[:, 0], positions[:, 1])
                    axes[c, i].set_xlim([-0.3, 0.3])
                    axes[c, i].set_ylim([-0.6, 0.6])
    plt.show()


@click.command()
@click.option('--seed', type=int,
              help='Generate goals using this SEED for numpy.random')
@click.option('--n_2d_goals', type=int, default=25,
              help='# of 2D goals (default 25)')
@click.option('--n_25d_goals', type=int, default=15,
              help='# of 2.5D goals (default 15)')
@click.option('--n_3d_goals', type=int, default=10,
              help='# of 3D goals (default 10)')
@click.option('--n_obj', type=int, default=3,
              help='# of objects (default 3)')
def main(seed=None, n_2d_goals=25, n_25d_goals=15, n_3d_goals=10, n_obj=3):
    """
    Generates the specified number of goals and saves them in a file.\n
    The file is called goals-REAL2020-s{}-{}-{}-{}-{}.npy.npz where enclosed
    brackets are replaced with the supplied options (seed, n_2d_goals,
    n_25d_goals, n_3d_goals, n_obj) or the default value.
    """
    np.random.seed(seed)
    allgoals = []
    env = gym.make('REALRobot2020-R1J{}-v0'.format(n_obj))
    if render:
        env.render('human')
    env.reset()

    global basePosition
    _, basePosition, _, _, _ = runEnv(env)

    # In these for loops, we could add some progress bar...
    for _ in range(n_2d_goals):
        allgoals += [generateGoalREAL2020(env, n_obj, "2D", on_shelf=False,
                                          min_start_goal_dist=0.2,
                                          min_objects_dist=0.25)]
    for _ in range(n_25d_goals):
        allgoals += [generateGoalREAL2020(env, n_obj, "2.5D", on_shelf=True,
                                          min_start_goal_dist=0.2,
                                          min_objects_dist=0.25)]
    for _ in range(n_3d_goals):
        allgoals += [generateGoalREAL2020(env, n_obj, "3D", on_shelf=True,
                                          min_start_goal_dist=0.2,
                                          min_objects_dist=0)]

    np.savez_compressed('goals-REAL2020-s{}-{}-{}-{}-{}.npy'
                        .format(seed, n_2d_goals, n_25d_goals,
                                n_3d_goals, n_obj),
                        allgoals)

    checkRepeatability(env, allgoals)
    visualizeGoalDistribution(allgoals)


if __name__ == "__main__":
    main()
2.46875
2
tools/objects/object_gen.py
andyc655/gunyah-hypervisor
61
12799688
<gh_stars>10-100
#!/usr/bin/env python3
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from Cheetah.Template import Template

import argparse
import subprocess
import sys


class Object:
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def type_enum(self):
        return "OBJECT_TYPE_{:s}".format(self.name.upper())

    def rcu_destroy_enum(self):
        return "RCU_UPDATE_CLASS_{:s}_DESTROY".format(self.name.upper())


def main():
    args = argparse.ArgumentParser()

    mode_args = args.add_mutually_exclusive_group(required=True)
    mode_args.add_argument('-t', '--template',
                           type=argparse.FileType('r', encoding="utf-8"),
                           help="Template file used to generate output")

    args.add_argument('-o', '--output',
                      type=argparse.FileType('w', encoding="utf-8"),
                      default=sys.stdout,
                      help="Write output to file")
    args.add_argument("-f", "--formatter",
                      help="specify clang-format to format the code")
    args.add_argument('input', metavar='INPUT', nargs='+', action='append',
                      help="List of objects to process")
    options = args.parse_args()

    object_list = [Object(o) for group in options.input for o in group]

    output = "// Automatically generated. Do not modify.\n"
    output += "\n"

    ns = {'object_list': object_list}
    output += str(Template(file=options.template, searchList=ns))

    if options.formatter:
        ret = subprocess.run([options.formatter], input=output.encode("utf-8"),
                             stdout=subprocess.PIPE)
        output = ret.stdout.decode("utf-8")
        if ret.returncode != 0:
            raise Exception("failed to format output:\n ", ret.stderr)

    options.output.write(output)


if __name__ == '__main__':
    main()
2.265625
2
main.py
bichu136/svgrasterize.py
0
12799689
import os
import sys
import time
import argparse

from svgrasterize import *
import numpy as np

np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})

parser = argparse.ArgumentParser()
parser.add_argument("svg", help="input SVG file")
parser.add_argument("output", help="output PNG file")
parser.add_argument("-bg", type=svg_color, help="set default background color")
parser.add_argument("-fg", type=svg_color, help="set default foreground color")
parser.add_argument("-w", "--width", type=int, help="output width")
parser.add_argument("-id", help="render single element with specified `id`")
parser.add_argument(
    "-t", "--transform", type=svg_transform, help="apply additional transformation"
)
parser.add_argument("--linear-rgb", action="store_true", help="use linear RGB for rendering")
parser.add_argument("--fonts", nargs="*", help="paths to SVG files containing all fonts")
opts = parser.parse_args()

if not os.path.exists(opts.svg):
    sys.stderr.write(f"[error] file does not exist: {opts.svg}\n")
    sys.exit(1)

fonts = FontsDB()
for font in opts.fonts or [DEFAULT_FONTS]:
    fonts.register_file(font)

transform = Transform().matrix(0, 1, 0, 1, 0, 0)
if opts.transform:
    transform @= opts.transform

if opts.svg.endswith(".path"):
    path = Path.from_svg(open(opts.svg).read())
    print(path, "\n")
    opts.bg = svg_color("white") if opts.bg is None else opts.bg
    opts.fg = svg_color("black") if opts.fg is None else opts.fg
    scene = Scene.fill(path, opts.fg)
    ids, size = {}, None
else:
    scene, ids, size = svg_scene_from_filepath(
        opts.svg, fg=opts.fg, width=opts.width, fonts=fonts
    )

if scene is None:
    sys.stderr.write("[error] nothing to render\n")
else:
    pass

if opts.id is not None:
    size = None
    scene = ids.get(opts.id)
    if scene is None:
        sys.stderr.write(f"[error] no object with id: {opts.id}\n")
        sys.exit(1)

start = time.time()
if size is not None:
    w, h = size
    result = scene.render(
        transform, viewport=[0, 0, int(h), int(w)], linear_rgb=opts.linear_rgb
    )
else:
    result = scene.render(transform, linear_rgb=opts.linear_rgb)
stop = time.time()
sys.stderr.write("[info] rendered in {:.2f}\n".format(stop - start))
sys.stderr.flush()

if result is None:
    sys.stderr.write("[error] nothing to render\n")
    sys.exit(1)
output, _convex_hull = result

if size is not None:
    w, h = size
    output = output.convert(pre_alpha=True, linear_rgb=opts.linear_rgb)
    base = np.zeros((int(h), int(w), 4), dtype=FLOAT)
    image = canvas_merge_at(base, output.image, output.offset)
    output = Layer(image, (0, 0), pre_alpha=True, linear_rgb=opts.linear_rgb)

if opts.bg is not None:
    output = output.background(opts.bg)

filename = opts.output if opts.output != "-" else 1
closefd = opts.output != "-"
with open(filename, "wb", closefd=closefd) as file:
    output.write_png(file)

# Path.fill returns the image region and the offset within the larger image.
# The merging function is Layer.compose: it returns an image region and an
# offset, producing the largest combined image.
# canvas_compose specifies how a triangle is blended into the image.
# The blend used by canvas_merge_union is the canvas_compose passed as the
# first argument; the default is COMPOSE_OVER.
# Path.mask returns the mask of the triangle: 1 inside the triangle, 0 outside,
# with values between 0 and 1 along the triangle's edges.
# It also returns an offset.
2.84375
3
bin/train_lm.py
mcharnelli/DenoiseSum
0
12799690
<gh_stars>0
import argparse

from DenoiseSum import DATA_PATH
from DenoiseSum.LanguageModel.training import train_language_model

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='rotten', type=str)
    parser.add_argument('--no_instance', default=40, type=int)
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--word_dim', default=300, type=int)
    parser.add_argument('--hidden_dim', default=512, type=int)
    parser.add_argument('--disc_size', default=50, type=int)
    parser.add_argument('--num_epoch', default=20, type=int)
    parser.add_argument('--eval_every', default=2500, type=int)
    parser.add_argument('--stop_after', default=5, type=int)
    parser.add_argument('--train_file', default='reviews_large_train.csv', type=str)
    parser.add_argument('--dev_file', default='reviews_large_validation.csv', type=str)
    parser.add_argument('--test_file', default='reviews_large_test.csv', type=str)
    parser.add_argument('--replace_word_dict', default='False', type=str)
    parser.add_argument('--retrain_model', default='False', type=str)
    parser.add_argument('--model_file', default='lm.model', type=str)
    parser.add_argument('--sos', default=2, type=int)
    parser.add_argument('--eos', default=3, type=int)
    parser.add_argument('--coverage_rate', default=0, type=int)
    args = parser.parse_args()

    args.train_file = DATA_PATH / args.dataset / args.train_file
    args.dev_file = DATA_PATH / args.dataset / args.dev_file
    args.test_file = DATA_PATH / args.dataset / args.test_file

    if args.replace_word_dict == 'False':
        args.replace_word_dict = False
    else:
        args.replace_word_dict = True

    if args.retrain_model == 'False':
        args.retrain_model = False
    else:
        args.retrain_model = True

    (DATA_PATH / 'model' / args.dataset).mkdir(parents=True, exist_ok=True)
    args.model_file = DATA_PATH / 'model' / args.dataset / args.model_file

    train_language_model(args)
2.203125
2
test/test_head.py
zydmayday/pamda
1
12799691
import unittest

import ramda as R

"""
https://github.com/ramda/ramda/blob/master/test/head.js
"""


class TestHead(unittest.TestCase):
    def test_returns_the_first_element_of_an_ordered_collection(self):
        self.assertEqual(1, R.head([1, 2, 3]))
        self.assertEqual(2, R.head([2, 3]))
        self.assertEqual(3, R.head([3]))
        self.assertEqual(None, R.head([]))

        self.assertEqual('a', R.head('abc'))
        self.assertEqual('b', R.head('bc'))
        self.assertEqual('c', R.head('c'))
        self.assertEqual('', R.head(''))

    def test_throws_if_applied_to_None(self):
        with self.assertRaises(TypeError):
            R.head(None)


if __name__ == '__main__':
    unittest.main()
3.375
3
exhaust/urls.py
lewiscollard/exhaust
0
12799692
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.urls import include, path
from django.views.generic import TemplateView
from markdownx.views import MarkdownifyView

from exhaust.posts.sitemaps import POSTS_SITE_MAPS

urlpatterns = [
    path('', include('exhaust.posts.urls', namespace='posts')),
    path('exogram/', include('exhaust.exogram.urls', namespace='exogram')),
    path('robots.txt', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
    path('admin/', admin.site.urls),
    path('markdownx/markdownify/', MarkdownifyView.as_view(), name='markdownx_markdownify'),
    path('sitemap.xml', sitemap, {'sitemaps': POSTS_SITE_MAPS}, name='django.contrib.sitemaps.views.sitemap'),
    # Error page styling tests. It's OK to have these outside of DEBUG (if
    # someone wants to pretend they're having a 500 they're more than welcome
    # to). It means there's one less branch to test in settings.
    path('404/', TemplateView.as_view(template_name='404.html')),
    path('500/', TemplateView.as_view(template_name='500.html')),
] + static(
    settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
) + static(
    settings.STATIC_URL, document_root=settings.STATIC_ROOT
)
2.140625
2
Python3/tests/test_key.py
combofish/chips-get
2
12799693
<filename>Python3/tests/test_key.py<gh_stars>1-10
import unittest


class TeskKey(unittest.TestCase):
    def test_key(self):
        a = ['a', 'b']
        b = ['b']
        self.assertEqual(a, b)
2.765625
3
src/quakestats/system/context.py
LaudateCorpus1/quakestats
21
12799694
import logging

import pymongo

from quakestats.datasource.mongo2 import (
    DataStoreMongo,
)
from quakestats.system import (
    conf,
)

logger = logging.getLogger(__name__)


class SystemContext:
    def __init__(self):
        self.config = conf.cnf
        self.ds: DataStoreMongo = None
        self.ds_client: pymongo.MongoClient = None

        self.configure()

    def configure(self):
        uri = conf.get_conf_val('MONGO_URI')
        self.ds_client = pymongo.MongoClient(uri)
        parsed_uri = pymongo.uri_parser.parse_uri(uri)
        database_name = parsed_uri["database"]
        self.ds = DataStoreMongo(self.ds_client.get_database(database_name))
2.421875
2
keys.py
Hudson-AmalembaL/daraja_mpesa_api
0
12799695
business_shortCode = "174379"
phone_number = "254746468686"
lipa_na_mpesa_passkey = "<KEY>"
consumer_key = "ryHq5u8TIFcyps7lIYQThAP1Al0zXAcU"
consumer_secrete = "O9kOqvHmN5sGViKI "
1.015625
1
helper.py
Ellectronx/wsb-oceny
5
12799696
import smtplib
from email.message import EmailMessage
from credent import secret

tb_headers = ["id", "przedmiot", "wykladowca", "forma_zaliczenia", "rodz_zajec",
              "ocena1", "data1", "ocena2", "data2"]


def sendEmail(subject, eml_from, eml_to, message):
    msg = EmailMessage()
    msg.set_content(message)
    msg['Subject'] = subject
    msg['From'] = eml_from
    msg['To'] = eml_to

    # Send the message via SMTP server.
    print("SENDING INFO EMAIL...")
    try:
        server = smtplib.SMTP(secret["smtp_host"], secret["smtp_port"])
        server.ehlo()
        server.login(secret["smtp_login"], secret["smtp_password"])
        server.send_message(msg)
        server.quit()
        print("SENDING OK!")
    except:
        # raise
        print("...sending email: something went wrong :(")


def preetyGrade(grade):
    if grade == "-":
        return "brak"
    else:
        return str(grade)


def compareT(T1, T2):
    # T1, T2: tuples holding a row from the database (a table row with
    # old/new grades)
    lenT1 = len(T1)
    lenT2 = len(T2)

    if lenT1 != 9 and lenT2 != 9:
        return {"private": "Błąd E1. Nieodpowiednia ilość kolumn. Być może zmeniła się struktura strony źródłowej ?!", "public": ""}

    if lenT2 > lenT1 and lenT1 == 0:
        return {"private": "Dopisano nowy przedmiot: " + T2[1], "public": ""}

    if lenT1 == lenT2 and lenT1 == 9:
        zm = ""
        L = len(T1)
        for i in range(0, L):
            if T1[i] != T2[i]:
                zm = zm + "\r\nZmiana " + tb_headers[i] + " z " + preetyGrade(T1[i]) + " na " + preetyGrade(T2[i]) + ", "
        if len(zm) > 1:
            zm = zm[:-2]
        return {"private": "Przedmiot: " + T1[1] + " (" + T1[3] + ", " + T1[2] + ")" + zm,
                "public": "Możliwa nowe oceny z przedmiotu: " + T1[1] + " (" + T1[3] + ", " + T1[2] + ") [powiadomienie automatyczne, grupa WZ_INiN3_PG2]"}

    return {"private": "Nieokreślony błąd. Być moze załadowane zostały przedmioty z nowego semestru lub zmeniła się struktura strony źródłowej ?!", "public": ""}
3.0625
3
treestruct/__init__.py
srafehi/treestruct
0
12799697
from collections import MutableSet

from . import helpers

FORWARD = 1  # used to look at Node children
BACKWARD = -1  # used to look at Node parents


class NodeSet(MutableSet):
    """
    A mutable set which automatically populates parent/child node sets. For
    example, if this NodeSet contains `children` nodes and a new node was
    added, that node's `parent` NodeSet will automatically be populated with
    the owner of this NodeSet.
    """

    __slots__ = ('owner', 'items', 'direction')

    def __init__(self, owner, items, direction):
        """
        :type owner: Node
        :type items: set[Node]
        :type direction: int
        """
        self.owner = owner
        self.items = set()
        self.direction = direction
        self.update(items)

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return len(self.items)

    def add(self, value):
        """
        Adds the node to this NodeSet and populates the node's NodeSet with
        the owner of this NodeSet.

        :type value: Node
        """
        if value not in self:
            value.direction(self.direction * -1).items.add(self.owner)
        return self.items.add(value)

    def discard(self, value):
        """
        Removes the node from this NodeSet and removes this NodeSet's owner
        from the node's NodeSets.

        :type value: Node
        """
        if value in self:
            value.direction(self.direction * -1).items.discard(self.owner)
        return self.items.discard(value)

    def update(self, nodes):
        for node in nodes:
            self.add(node)

    def discard_many(self, nodes):
        for node in nodes:
            self.discard(node)

    def one(self, raise_on_empty=False):
        """
        Returns an item from this NodeSet if there is only one item.

        :type raise_on_empty: bool
        :rtype: Node | None
        :raises: ValueError
        """
        if not self.items and raise_on_empty:
            raise ValueError('Called NodeSet.one on empty set')
        elif len(self.items) > 1:
            raise ValueError('Called NodeSet.one on set with multiple values')
        return next(iter(self.items), None)

    def __contains__(self, x):
        return self.items.__contains__(x)

    def __repr__(self):
        return 'NodeSet{}'.format(tuple(self.items))


class Node(object):
    __slots__ = ('parents', 'children', 'data')

    def __init__(self, data=None, parents=None, children=None):
        self.parents = NodeSet(self, [] if parents is None else parents, BACKWARD)
        self.children = NodeSet(self, [] if children is None else children, FORWARD)
        self.data = data

    def __repr__(self):
        return '<{} {}>'.format(type(self).__name__, self.data)

    @property
    def connections(self):
        """
        Returns all parents and children associated with this Node.

        :rtype: set[Node]
        """
        return set(list(self.parents) + list(self.children))

    def direction(self, direction):
        """
        Returns this node's parents if direction is BACKWARD, else, returns
        children nodes.

        :type direction: int
        :rtype: NodeSet
        """
        return self.parents if direction == BACKWARD else self.children

    def depth_first_traversal(self, callback, direction, obj=None):
        """
        Executes a depth-first traversal from this node in a given direction.
        Raising a StopIteration will terminate the traversal.

        :type callback: (Node, object) -> ()
        :type direction: int
        :type obj: Any
        :return: Returns `obj` (or None if no `obj` is supplied).
        :rtype: Any
        """
        return helpers.depth_first_traversal_for_node(node=self, callback=callback, direction=direction, obj=obj)

    def breadth_first_traversal(self, callback, direction, obj=None):
        """
        Executes a breadth-first traversal from this node in a given direction.
        Raising a StopIteration will terminate the traversal.

        :type callback: (Node, object) -> ()
        :type direction: int
        :type obj: Any
        :return: Returns `obj` (or None if no `obj` is supplied).
        :rtype: Any
        """
        return helpers.breadth_first_traversal_for_node(node=self, callback=callback, direction=direction, obj=obj)

    def walk_links(self, callback, direction, obj=None):
        """
        Walks each link for this node. Raising a StopIteration will terminate
        the traversal.

        :type callback: (Node, Node, object) -> ()
        :type direction: int
        :type obj: Any
        :return: Returns `obj` (or None if no `obj` is supplied).
        :rtype: Any
        """
        return helpers.walk_links_for_node(node=self, callback=callback, direction=direction, obj=obj)

    def root(self):
        """
        Returns the root node of this node if it only has one root node.

        :rtype: Node
        :raises: ValueError
        """
        roots = self.roots()
        if len(roots) > 1:
            raise ValueError('Node.root is not applicable when the node has multiple roots')
        return next(iter(roots))

    def gather_nodes(self, direction=None):
        """
        Returns all nodes in the tree. Nodes can be restricted by specifying
        a direction.

        :type direction: int
        :rtype: set[Node]
        """
        return helpers.gather_nodes(node=self, direction=direction)

    def flatten(self, direction=None):
        """
        Returns a list of node lists representing a path on the tree.

        :type direction: int | None
        :rtype: list[list[treestruct.Node]]
        """
        return helpers.flatten_from_node(node=self, direction=direction)

    def roots(self):
        """
        Returns all roots (any parent nodes with no parents) of this node.

        :rtype: set[Node]
        """
        return helpers.roots_for_node(node=self)

    def leaves(self):
        """
        Returns all leaves (any child nodes with no children) of this node.

        :rtype: set[Node]
        """
        return helpers.leaves_for_node(node=self)

    def delete(self, direction=None):
        """
        Removes this node from the NodeSets of connected nodes. If direction
        is given, only remove the node from the connected nodes in the given
        direction.

        :type direction: int
        :rtype: Node
        """
        return helpers.delete_node_relationships(node=self, direction=direction)

    def clone(self):
        """
        Clones the node and all its child nodes and forms a new root.

        :rtype: Node
        """
        return helpers.clone_subtree(node=self, cls=type(self))

    def find_all(self, condition, direction=None):
        """
        Returns all nodes which match the given condition.

        :type condition: (Node) -> bool
        :type direction: int
        :rtype: set[Node]
        """
        return helpers.find_nodes(node=self, condition=condition, direction=direction)

    def find(self, condition, direction=None, raise_on_empty=False):
        """
        Returns a single node which matches the given condition.

        :type condition: (Node) -> bool
        :type direction: int
        :type raise_on_empty: bool
        :rtype: Node | None
        :raises: ValueError
        """
        return helpers.find_node(node=self, condition=condition, direction=direction, raise_on_empty=raise_on_empty)

    def to_dict(self, data_converter=None):
        """
        Converts this node's complete structure into a dictionary.

        :type data_converter: (Any) -> (Any) | None
        :rtype: list[dict]
        """
        return helpers.to_dict_from_node(node=self, data_converter=data_converter)

    @classmethod
    def from_dict(cls, tree_dict, data_converter=None):
        """
        Converts a dict into a tree of Nodes, with the return value being the
        root node.

        :param tree_dict: dict
        :type data_converter: (Any) -> (Any) | None
        :rtype: Node
        """
        return helpers.from_dict(tree_dict=tree_dict, data_converter=data_converter, cls=cls)

    @classmethod
    def from_nodes(cls, nodes):
        """
        Creates a flat tree structure from a list of nodes. It is assumed
        that the first Node in the list is the root and each subsequent Node
        is a child. Any existing parents or children will be disregarded.

        :type nodes: collections.Sequence[Node]
        :rtype: Node
        """
        return helpers.node_from_node_sequence(nodes=nodes, cls=cls)
3.921875
4
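A usage sketch based on the documented behavior above: linking via one side of a parent/child relationship populates the other side automatically. The roots/leaves semantics follow the docstrings; their exact behavior lives in the unshown helpers module.

from treestruct import Node

# Linking mid to root via `parents` populates root.children automatically.
root = Node('root')
mid = Node('mid', parents=[root])
leaf = Node('leaf', parents=[mid])

assert mid in root.children and root in mid.parents
assert root.leaves() == {leaf}   # documented: child nodes with no children
assert leaf.roots() == {root}    # documented: parent nodes with no parents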
pyresx/__init__.py
cola314/pyresx
3
12799698
<reponame>cola314/pyresx
from pyresx.ResXWriter import ResXWriter
1.0625
1
tests/test_connect.py
chdean/usajobs
2
12799699
<gh_stars>1-10
import usajobs
import pytest


def test_connect():
    email, apikey = 'email', 'apikey'
    headers = usajobs.connect(email=email, apikey=apikey)
    assert headers == {'Host': 'data.usajobs.gov',
                       'User-Agent': 'email',
                       'Authorization-Key': 'apikey'}
1.992188
2
indra_world/sources/__init__.py
yanzv/indra_world
3
12799700
import tqdm
import pickle
import logging
import functools
from typing import List, Mapping, Optional
from multiprocessing import Pool

from indra.statements import Statement
from indra_world.sources import eidos, hume, sofia

logger = logging.getLogger(__name__)


def _reader_wrapper(fname, reader, dart_ids=None, **kwargs):
    if reader == 'eidos':
        pr = eidos.process_json_file(fname, **kwargs)
        pr.doc.tree = None
    elif reader == 'sofia':
        pr = sofia.process_json_file(fname, **kwargs)
    elif reader == 'hume':
        pr = hume.process_jsonld_file(fname, **kwargs)
    if dart_ids:
        dart_id = dart_ids.get(fname)
        for stmt in pr.statements:
            for ev in stmt.evidence:
                ev.text_refs['DART'] = dart_id
    return pr.statements


def process_reader_outputs(fnames: List[str],
                           reader: str,
                           dart_ids: Mapping[str, str] = None,
                           extract_filter: List[str] = None,
                           grounding_mode: str = 'compositional',
                           nproc: int = 8,
                           output_pkl: str = None) -> List[Statement]:
    """Process a set of reader outputs in parallel.

    Parameters
    ----------
    fnames :
        The list of file paths to the reader outputs to be processed.
    reader :
        The name of the reader which produced the outputs.
    dart_ids :
        A dict which maps each fname in the fnames list to a DART document
        ID. These are then set in the evidences of statements extracted
        from the output.
    extract_filter :
        What types of statements to extract.
    grounding_mode :
        The type of grounding mode to use for processing.
    nproc :
        The number of workers to use for parallelization.
    output_pkl :
        The path to an output pickle file in which to dump the statements
        extracted from the outputs.

    Returns
    -------
    :
        The list of statements extracted from the outputs.
    """
    if extract_filter is None:
        extract_filter = ['influence']
    pool = Pool(nproc)
    chunk_size = 10
    process_fun = functools.partial(_reader_wrapper,
                                    reader=reader, dart_ids=dart_ids,
                                    extract_filter=extract_filter,
                                    grounding_mode=grounding_mode)
    stmts = []
    for res in tqdm.tqdm(pool.imap_unordered(process_fun, fnames,
                                             chunksize=chunk_size),
                         total=len(fnames)):
        stmts += res
    logger.debug('Closing pool...')
    pool.close()
    logger.debug('Joining pool...')
    pool.join()
    logger.info('Pool closed and joined.')
    if output_pkl:
        logger.info(f'Writing into {output_pkl}')
        with open(output_pkl, 'wb') as fh:
            pickle.dump(stmts, fh)
    return stmts
2.265625
2
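A hedged usage sketch for process_reader_outputs above; the file names and DART ids are placeholders, not real data.

from indra_world.sources import process_reader_outputs

fnames = ['out1.jsonld', 'out2.jsonld']  # hypothetical Hume reader outputs
dart_ids = {f: 'doc-{}'.format(i) for i, f in enumerate(fnames)}  # hypothetical
stmts = process_reader_outputs(fnames, 'hume', dart_ids=dart_ids,
                               nproc=2, output_pkl='stmts.pkl')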
megawidget/confirmation/__init__.py
pyrustic/megawidget
0
12799701
<reponame>pyrustic/megawidget<gh_stars>0
import tkinter as tk
import tkutil
from viewable import Viewable, CustomView
from tkutil import merge_megaconfig

# parts
BODY = "body"
LABEL_HEADER = "label_header"
LABEL_MESSAGE = "label_message"
FRAME_FOOTER = "frame_footer"
BUTTON_CANCEL = "button_cancel"
BUTTON_CONFIRM = "button_confirm"


class Confirmation(tk.Toplevel):
    """
    Confirmation is a dialog box to ask the user to confirm an action.

    Example:

        import tkinter as tk
        from megawidget import Confirmation

        def my_handler(result):
            print(result)

        root = tk.Tk()
        confirmation = Confirmation(root, title="Confirmation",
                                    header="Confirmation",
                                    message="Do you really want to continue ?",
                                    on_close=my_handler)
        root.mainloop()
    """

    def __init__(self,
                 master=None,
                 title=None,
                 header=None,
                 message=None,
                 on_close=None,
                 geometry=None,
                 megaconfig=None):
        """
        PARAMETERS:

        - master: widget parent. Example: an instance of tk.Frame

        - title: title of dialog box

        - header: the text to show as header

        - message: the text to show as message

        - on_close: a callback to be executed immediately after closing
            the dialog box. This callback should accept a boolean positional
            argument. True means Ok, confirmed.

        - geometry: str, as the dialog box is a toplevel (BODY),
            you can edit its geometry. Example: "500x300"

        - megaconfig: dictionary of widgets options
            The widgets keys are: BODY, LABEL_HEADER,
            LABEL_MESSAGE, FRAME_FOOTER, BUTTON_CANCEL, BUTTON_CONFIRM.

            Example: Assume that you want to set the LABEL_MESSAGE's
            background to black and the BODY's background to red:

                megaconfig = {BODY: {"background": "red"},
                              LABEL_MESSAGE: {"background": "black"}}
        """
        self.__megaconfig = merge_megaconfig(secondary=megaconfig)
        super().__init__(master=master,
                         class_="Confirmation",
                         cnf=self.__megaconfig.get(BODY))
        self.__title = title
        self.__header = header
        self.__message = message
        self.__on_close = on_close
        self.__geometry = geometry
        self.__parts = {}
        self.__ok = False
        # build
        self.__setup()

    # ====================================
    #           PROPERTIES
    # ====================================
    @property
    def header(self):
        return self.__header

    @property
    def message(self):
        return self.__message

    @property
    def on_close(self):
        return self.__on_close

    @property
    def ok(self):
        """
        Returns True if user confirmed, else get False
        """
        return self.__ok

    @property
    def parts(self):
        """
        Get the parts (widgets instances) used to build this dialog.

        This property returns a dict. The keys are:
        BODY, LABEL_HEADER, LABEL_MESSAGE, FRAME_FOOTER,
        BUTTON_CANCEL, BUTTON_CONFIRM

        Warning: check the presence of key before usage
        """
        return self.__parts

    # ====================================
    #              INTERNAL
    # ====================================
    def __setup(self):
        custom_view = CustomView(body=self,
                                 builder=self.__build,
                                 on_map=self.__on_map,
                                 on_destroy=self.__on_destroy)
        return custom_view.build()

    def __build(self):
        self.title(self.__title)
        self.resizable(0, 0)
        #
        if self.__geometry:
            self.geometry(self.__geometry)
        #
        if self.__header:
            label_header = tk.Label(self,
                                    text=self.__header,
                                    anchor="w",
                                    justify=tk.LEFT,
                                    name=LABEL_HEADER,
                                    cnf=self.__megaconfig.get(LABEL_HEADER))
            self.__parts[LABEL_HEADER] = label_header
            label_header.pack(fill=tk.X, expand=1, anchor="w", pady=5, padx=5)
        #
        if self.__message:
            label_message = tk.Label(self,
                                     name=LABEL_MESSAGE,
                                     text=self.__message,
                                     anchor="w",
                                     justify=tk.LEFT,
                                     cnf=self.__megaconfig.get(LABEL_MESSAGE))
            self.__parts[LABEL_MESSAGE] = label_message
            label_message.pack(fill=tk.BOTH, expand=1, padx=5, pady=(5, 10))
        #
        frame_footer = tk.Frame(self, cnf=self.__megaconfig.get(FRAME_FOOTER))
        self.__parts[FRAME_FOOTER] = frame_footer
        frame_footer.pack(anchor="e", pady=(0, 2), padx=2)
        #
        button_confirm = tk.Button(frame_footer,
                                   text="Confirmation",
                                   name=BUTTON_CONFIRM,
                                   command=self.__on_click_confirm,
                                   cnf=self.__megaconfig.get(BUTTON_CONFIRM))
        self.__parts[BUTTON_CONFIRM] = button_confirm
        button_confirm.pack(side=tk.RIGHT)
        #
        button_cancel = tk.Button(frame_footer,
                                  text="Cancel",
                                  name=BUTTON_CANCEL,
                                  command=self.__on_click_cancel,
                                  cnf=self.__megaconfig.get(BUTTON_CANCEL))
        self.__parts[BUTTON_CANCEL] = button_cancel
        button_cancel.pack(side=tk.RIGHT, padx=(0, 2))

    def __on_map(self):
        tkutil.center_dialog_effect(self,
                                    within=self.master.winfo_toplevel())

    def __on_destroy(self):
        if self.__on_close:
            self.__on_close(self.__ok)

    def __on_click_cancel(self):
        self.__ok = False
        self.destroy()

    def __on_click_confirm(self):
        self.__ok = True
        self.destroy()


class Error(Exception):
    def __init__(self, *args, **kwargs):
        self.message = args[0] if args else ""
        super().__init__(self.message)

    def __str__(self):
        return self.message


class _ConfirmTest(Viewable):
    def __init__(self, root):
        super().__init__()
        self._root = root
        self._body = None

    def _build(self):
        self._body = tk.Frame(self._root)
        btn_launch = tk.Button(self._body, text="Launch",
                               command=self._on_click_launch)
        btn_launch.pack()

    def _on_click_launch(self):
        confirmation = Confirmation(self._body, title="Confirmation",
                                    header="Confirmation",
                                    message="Do you really want to continue ?\nPress ok to continue\nOr die !")
        confirmation.wait_window()
        print("Confirmation:", confirmation.ok)


if __name__ == "__main__":
    root = tk.Tk()
    root.geometry("500x300+0+0")
    confirm_test = _ConfirmTest(root)
    confirm_test.build_pack()
    root.mainloop()
3.234375
3
utils/inter_area.py
edosedgar/mtcnnattack
68
12799702
<gh_stars>10-100
from __future__ import division, print_function, absolute_import

import tensorflow as tf
import numpy as np


def inter_area_batch(im_inp, h, w, hs, ws):
    # Do INTER_AREA resize here
    # h, w - input size
    # hs, ws - scaled size
    whole = im_inp
    return tf.clip_by_value(whole, 0., 1.)


def resize_area_batch(imgs, hs, ws):
    _, h, w, _ = imgs.shape
    with tf.variable_scope("resize_area"):
        out = inter_area_batch(imgs, int(h), int(w), hs, ws)
    return out
2.1875
2
output/models/ms_data/regex/re_c43_xsd/__init__.py
tefra/xsdata-w3c-tests
1
12799703
<filename>output/models/ms_data/regex/re_c43_xsd/__init__.py
from output.models.ms_data.regex.re_c43_xsd.re_c43 import (
    Regex,
    Doc,
)

__all__ = [
    "Regex",
    "Doc",
]
1.195313
1
monet/visualize/force.py
flo-compbio/monet
39
12799704
<gh_stars>10-100
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME>
#
# This file is part of Monet.

from typing import Tuple

import pandas as pd
import scanpy.tl as tl
import scanpy.pp as pp
import plotly.graph_objs as go

from ..core import ExpMatrix
from ..latent import PCAModel
from .cells import plot_cells


def force_plot(
        matrix: ExpMatrix,
        num_components: int = 50,
        transform_name: str = 'freeman-tukey',
        pca_model: PCAModel = None,
        **kwargs) -> Tuple[go.Figure, pd.DataFrame]:

    if pca_model is None:
        pca_model = PCAModel(num_components=num_components,
                             transform_name=transform_name)
        pc_scores = pca_model.fit_transform(matrix)
    else:
        pc_scores = pca_model.transform(matrix)

    adata = ExpMatrix(pc_scores.T).to_anndata()
    adata.obsm['pc_scores'] = pc_scores.values

    # determine nearest-neighbors
    pp.neighbors(adata, use_rep='pc_scores')
    tl.draw_graph(adata)

    Y = adata.obsm['X_draw_graph_fa']
    scores = pd.DataFrame(
        index=adata.obs_names,
        columns=['Dim. 1', 'Dim. 2'],
        data=Y)

    fig = plot_cells(scores, **kwargs)

    return fig, scores
2.375
2
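A minimal sketch of calling force_plot above. The module paths follow from the file location (monet/visualize/force.py) and its relative imports; the input file and the DataFrame-based ExpMatrix construction are assumptions (the function itself builds an ExpMatrix from a DataFrame, so the constructor accepting one is grounded).

import pandas as pd

from monet.core import ExpMatrix
from monet.visualize.force import force_plot

# Hypothetical genes-by-cells expression table.
df = pd.read_csv('expression.tsv', sep='\t', index_col=0)
fig, scores = force_plot(ExpMatrix(df), num_components=30)
fig.write_html('force_layout.html')  # fig is a plotly Figure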
add_exp_to_ref.py
C2SM/clim-sanity-checker
0
12799705
# standard modules
import os
import shutil
import argparse

# aliased standard modules
import pandas as pd

# modules of sanity checker
import lib.paths as paths
import lib.utils as utils
import lib.logger_config as logger_config

# standalone imports
from lib.logger_config import log
from lib.test_config import get_config_of_current_test
from lib.color import Style

'''
Module providing the functionality to add an experiment
to the reference pool. It contains:

    - add_line_descr_f: Add a new line to the experiment description file
      with all information about an experiment

    - main: asks user for additional information about experiment, commits
      data of new experiment to git-repository

Help: python add_exp_to_ref.py --help

C.Siegenthaler 07.2020 (C2SM)
J.Jucker 01.2021 (C2SM)

'''


def add_line_descr_f(exp, f_exp_descr):
    '''
    Add line for exp exp in file f_exp_descr

    :param exp: new experiment name
    :param f_exp_descr: file in which the new line has to be added

    return: None
    '''

    log.info('Adding line {} in the file {}:'.format(exp, f_exp_descr))

    # open file in dataframe
    if not os.path.isfile(f_exp_descr):
        # create dataframe
        cols_exp_descr_f = ['Experiment name',
                            'Platform',
                            'OS',
                            'Compiler (with version)',
                            'Optimisation level (-OX)',
                            '-fast-transcendentals (y/n)',
                            '-no-prec-sqrt (y/n)',
                            '-no-prec-div (y/n)',
                            'welch (y/n)',
                            'fldcor (y/n)',
                            'rmse (y/n)',
                            'emi (y/n)',
                            'Date of experiment (month yyyy)']
        df_exp_descr = pd.DataFrame(columns=cols_exp_descr_f)
    else:
        df_exp_descr = pd.read_csv(f_exp_descr, sep=';')

    # collect information from user
    log.banner('Please give the following information '
               'about your experiment')
    dict_line = {'Experiment name': exp}
    for col_name in df_exp_descr.keys():
        if col_name != 'Experiment name':
            # ask the user for info
            dict_line[col_name] = input('{} : '.format(col_name))

    # amend the information if needed
    while True:
        # new dataframe containing new line for exp
        df_exp_descr_new = df_exp_descr.append(dict_line, ignore_index=True)
        log.banner('Here is the content of the description '
                   'file including your new experiment.')
        log.info(df_exp_descr_new)
        answ_chg = input('Is the new file right ? (y/n/abort).\n'
                         'If you type n, you will be able to change '
                         'column values\n'
                         'If you type abort, the process of adding '
                         'the experiment {} to the reference is stopped.\n'
                         '(y/n/abort) : '
                         ''.format(exp))
        if answ_chg.upper() == 'Y':
            # save new file
            df_exp_descr_new.to_csv(f_exp_descr, sep=';', index=False)
            # get out of the loop
            return False
        elif answ_chg.upper() == 'N':
            answ_col = input('Which column field you want to change ?')
            if answ_col in df_exp_descr.keys():
                dict_line[answ_col] = input('{} : '.format(answ_col))
            else:
                log.warning('{} not in columns!'.format(answ_col))
                log.info('Columns are {}\n'.format(list(df_exp_descr.columns)))
        elif answ_chg.upper() == 'ABORT':
            exit()

    return ()


def main(exp,
         tests,
         p_stages=paths.p_stages,
         p_ref_csv_files=paths.p_ref_csv_files,
         ltestsuite=False,
         lverbose=False):

    # initialisation
    new_branch_name = 'test_add_{}'.format(exp)
    files_to_commit = []

    # fill up file 'Exps_description.csv' with additional
    # information via user input
    f_exp_descr = os.path.join(p_ref_csv_files, 'Exps_description.csv')
    if not ltestsuite:
        add_line_descr_f(exp=exp, f_exp_descr=f_exp_descr)
    files_to_commit.append(f_exp_descr)

    for test in tests:
        test_cfg = get_config_of_current_test(test)

        csv_file = utils.clean_path(p_stages, 'test_postproc_{}_{}.csv'
                                    .format(test, exp))

        # what is the filename in the reference pool
        filename_in_ref_dir = '{}_{}.csv'.format(test_cfg.ref_name, exp)
        # what is the location to store that file
        place_for_reference = os.path.join(p_ref_csv_files,
                                           test,
                                           filename_in_ref_dir)

        log.debug('Copy {} to {}'.format(csv_file, place_for_reference))
        if not ltestsuite:
            shutil.copy(csv_file, place_for_reference)

        files_to_commit.append(place_for_reference)

        # copy pdf with bar-plots from Welch's-test
        if test == 'welch':
            pdf_file = utils.clean_path(p_stages,
                                        '{}_{}.pdf'.format(test_cfg.ref_name,
                                                           exp))

            # what is the name of the pdf in the reference pool
            filename_in_ref_dir = '{}_plots.pdf'.format(test_cfg.ref_name)
            # what is the location to store that file
            place_for_reference = os.path.join(p_ref_csv_files,
                                               test,
                                               filename_in_ref_dir)

            log.debug('Copy {} to {}'.format(csv_file, place_for_reference))
            files_to_commit.append(place_for_reference)
            if not ltestsuite:
                shutil.copy(pdf_file, place_for_reference)

    # root is important to not fail during git commands
    os.chdir(paths.rootdir)

    # checkout new branch
    if not ltestsuite:
        log.info('Create and checkout new branch {}'.format(new_branch_name))
        git_cmd = 'git checkout -B {}'.format(new_branch_name)
        utils.shell_cmd(git_cmd, py_routine='add_exp_to_ref.py')

        # commit all files modified earlier in the function to git
        for file in files_to_commit:
            git_cmd = 'git add {}'.format(file)
            log.debug(git_cmd)
            utils.shell_cmd(git_cmd, py_routine=__name__)

        log.debug('Commit files {}'.format(files_to_commit))
        commit_message = input('Please type your commit message :')
        git_cmd = 'git commit -m "{}"'.format(commit_message)
        utils.shell_cmd(git_cmd, py_routine=__name__)

    # Finish
    log.info(Style.GREEN('Files are added in the new branch: '
                         '{} in your local git repository.'
                         .format(new_branch_name)))
    log.info('To add the file to the official repository, '
             'please perform the following steps:')
    log.info('1. Push the new branch into the official repo:')
    log.info('   git push --set-upstream origin {}'.format(new_branch_name))
    log.info('2. On the Open Web interface (GitHub), open a Pull Request.')

    log.banner('End add_exp_to_ref for experiment {}'.format(exp))

    return ()


if __name__ == '__main__':

    # parsing arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--exp', '-e', dest='exp',
                        required=True,
                        help='exp to add')

    parser.add_argument('--p_stages', dest='p_stages',
                        default=paths.p_stages,
                        help='relative or absolute path of the csv \
                            files of the testresults')

    parser.add_argument('--p_ref_csv_files', dest='p_ref_csv_files',
                        default=paths.p_ref_csv_files,
                        help='path to the pool of csv files, \
                            one per reference experiment')

    parser.add_argument('--tests', '-t', dest='tests',
                        default=['welch', 'fldcor', 'rmse', 'emi'],
                        nargs='+',
                        help='Tests to add to reference pool')

    parser.add_argument('--verbose', '-v', dest='lverbose',
                        action='store_true',
                        help='Debug output')

    parser.add_argument('--testsuite', '-ts', dest='ltestsuite',
                        action='store_true',
                        help='Run of testsuite')

    args = parser.parse_args()

    # init logger
    logger_config.init_logger(args.lverbose, __file__)

    log.banner('Start execute {} as main()'.format(__file__))

    # make all paths from user to absolute paths
    args.p_stages = utils.abs_path(args.p_stages)
    args.p_ref_csv_files = utils.abs_path(args.p_ref_csv_files)

    main(exp=args.exp,
         tests=args.tests,
         p_stages=args.p_stages,
         p_ref_csv_files=args.p_ref_csv_files,
         ltestsuite=args.ltestsuite,
         lverbose=args.lverbose)

    log.banner('End execute {} as main()'.format(__file__))
2.25
2
ml_rest_api/api/health/liveness.py
jgbustos/ml-rest-api
15
12799706
<gh_stars>10-100 """This module implements the HealthLiveness class.""" from flask_restx import Resource from ml_rest_api.api.restx import api, FlaskApiReturnType @api.default_namespace.route("/liveness") class HealthLiveness(Resource): """Implements the /liveness GET method.""" @staticmethod @api.doc( responses={ 200: "Success", } ) def get() -> FlaskApiReturnType: """ Returns liveness status. """ return {"Alive": True}, 200
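A minimal sketch of probing this endpoint from a client, assuming the app is served locally and the default namespace is mounted so the route resolves to /liveness (host, port, and any path prefix are assumptions about the deployment):

import requests

resp = requests.get("http://localhost:5000/liveness")  # adjust host/port/prefix to your deployment
assert resp.status_code == 200
assert resp.json() == {"Alive": True}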
2.25
2
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/common/vitis_custom_wrapper.py
hito0512/Vitis-AI
1
12799707
<reponame>hito0512/Vitis-AI
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper which is a custom layer over an underlying layer.

   `CustomOpWrapper` is responsible for modifying the construction of the
   underlying layer to ensure proper attributes are placed in the graph.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import copy

from tensorflow.python.util import tf_inspect
from tensorflow_model_optimization.python.core.quantization.keras.vitis.utils import common_utils

register_keras_serializable = tf.keras.utils.register_keras_serializable
deserialize_keras_object = tf.keras.utils.deserialize_keras_object
serialize_keras_object = tf.keras.utils.serialize_keras_object
logger = common_utils.VAILogger


@register_keras_serializable(package='Vitis', name='CustomOpWrapper')
class CustomOpWrapper(tf.keras.layers.Wrapper):
  """Mark this layer as a custom layer and set some attributes."""

  def __init__(self, layer, **kwargs):
    """Create a custom layer wrapper for a keras layer.

    Args:
      layer: The keras layer to be wrapped.
      **kwargs: Additional keyword arguments to be passed to the keras layer.
    """
    if layer is None:
      logger.error('`layer` cannot be None.')

    # Check against keras.Model since it is an instance of keras.layers.Layer.
    if not isinstance(layer, tf.keras.layers.Layer) or isinstance(
        layer, tf.keras.Model):
      logger.error('`layer` can only be a `tf.keras.layers.Layer` instance. '
                   'You passed an instance of type: {input}.'.format(
                       input=layer.__class__.__name__))

    if 'name' not in kwargs:
      kwargs['name'] = layer.name

    super(CustomOpWrapper, self).__init__(layer, **kwargs)
    self._track_trackable(layer, name='layer')

  def build(self, input_shape):
    super(CustomOpWrapper, self).build(input_shape)

  def compute_output_shape(self, input_shape):
    return self.layer.compute_output_shape(self.layer.input_shape)

  def call(self, inputs, training=None):
    args = tf_inspect.getfullargspec(self.layer.call).args
    if 'training' in args:
      outputs = self.layer.call(inputs, training=training)
    else:
      outputs = self.layer.call(inputs)
    return outputs

  def get_config(self):
    base_config = super(CustomOpWrapper, self).get_config()
    config = {}
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config):
    config = config.copy()
    layer = tf.keras.layers.deserialize(config.pop('layer'))
    return cls(layer=layer, **config)

  @property
  def trainable_weights(self):
    return self.layer.trainable_weights

  @property
  def non_trainable_weights(self):
    return self.layer.non_trainable_weights

  @property
  def updates(self):
    return self.layer.updates + self._updates

  @property
  def losses(self):
    return self.layer.losses + self._losses


# A dict mapping the wrapper's registered name to its class (this was
# previously written with a comma, which created a set instead of a dict).
_types_dict = {"CustomOpWrapper": CustomOpWrapper}
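A minimal usage sketch, assuming the module above is importable; the layer type and input shape are illustrative:

import tensorflow as tf

dense = tf.keras.layers.Dense(4)
wrapped = CustomOpWrapper(dense)       # wrapper inherits the layer's name
_ = wrapped(tf.zeros((1, 8)))          # builds the wrapper and calls through to Dense
round_trip = CustomOpWrapper.from_config(wrapped.get_config())  # serialization round-trip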
1.984375
2
crosswalk_client/methods/domain/update_domain.py
The-Politico/django-crosswalk-client
3
12799708
from urllib.parse import urljoin import requests from crosswalk_client.exceptions import BadResponse from crosswalk_client.objects.domain import DomainObject from crosswalk_client.validators.domain import validate_required_domain_arg class UpdateDomain(object): @validate_required_domain_arg def update_domain(self, domain, update_attrs): response = requests.patch( urljoin(self.service_address, f"domains/{domain}/"), headers=self.headers, json=update_attrs, ) if response.status_code != 200: raise BadResponse( "The service responded with a {}: {}".format( response.status_code, response.content ) ) return DomainObject(response.json(), client=self)
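A hypothetical usage sketch; the client class that supplies service_address and headers is defined elsewhere in the package, and the domain name and attributes here are illustrative:

# assumes `client` is an already-configured crosswalk client exposing this mixin
updated = client.update_domain("people", {"description": "Person entities"})
print(updated)  # DomainObject built from the PATCH response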
2.75
3
openprocurement/auctions/core/plugins/awarding/v3/tests/migration.py
EBRD-ProzorroSale/openprocurement.auctions.core
2
12799709
<filename>openprocurement/auctions/core/plugins/awarding/v3/tests/migration.py from zope import deprecation deprecation.moved('openprocurement.auctions.core.tests.plugins.awarding.v3.tests.migration', 'version update')
0.964844
1
pybetter/item.py
cjwcommuny/pybetter
0
12799710
<filename>pybetter/item.py
def item(x):
    assert len(x) == 1
    return x[0]
2.078125
2
mozdns/sshfp/views.py
jlin/inventory
22
12799711
# Create your views here. from mozdns.views import MozdnsDeleteView from mozdns.views import MozdnsCreateView from mozdns.views import MozdnsDetailView from mozdns.views import MozdnsUpdateView from mozdns.views import MozdnsListView from mozdns.sshfp.models import SSHFP from mozdns.sshfp.forms import SSHFPForm class SSHFPView(object): model = SSHFP form_class = SSHFPForm queryset = SSHFP.objects.all() class SSHFPDeleteView(SSHFPView, MozdnsDeleteView): """ """ class SSHFPDetailView(SSHFPView, MozdnsDetailView): """ """ template_name = 'sshfp/sshfp_detail.html' class SSHFPCreateView(SSHFPView, MozdnsCreateView): """ """ class SSHFPUpdateView(SSHFPView, MozdnsUpdateView): """ """ class SSHFPListView(SSHFPView, MozdnsListView): """ """
2.09375
2
pyppeteer_stealth/__init__.py
ramiezer2/pyppeteer_stealth
1
12799712
<reponame>ramiezer2/pyppeteer_stealth from pyppeteer.page import Page from .chrome_app import chrome_app from .chrome_runtime import chrome_runtime from .iframe_content_window import iframe_content_window from .media_codecs import media_codecs from .sourceurl import sourceurl from .navigator_hardware_concurrency import navigator_hardware_concurrency from .navigator_languages import navigator_languages from .navigator_permissions import navigator_permissions from .navigator_plugins import navigator_plugins from .navigator_vendor import navigator_vendor from .navigator_webdriver import navigator_webdriver from .user_agent_override import user_agent_override from .utils import with_utils from .webgl_vendor import webgl_vendor from .window_outerdimensions import window_outerdimensions async def stealth(page: Page, **kwargs) -> None: if not isinstance(page, Page): raise ValueError("page must be pyppeteer.page.Page") await with_utils(page, **kwargs) await chrome_app(page, **kwargs) await chrome_runtime(page, **kwargs) await iframe_content_window(page, **kwargs) await media_codecs(page, **kwargs) await sourceurl(page, **kwargs) await navigator_hardware_concurrency(page, **kwargs) await navigator_languages(page, **kwargs) await navigator_permissions(page, **kwargs) await navigator_plugins(page, **kwargs) await navigator_vendor(page, **kwargs) await navigator_webdriver(page, **kwargs) await user_agent_override(page, **kwargs) await webgl_vendor(page, **kwargs) await window_outerdimensions(page, **kwargs)
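A minimal usage sketch with pyppeteer, assuming this package is installed; the target URL is illustrative:

import asyncio
from pyppeteer import launch
from pyppeteer_stealth import stealth

async def main():
    browser = await launch(headless=True)
    page = await browser.newPage()
    await stealth(page)                    # apply all evasions before navigating
    await page.goto("https://example.com")
    await browser.close()

asyncio.get_event_loop().run_until_complete(main())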
2.125
2
1-99/10-19/10.py
dcragusa/LeetCode
0
12799713
<filename>1-99/10-19/10.py """ Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'. '.' Matches any single character. '*' Matches zero or more of the preceding element. The matching should cover the entire input string (not partial). Note: s could be empty and contains only lowercase letters a-z. p could be empty and contains only lowercase letters a-z, and characters like . or *. Example 1: Input: s = "aa", p = "a", Output: false Explanation: "a" does not match the entire string "aa". Example 2: Input: s = "aa", p = "a*", Output: true Explanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes "aa". Example 3: Input: s = "ab", p = ".*", Output: true Explanation: ".*" means "zero or more (*) of any character (.)". Example 4: Input: s = "aab", p = "c*a*b", Output: true Explanation: c can be repeated 0 times, a can be repeated 1 time. Therefore, it matches "aab". Example 5: Input: s = "mississippi", p = "mis*is*p*.", Output: false """ """ We go backwards through the string and pattern. If there is a character match and there is no *, advance through the string and pattern by one. If there is a *, examine whether combinations of consuming more characters from the string (repeating characters) or more characters from the pattern (the * matching 0 characters). If we get to the end of the string and there is still a pattern remaining, discard any * sets. The regex is a match if at the end the string and pattern are equal. """ import functools @functools.lru_cache() def is_match(s, p): if s == p: return True elif s and not p: return False elif not s: return is_match(s, p[:-2]) if len(p) >= 2 and p[-1] == '*' else False if s[-1] == p[-1] or p[-1] == '.': # simple match return is_match(s[:-1], p[:-1]) elif p[-1] == '*': if p[-2] != s[-1] and p[-2] != '.': # no match, but * after return is_match(s, p[:-2]) else: # match, with * after for idx in range(1, len(s)+1): # examine if consuming more string characters leads to a match if s[-idx] != s[-1] and p[-2] != '.': break if is_match(s[:-idx], p): return True # examine if skipping the pattern leads to a match return is_match(s, p[:-2]) else: # no match, no * after return False assert is_match('a', 'ab*') is True assert is_match('abc', 'abc') is True assert is_match('aa', 'a') is False assert is_match('aab', 'c*aab') is True assert is_match('aa', 'a*') is True assert is_match('ab', '.*') is True assert is_match('ab', '.*c') is False assert is_match('aab', 'c*a*b') is True assert is_match('mississippi', 'mis*is*p*.') is False assert is_match('a', 'a*a') is True assert is_match('bbbba', '.*a*a') is True assert is_match('a', '.*..a*') is False assert is_match('aaaaaaaaaaaaab', 'a*a*a*a*a*a*a*a*a*a*c') is False assert is_match('aa', 'ab*a*') is True
4.375
4
skyutils/aircraft.py
jacob-zeiger/skyspy
12
12799714
<reponame>jacob-zeiger/skyspy from datetime import datetime, timedelta from shapely.geometry import Point class Aircraft: def __init__(self, line): ts = datetime.now() self.hexcode = line[0:6] self.flight = line[7:14].replace(" ", "") self.description = f"{self.flight} (Details N/A)" self.altitude = line[15:21].replace(" ", "") self.speed = line[26:29] lat = line[34:41].replace(" ", "") lon = line[44:52].replace(" ", "") try: lat = float(lat) lon = float(lon) except ValueError: lat = 0 lon = 0 self.coordinates = Point(lat, lon) self.track = line[54:57] self.msg = line[60:63] self.last = line[69:73] self.in_geofence = False self.origin = "N/A" self.origin_name = "N/A" self.destination = "N/A" self.destination_name = "N/A" self.type = "N/A" self.dist_to_home = 1001 self.ts = ts
2.765625
3
setup.py
espg/mortie
0
12799715
# A minimal setup.py file to make a Python project installable. import setuptools import yaml with open("README.md", "r") as fh: long_description = fh.read() with open("environment.yml", "r") as fh: env = yaml.safe_load(fh) requirements = [a.split('=', 1)[0].strip() for a in env['dependencies'] ] setuptools.setup( name = "mortie", version = "0.1.0", author = "<NAME>", author_email = "<EMAIL>", description = "Morton numbering for healpix grids", long_description = long_description, long_description_content_type = "text/markdown", packages = setuptools.find_packages(), classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", ], python_requires = '>= 3.5', install_requires = requirements, )
1.734375
2
tests/test_main_return_ptr.py
jiamo/pcc
4
12799716
import os import sys import ctypes this_dir = os.path.dirname(__file__) parent_dir = os.path.dirname(this_dir) sys.path.insert(0, parent_dir) from pcc.evaluater.c_evaluator import CEvaluator import unittest class TestMainReturnPtr(unittest.TestCase): def test_simple(self): pcc = CEvaluator() ret = pcc.evaluate(''' int a = 50; int b = 4; int* swap(int *x, int *y){ int tmp; tmp = *x; *x = *y; *y = tmp; return x; } int* main(){ swap(&a, &b); return &a ; } ''', llvmdump=True) # ret_value = ret.contents print("The answer is {} ret type is {} content ".format(ret, type(ret))) # so the global var assert ret.contents.value == 4 if __name__ == "__main__": unittest.main()
2.8125
3
src/gilbert/plugins/markdown.py
funkybob/gilbert
11
12799717
from pathlib import Path from markdown import markdown from gilbert import Site from gilbert.content import Page from gilbert.types import LoaderResult from gilbert.utils import oneshot class MarkdownPage(Page): """ Page type that renders its content as Markdown. Extensions can be configured in ``config.yml`` via: content_type:MarkdownPage or using the ``extras`` attribute. """ # List of Markdown extensions to enable. extras: list = [] @oneshot def content(self) -> str: extras = self.extras if not extras: extras = self.site.config.get("content_type", {}).get("MarkdownPage", []) return markdown(self.data, output_format="html5", extensions=extras) def load_md(path: Path) -> LoaderResult: data = path.read_text(encoding="utf-8") return data, {"content_type": "MarkdownPage"} Site.register_loader("md", load_md)
2.578125
3
lib/exutils.py
miyagaw61/old_exgdb
27
12799718
import os
import sys

EXGDBFILE = os.path.abspath(os.path.expanduser(__file__))
sys.path.insert(0, os.path.dirname(EXGDBFILE) + "/lib/")
import utils
from enert import *


def clearscreen():
    """
    Customized clearscreen from https://github.com/longld/peda
    """
    print("\x1b[2J\x1b[H")

utils.clearscreen = clearscreen
1.914063
2
classes/player.py
mcappleman/scoresheet-selenium
0
12799719
class Player(): def __init__(self, player): self.name = player['espn_name'] self.position = player['espn_pos'] self.mlb_team = player['mlb_team'] self.throws = player['throws'] self.bats = player['bats'] self.espn_id = str(int(player['espn_id'])) self.bref_id = player['bref_id'] self.fg_id = '' self.mine = False self.batter = self.position != 'RP' and self.position != 'SP' self.stats = {} self.team = player['team_id'] def to_dict(self): mine = '' if self.mine: mine = 'x' player_dict = { 'name': self.name, 'position': self.position, 'mlb_team': self.mlb_team, 'throws': self.throws, 'bats': self.bats, 'espn_id': self.espn_id, 'bref_id': self.bref_id, 'fg_id': self.fg_id, 'mine': mine, 'team_number': self.team } for key, value in self.stats.items(): player_dict[key] = value return player_dict def get_stats(self, row, key_start, site): key = key_start + "_" + site if site == 'ESPN': self.get_espn_stats(row, key) elif site == 'BR': self.get_br_stats(row, key) def get_br_stats(self, row, key_start): try: self.stats[key_start + '_ba'] = row.find_element_by_xpath('.//td[@data-stat="batting_avg"]').text self.stats[key_start + '_obp'] = row.find_element_by_xpath('.//td[@data-stat="onbase_perc"]').text self.stats[key_start + '_slg'] = row.find_element_by_xpath('.//td[@data-stat="slugging_perc"]').text self.stats[key_start + '_ops'] = row.find_element_by_xpath('.//td[@data-stat="onbase_plus_slugging"]').text except Exception: self.stats[key_start + '_era'] = row.find_element_by_xpath('.//td[@data-stat="earned_run_avg"]').text self.stats[key_start + '_ip'] = row.find_element_by_xpath('.//td[@data-stat="IP"]').text self.stats[key_start + '_so'] = row.find_element_by_xpath('.//td[@data-stat="SO"]').text self.stats[key_start + '_bb'] = row.find_element_by_xpath('.//td[@data-stat="BB"]').text def get_espn_stats(self, row, key_start): if self.batter: self.get_espn_batter_stats(row, key_start) else: self.get_espn_pitcher_stats(row, key_start) def get_espn_batter_stats(self, row, key_start): self.stats[key_start + '_ba'] = row.find_element_by_xpath('.//td[14]').text self.stats[key_start + '_obp'] = row.find_element_by_xpath('.//td[15]').text self.stats[key_start + '_slg'] = row.find_element_by_xpath('.//td[16]').text self.stats[key_start + '_ops'] = row.find_element_by_xpath('.//td[17]').text def get_espn_pitcher_stats(self, row, key_start): self.stats[key_start + '_era'] = row.find_element_by_xpath('.//td[2]').text self.stats[key_start + '_ip'] = row.find_element_by_xpath('.//td[10]').text self.stats[key_start + '_so'] = row.find_element_by_xpath('.//td[16]').text self.stats[key_start + '_bb'] = row.find_element_by_xpath('.//td[15]').text
3.0625
3
clickatell/__init__.py
Kapooral/clickatell-python
9
12799720
import httplib2 import urllib import json import re import sys class Transport: """ Abstract representation of a transport class. Defines the supported API methods """ endpoint = "platform.clickatell.com" def __init__(self): """ Construct a new transportation instance. :param boolean secure: Should we try and use a secure connection """ pass def merge(self, *args): """ Merge multiple dictionary objects into one. :param variadic args: Multiple dictionary items :return dict """ values = [] for entry in args: values = values + list(entry.items()) return dict(values) def parseResponse(self, response): """ Parse the response from json. Remapping error code and messages to be a level higher """ response['body'] = json.loads(response['body']) response['messages'] = response['body']['messages'] response['error'] = response['body']['error'] del response['body'] return response def request(self, action, data={}, headers={}, method='GET'): """ Run the HTTP request against the Clickatell API :param str action: The API action :param dict data: The request parameters :param dict headers: The request headers (if any) :param str method: The HTTP method :return: The request response """ http = httplib2.Http() body = urllib.urlencode(data) if (sys.version_info[0] < 3) else urllib.parse.urlencode(data) url = 'https://' + self.endpoint + '/' + action url = (url + '?' + body) if (method == 'GET') else url resp, content = http.request(url, method, headers=headers, body=json.dumps(data)) return self.merge(resp, {'body': content}) def sendMessage(self, to, message, extra={}): """ Send a message. :param list to: The number you want to send to (list of strings, or one string) :param string message: The message you want to send :param dict extra: Any extra parameters (see Clickatell documentation) :return dict :raises NotImplementedError """ raise NotImplementedError()
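A small illustration of the merge() helper above: the dictionaries are concatenated left to right, so keys from later arguments win (the values here are illustrative):

t = Transport()
print(t.merge({"to": "123", "text": "hi"}, {"to": "456"}))
# -> {'to': '456', 'text': 'hi'}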
3.625
4
pkg/yc.py
rmc8/bibliography_alert
0
12799721
import os import yaml class YamlConfig: def __init__(self, file_path: str = "./settings/config.yml"): self.file_path = file_path def exists(self) -> bool: return os.path.exists(self.file_path) def load(self) -> dict: """ :return: Return yaml data as dictionary format """ with open(self.file_path, "r", encoding="utf-8") as yf: return yaml.load(yf, Loader=yaml.FullLoader) def write(self, data: dict) -> None: """ Export yaml :param data: A dictionary of data that will be output in Yaml format """ with open(self.file_path, "w", encoding="utf-8") as yf: yaml.dump(data, yf, default_flow_style=False)
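A minimal usage sketch; the keys written here are illustrative, and the settings directory is assumed to exist before write() is called:

cfg = YamlConfig("./settings/config.yml")
if not cfg.exists():
    cfg.write({"api_key": "placeholder", "interval_minutes": 30})
settings = cfg.load()
print(settings["interval_minutes"])  # -> 30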
3.078125
3
src/pr_Sentinel1LakeArea_Ver3_Export.py
mbonnema/SWAV
0
12799722
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ ******************************************************************************* Google Earth Engine Sentinel-1 Lake Area o Purpose: Estimate surface area of a lake from Sentinel-1 SAR data, using Google Earth Engine cloud computing platform o Inputs: * ROI: Google Earth Engine geometry object describing the region of interest o Outputs: * Results: List containing 4 elements (GEE objects): 1) List of lake surface areas from ascending passes 2) List of lake surface areas from descending passes 3) List of time steps associated with ascending pass surface areas 4) List of time steps associated with descending pass surface areas Written by: <NAME>, <EMAIL> Version 0.3 ******************************************************************************* """ import ee ee.Initialize() def GetS1ResTimeSeries(roi): ID = roi.get('ID') ROI = roi.geometry() ROI_Diff = ROI.difference(roi) Date_Start = ee.Date('2017-01-01'); Date_End = ee.Date('2020-01-01'); date_interval = ee.Number(1); #month angle_threshold_1 = ee.Number(45.4); angle_threshold_2 = ee.Number(31.66) AreaImg = ee.Image.pixelArea() #****Get WaterProb Threshold************************************************ waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence') wProbThresh = ee.Number(ee.Image.constant(0).blend(waterProb).rename('occurrence').reduceRegion( reducer = ee.Reducer.max(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16 ).get('occurrence')) waterConfident = waterProb.gte(wProbThresh) landConfident = (ee.Image.constant(0).blend(waterProb)).Not().rename('occurrence') waterConfidentArea = ee.Number(waterConfident.multiply(AreaImg).reduceRegion( reducer = ee.Reducer.sum(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16, ).get('occurrence')) landConfidentArea = ee.Number(landConfident.multiply(AreaImg).reduceRegion( reducer = ee.Reducer.sum(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16, ).get('occurrence')) #****Create list of dates for time series******************************************** n_steps = Date_End.difference(Date_Start,'month').divide(date_interval).round(); dates = ee.List.sequence(0,n_steps,1); def make_datelist(n): return(Date_Start.advance(ee.Number(n).multiply(date_interval),'month')) dates = dates.map(make_datelist); #****Filter Edge Pixels************************************************************** def maskByAngle(img): I = ee.Image(img) angle = I.select('angle'); mask1 = angle.lt(angle_threshold_1); mask2 = angle.gt(angle_threshold_2); I = I.updateMask(mask1) return(I.updateMask(mask2)) #****Make S1 Image Collection******************************************************** def create_collection(d): start = ee.Date(d); end = ee.Date(d).advance(date_interval,'month'); date_range = ee.DateRange(start,end); S1 = ee.ImageCollection('COPERNICUS/S1_GRD') \ .filterDate(date_range) \ .filterBounds(ROI) \ .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \ .filter(ee.Filter.eq('instrumentMode', 'IW')) S1 = ee.ImageCollection(ee.Algorithms.If( condition = S1.size().gt(0), trueCase = S1.map(maskByAngle), falseCase = S1 )) S1_median = ee.Image(S1.select('VV').mean()).clip(ROI) S1_median = S1_median.set('system:time_start',start.millis()) S1_median = S1_median.set('Number_of_images',S1.size()) return(S1_median) #****Calc ROI Area********************************************************************** def calcArea(img): I = ee.Image(img) area = 
I.select('VV').lt(99999999).multiply(AreaImg).reduceRegion( reducer = ee.Reducer.sum(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16, ).get('VV') return(I.set('ROI_area',area)) #****Apply Filter********************************************************************** def focal_median(img): I = ee.Image(img) #fm = I.select('VV').rename('VV_smooth') fm = I.select('VV').focal_median(50,'circle','meters').rename('VV_smooth') return(I.addBands(fm)) #****Make Water Mask**************************************************************** def MakeWaterMask(img): I = ee.Image(img) wThresh = ee.Number(I.get('wThresh')) waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence') Mask = I.select('VV_smooth').updateMask(waterProb).lt(wThresh).rename('WaterMask') Sum = Mask.multiply(AreaImg).reduceRegion( reducer = ee.Reducer.sum(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16 ) I = I.set('water_pixels',Sum.get('WaterMask')) I = I.set('Water_Area',ee.Number(Sum.get('WaterMask'))) return I.addBands(Mask) #****Round time********************************************************************* def makeBackscatterStats(img): img = ee.Image(img) wMask = img.select('WaterMask') vv = img.select('VV_smooth') wPixelmean = vv.updateMask(wMask).reduceRegion( reducer = ee.Reducer.mean(), geometry = ROI, scale = 1000, maxPixels = 6098838800, tileScale = 16 ) wPixelStd = vv.updateMask(wMask).reduceRegion( reducer = ee.Reducer.stdDev(), geometry = ROI, scale = 1000, maxPixels = 6098838800, tileScale = 16 ) lPixelmean = vv.updateMask(wMask.Not()).reduceRegion( reducer = ee.Reducer.mean(), geometry = ROI, scale = 1000, maxPixels = 6098838800, tileScale = 16 ) lPixelStd = vv.updateMask(wMask.Not()).reduceRegion( reducer = ee.Reducer.stdDev(), geometry = ROI, scale = 1000, maxPixels = 6098838800, tileScale = 16 ) inPixelmean = vv.reduceRegion( reducer = ee.Reducer.mean(), geometry = roi, scale = 1000, maxPixels = 6098838800, tileScale = 16 ) inPixelStd = vv.reduceRegion( reducer = ee.Reducer.stdDev(), geometry = roi, scale = 1000, maxPixels = 6098838800, tileScale = 16 ) outPixelmean = vv.reduceRegion( reducer = ee.Reducer.mean(), geometry = ROI_Diff, scale = 1000, maxPixels = 6098838800, tileScale = 16 ) outPixelStd = vv.updateMask(wMask.Not()).reduceRegion( reducer = ee.Reducer.stdDev(), geometry = ROI_Diff, scale = 300, maxPixels = 6098838800, tileScale = 16 ) img = img.set('wPixelmean',wPixelmean.get('VV_smooth')) img = img.set('wPixelStd',wPixelStd.get('VV_smooth')) img = img.set('lPixelmean',lPixelmean.get('VV_smooth')) img = img.set('lPixelStd',lPixelStd.get('VV_smooth')) img = img.set('inPixelmean',inPixelmean.get('VV_smooth')) img = img.set('inPixelStd',inPixelStd.get('VV_smooth')) img = img.set('outPixelmean',outPixelmean.get('VV_smooth')) img = img.set('outPixelStd',outPixelStd.get('VV_smooth')) return img def makeBackScatterFromJRC(img): img = ee.Image(img) waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence') waterConfident = waterProb.gte(wProbThresh) landConfident = (ee.Image.constant(0).blend(waterProb)).Not() vv = img.select('VV_smooth') wMean = vv.updateMask(waterConfident).reduceRegion( reducer = ee.Reducer.mean(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16 ) wStd = vv.updateMask(waterConfident).reduceRegion( reducer = ee.Reducer.stdDev(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16 ) lMean = vv.updateMask(landConfident).reduceRegion( reducer = ee.Reducer.mean(), geometry = ROI, scale 
= 100, maxPixels = 6098838800, tileScale = 16 ) lStd = vv.updateMask(landConfident).reduceRegion( reducer = ee.Reducer.stdDev(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16 ) img = img.set('wMean',wMean.get('VV_smooth')).set('lMean',lMean.get('VV_smooth')) img = img.set('wStd',wStd.get('VV_smooth')).set('lStd',lStd.get('VV_smooth')) return img #****Round time********************************************************************* def Roundtime(img): I = ee.Image(img) time = ee.Number(I.get('system:time_start')).round() return(I.set('system:time_start',time)) #****Caclulate Threshold************************************************************** def calcThresh(img): img = ee.Image(img) wMean = ee.Number(img.get('wMean')) wStd = ee.Number(img.get('wStd')) lMean = ee.Number(img.get('lMean')) lStd = ee.Number(img.get('lStd')) x = (lMean.subtract(wMean)).divide(wStd.add(lStd)) wThresh = wMean.add(wStd.multiply(x)) return img.set('wThresh',wThresh) #****Caclulate Errors************************************************************* def calcError(img): img = ee.Image(img) waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence') waterConfident = waterProb.gte(wProbThresh) landConfident = (ee.Image.constant(0).blend(waterProb)).Not() vv = img.select('VV_smooth') thresh = ee.Number(img.get('wThresh')) wError = ee.Number(vv.gt(thresh).updateMask(waterConfident).rename('wError').multiply(AreaImg).reduceRegion( reducer = ee.Reducer.sum(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16 ).get('wError')) lError = ee.Number(vv.lt(thresh).updateMask(landConfident).rename('lError').multiply(AreaImg).reduceRegion( reducer = ee.Reducer.sum(), geometry = ROI, scale = 100, maxPixels = 6098838800, tileScale = 16 ).get('lError')) #wError = wError.divide(waterConfidentArea.subtract(wError)) #lError = lError.divide(landConfidentArea.subtract(lError)) return img.set('wError',wError).set('lError',lError) def calcError2(img): img = ee.Image(img) wError = ee.Number(img.get('wError')) lError = ee.Number(img.get('lError')) wError = wError.divide(waterConfidentArea.subtract(wError)) lError = lError.divide(landConfidentArea.subtract(lError)) return img.set('wError2',wError).set('lError2',lError) #****Run Functions****************************************************************** S1 = ee.ImageCollection(dates.map(create_collection,True)) S1 = S1.set('wProbThresh',wProbThresh) S1 = S1.filter(ee.Filter.gt('Number_of_images',0)) S1 = S1.map(calcArea) S1 = S1.filter(ee.Filter.gt('ROI_area',ee.Number(ROI.area().multiply(0.95)))) S1 = S1.map(focal_median) #S1 = S1.map(Roundtime) S1 = S1.map(makeBackScatterFromJRC) S1 = S1.filter(ee.Filter.gt('wMean',-9999)) S1 = S1.filter(ee.Filter.gt('lMean',-9999)) S1 = S1.filter(ee.Filter.gt('wStd',-9999)) S1 = S1.filter(ee.Filter.gt('lStd',-9999)) S1 = S1.map(calcThresh) S1 = S1.map(calcError) S1 = S1.filter(ee.Filter.lt('wError',9999999999999)) S1 = S1.filter(ee.Filter.lt('lError',9999999999999)) S1 = S1.map(calcError2) S1 = S1.filter(ee.Filter.lt('wError2',9999999999999)) S1 = S1.filter(ee.Filter.lt('lError2',9999999999999)) S1 = S1.map(MakeWaterMask) #S1 = S1.map(makeBackscatterStats) #****Extract Time Series*************************************************************** def extractTimeSeries(collection): WaterArea = ee.Array(collection.aggregate_array('Water_Area')).multiply(0.000001) #Conversion to km^2 time = ee.Array(collection.aggregate_array('system:time_start')) wMean = ee.Array(collection.aggregate_array('wMean')) wStd = 
ee.Array(collection.aggregate_array('wStd')) lMean = ee.Array(collection.aggregate_array('lMean')) lStd = ee.Array(collection.aggregate_array('lStd')) wProbThresh = collection.get('wProbThresh') ROIArea = ee.Array(collection.aggregate_array('ROI_area')).multiply(0.000001) WThresh = ee.Array(collection.aggregate_array('wThresh')) WError = WaterArea.multiply(ee.Array(collection.aggregate_array('wError2'))) LError = ee.Array(collection.aggregate_array('lError2')).multiply(ROIArea.subtract(WaterArea)) exportDict = ee.Dictionary({ 'Date': time, 'WaterArea': WaterArea, 'WThresh': WThresh, 'LakeID': ID, 'WError': WError, 'LError': LError }) exportTable = ee.Feature(None, exportDict) return exportTable Export = ee.Algorithms.If( condition = S1.size().gt(0), trueCase = ee.Feature(extractTimeSeries(S1)), falseCase = None ) return Export #return([WaterAreaA,WaterAreaD,timeA,timeD,wPixelmeanA, wPixelStdA, lPixelmeanA, lPixelStdA, wPixelmeanD, wPixelStdD, lPixelmeanD, lPixelStdD, inPixelmeanA, inPixelStdA, outPixelmeanA, outPixelStdA, inPixelmeanD, inPixelStdD, outPixelmeanD, outPixelStdD ])
2.921875
3
tests/test_error_messages.py
tberlok/psecas
10
12799723
<filename>tests/test_error_messages.py import numpy as np from psecas import Solver, System from psecas import ChebyshevExtremaGrid import pytest """ We set up systems with errors, and see if Psecas gives a reasonable error, i.e., a NameError. """ # Create grid N = 32 grid = ChebyshevExtremaGrid(N, -1, 1) # Create a solver object def test_parser_findmatrices(verbose=False): """ Here we add the value A to the equation without setting it in the system. This should return a NameError """ # Create system system = System(grid, variables='f', eigenvalue='sigma') # Add the first (and only) equation system.add_equation("sigma*z*f = dz(dz(f)) + 2*A*f") with pytest.raises(NameError) as e_info: solver = Solver(grid, system) if verbose: print(str(e_info.value)) def test_parser_boundaries(verbose=False): """ Here we add the value B to the boundary without setting it in the system. This should return a NameError """ # Create system system = System(grid, variables='f', eigenvalue='sigma') # Add the first (and only) equation system.add_equation("sigma*z*f = dz(dz(f))") system.add_boundary('f', 'Dirichlet', 'B*f = 0') solver = Solver(grid, system) with pytest.raises(NameError) as e_info: # The error is found when the solve method is called solver.solve() if verbose: print(str(e_info.value)) if __name__ == '__main__': test_parser_findmatrices(True) test_parser_boundaries(True)
2.796875
3
deepblink/cli/_check.py
BioinfoTongLI/deepBlink
13
12799724
<reponame>BioinfoTongLI/deepBlink """CLI submodule for checking image shapes.""" import logging import os import textwrap from ..io import load_image from ..util import predict_shape class HandleCheck: """Handle checking submodule for CLI. Args: arg_input: Path to image. logger: Verbose logger. """ def __init__(self, arg_input: str, logger: logging.Logger): self.raw_input = arg_input self.logger = logger self.logger.info("\U0001F537 starting checking submodule") self.abs_input = os.path.abspath(self.raw_input) def __call__(self) -> None: """Run check for input image.""" print( textwrap.dedent( f""" 1. Your image has a shape of: {self.image.shape} ---------- 2. Possible parameters \U000027A1 x, y: single 2D image used for one prediction \U000027A1 z: third (height) dimension \U000027A1 c: color channels \U000027A1 t: time dimension \U000027A1 3: RGB color stack ---------- 3. By default we would assign: "({predict_shape(self.image.shape)})" \U0001F449 If this is incorrect, please provide the proper shape using the --shape flag to the submodule predict in deepblink's command line interface """ ) ) @property def image(self): """Load a single image.""" return load_image(self.abs_input)
2.625
3
pnad/transformer/economy.py
fabiommendes/pnad
1
12799725
from .base import IncomeField, FunctionField, sum_na, Field class IncomeDataMixin: """ All income-related variables """ year: int # # Main job # income_work_main_money_fixed = IncomeField({ (1992, ...): 'V9532', (1981, 1990): 'V537', 1979: 'V2318', 1978: 'V2426', 1977: 'V75', 1976: 'V2308', }, descr='Fixed monthly salary') income_work_main_money_variable = IncomeField({ (1981, ...): None, # do not ask this in recent surveys 1979: 'V2338', 1978: 'V2446', 1977: 'V76', 1976: 'V2358', }, descr='Variable part of monthly salary') income_work_main_products = IncomeField({ (1992, ...): 'V9535', (1981, 1990): 'V538', 1979: 'V2339', 1978: 'V2447', 1977: 'V77', 1976: 'V2359', }, descr='Salary received in products') @FunctionField def income_work_main_money(self, df): """Sum of fixed + variable money income from main job""" return sum_na(df.income_work_main_money_variable, df.income_work_main_money_fixed) @FunctionField def income_work_main(self, df): """Total income from main work""" # Also computed in PNAD as V4718 (yr > 1992) return sum_na(df.income_work_main_money, df.income_work_main_products) # # Secondary job # income_work_secondary_money_fixed = IncomeField( {(1992, ...): 'V9982', (1980, 1990): None, 1979: 'V2427', (..., 1978): None, }, descr='Salary of secondary job (fixed part)') income_work_secondary_money_variable = IncomeField( {(1980, ...): None, 1979: 'V2457', (..., 1978): None, }, descr='Salary of secondary job (variable)') income_work_secondary_products = IncomeField( {(1992, ...): 'V9985', (1980, 1990): None, 1979: 'V2458', (..., 1978): None, }, descr='Salary of secondary job (products)') @FunctionField def income_work_secondary_money(self, df): """Total income from secondary job""" return sum_na(df.income_work_secondary_money_fixed, df.income_work_secondary_money_variable) @FunctionField def income_work_secondary(self, df): """Total income from secondary job""" return sum_na(df.income_work_secondary_money, df.income_work_secondary_products) # # Other jobs # income_work_extra_money_fixed = IncomeField({ (1992, ...): 'V1022', (1981, 1990): 'V549', 1979: 'V2319', 1978: 'V2428', 1977: 'V85', 1976: 'V2362', }, descr='') income_work_extra_money_variable = IncomeField({ (1992, ...): None, (1981, 1990): None, 1979: 'V2349', 1978: 'V2468', 1977: 'V86', 1976: None, }, descr='') income_work_extra_products = IncomeField({ (1992, ...): 'V1025', (1981, 1990): 'V550', 1979: 'V2350', 1978: 'V2469', 1977: 'V87', 1976: None, }, descr='') @FunctionField def income_work_extra_money(self, df): """Total income from jobs other than primary and secondary""" return sum_na(df.income_work_extra_money_fixed, df.income_work_extra_money_variable) @FunctionField def income_work_extra(self, df): """Total income from jobs other than primary and secondary""" return sum_na(df.income_work_extra_money, df.income_work_extra_products) @FunctionField def income_work_other_money(self, df): """Total income from jobs other than primary and secondary""" return sum_na(df.income_work_extra_money, df.income_work_secondary_money) @FunctionField def income_work_other_products(self, df): """Total income from jobs other than primary and secondary""" return sum_na(df.income_work_extra_products, df.income_work_secondary_products) @FunctionField def income_work_other(self, df): """Total income from jobs other than primary and secondary""" return sum_na(df.income_work_other_money, df.income_work_other_products) @FunctionField def income_work_money(self, df): """Total income from jobs other than primary and secondary""" return 
sum_na(df.income_work_extra_money, df.income_work_main_money) @FunctionField def income_work_products(self, df): """Total income in products from all jobs""" return sum_na(df.income_work_extra_products, df.income_work_main_products) # # Social security # income_retirement_main = IncomeField({ (1992, ...): 'V1252', (1981, 1990): 'V578', 1979: 'V2350', 1978: 'V2479', 1977: 'V90', 1976: 'V2365', # actually, this is retirement + pension }, descr='') income_retirement_other = IncomeField({ (1992, ...): 'V1258', (..., 1990): None, }, descr='') income_pension_main = IncomeField({ (1992, ...): 'V1255', (1981, 1990): 'V579', 1979: None, 1978: 'V2480', 1977: 'V91', 1976: None, }, descr='') income_pension_other = IncomeField({ (1992, ...): 'V1261', (..., 1990): None, }, descr='') income_permanence_bonus = IncomeField( {(1992, ...): 'V1264', (1981, 1990): 'V580', (..., 1979): None}, descr='Paid to workers that can retire, but decide to continue working', ) @FunctionField def income_pension(self, df): return sum_na(df.income_pension_main, df.income_pension_other) @FunctionField def income_retirement(self, df): return sum_na(df.income_retirement_main, df.income_retirement_other) @FunctionField def income_social(self, df): return sum_na(df.income_pension, df.income_retirement, df.income_permanence_bonus) # # Capital income # income_rent = IncomeField({ (1992, ...): 'V1267', (1981, 1990): 'V581', 1979: 'V2363', 1978: 'V2482', 1977: 'V93', 1976: 'V2363', }, descr='') income_investments = IncomeField({ (1992, ...): 'V1273', (..., 1990): None, # does it have a better description? # OUTR. REC. EMPREG.CAPITA 1979: 'V2361', 1978: 'V2483', 1977: 'V95', 1976: None, }, descr='All sources of financial yield except for rents') @FunctionField def income_capital(self, df): """All sources of capital income""" total = sum_na(df.income_rent, df.income_investments) if self.year == 1977: total = sum_na(total, df.V94, df.V97) return total # Other income sources #################################################### income_other = IncomeField({ (1992, ...): None, (1981, 1990): 'V582', (1978, 1979): None, 1977: 'V96', 1976: 'V2366', }, descr='') income_donation = IncomeField({ (1992, ...): 'V1270', (1981, 1990): None, 1979: 'V2362', 1978: 'V2481', 1977: 'V92', 1976: 'V2364', }, descr='') @FunctionField def income_misc(self, df): return sum_na(df.income_donation, df.income_other) # # Total incomes # @FunctionField def income_work(self, df): # @NoSelf """Sum of all income sources due to labor""" # Also computed in PNAD as V4719 (yr > 1992) total = sum_na(df.income_work_main, df.income_work_other) # These are used to quantify total income due to work for people who # do not want to declare each job separately if self.year > 1992 and 'V7122' in df and 'V7125' in df: T = IncomeField.remove_missing total = sum_na(total, T(df.V7122), T(df.V7125)) return total @FunctionField def income(self, df): """Total income of an individual""" return sum_na(df.income_work, df.income_social, df.income_capital, df.income_misc) # # Family and household # income_household = IncomeField( {(1992, ...): 'V4721', (1981, 1990): 'V410', (..., 1979): None, }, descr='') income_family = IncomeField( {(1992, ...): 'V4722', (1981, 1990): 'V5010', (..., 1979): None, }, descr='') income_household_per_capta = IncomeField( {(1992, ...): 'V4742', (1981, 1990): None, (..., 1979): None, }, descr='') income_family_per_capta = IncomeField( {(1992, ...): 'V4750', (1981, 1990): None, (..., 1979): None, }, descr='') class OccupationDataMixin: occupation_week = 
Field({(1992, ...): 'V9906', (..., 1990): 'V503', }, descr='Occupation at the week the survey was taken') occupation_year = Field({(1992, ...): 'V9971', (..., 1990): None, }, descr='') occupation_secondary = Field({(1992, ...): 'V9990', (..., 1990): None, }, descr='') occupation_previous = Field({(1992, ...): 'V9910', (..., 1990): None, }, descr='') occupation_first = Field({(1992, ...): 'V1298', (..., 1990): None, }, descr='') occupation_father = Field({(1992, ...): 'V1293', (..., 1990): None, }, descr='') occupation_father_previous = Field({(1992, ...): 'V1258', (..., 1990): None, }, descr='') is_occupied = Field({(1992, ...): 'V4705', (..., 1990): None, }, descr='') is_active = Field({(1992, ...): 'V4704', (..., 1990): None, }, descr='') work_duration = Field({(1992, ...): 'V4707', (..., 1990): None, }, descr='') @FunctionField def occupation(self, df): return df.occupation_week # TODO: improve this!
2.890625
3
Macroeconomia/Argentina/IndicadoresEmpleoDesocupacion.py
alejivo/Macroeconomics
0
12799726
<reponame>alejivo/Macroeconomics
import pandas as pd
import io
import requests
import json


class IndicadoresEmpleoDesocupacion:

    def __init__(self):
        """
        The employment and unemployment indicators are largely based on the
        EPH (permanent household survey), which covers 31 urban agglomerations.
        """
        pass

    def getTasaActividad(self, periodo="Anual"):
        """
        The activity rate is PEA (economically active population) / total population.
        It is taken as an indicator of the labor supply offered by workers.

        Parameters
        ----------
        periodo : str, optional (can be "Anual" or "Trimestral")
            The default is "Anual".

        Returns
        -------
        pd.DataFrame().
        """
        # Get the download URL of the csv
        urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-actividad"
        s = requests.get(urlPackage).content
        objJson = json.loads(s)
        resultado = objJson['result']['resources']
        selector = 1 if periodo == "Trimestral" else 0  # anything other than quarterly is treated as annual
        ultimoResultado = resultado[selector]
        urlDescarga = ultimoResultado['url']
        descripcion = ultimoResultado['description']
        print("Downloading: {}".format(descripcion))
        print("File: {}".format(urlDescarga))

        # Download the csv URL and build a pandas dataframe
        contenidoCVS = requests.get(urlDescarga).content
        flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
        df_temp = pd.read_csv(flujoCVS)
        # transform string to datetime
        df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')
        df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
        # set index
        df_temp.set_index('indice_tiempo', inplace=True)
        return df_temp

    def getTasaEmpleo(self, periodo="Anual"):
        """
        The employment rate is computed as employed population / total population.
        It is taken as a rate representative of the labor demand exerted by firms.

        Parameters
        ----------
        periodo : str, optional (can be "Anual" or "Trimestral")
            The default is "Anual".

        Returns
        -------
        pd.DataFrame().
        """
        # Get the download URL of the csv
        urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-empleo"
        s = requests.get(urlPackage).content
        objJson = json.loads(s)
        resultado = objJson['result']['resources']
        selector = 1 if periodo == "Trimestral" else 0  # anything other than quarterly is treated as annual
        ultimoResultado = resultado[selector]
        urlDescarga = ultimoResultado['url']
        descripcion = ultimoResultado['description']
        print("Downloading: {}".format(descripcion))
        print("File: {}".format(urlDescarga))

        # Download the csv URL and build a pandas dataframe
        contenidoCVS = requests.get(urlDescarga).content
        flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
        df_temp = pd.read_csv(flujoCVS)
        # transform string to datetime
        df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')
        df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
        # set index
        df_temp.set_index('indice_tiempo', inplace=True)
        return df_temp

    def getTasaDesocupacion(self, periodo="Anual"):
        """
        Computed as: unemployed population / PEA.

        Parameters
        ----------
        periodo : str, optional (can be "Anual" or "Trimestral")
            The default is "Anual".

        Returns
        -------
        pd.DataFrame().
        """
        # Get the download URL of the csv
        urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-desempleo"
        s = requests.get(urlPackage).content
        objJson = json.loads(s)
        resultado = objJson['result']['resources']
        selector = 1 if periodo == "Trimestral" else 0  # anything other than quarterly is treated as annual
        ultimoResultado = resultado[selector]
        urlDescarga = ultimoResultado['url']
        descripcion = ultimoResultado['description']
        print("Downloading: {}".format(descripcion))
        print("File: {}".format(urlDescarga))

        # Download the csv URL and build a pandas dataframe
        contenidoCVS = requests.get(urlDescarga).content
        flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
        df_temp = pd.read_csv(flujoCVS)
        # transform string to datetime
        df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')
        df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
        # set index
        df_temp.set_index('indice_tiempo', inplace=True)
        return df_temp

    def getTasaSubocupacionDemandante(self, periodo="Anual"):
        """
        Computed as: job-seeking underemployed population / PEA.

        Parameters
        ----------
        periodo : str, optional (can be "Anual" or "Trimestral")
            The default is "Anual".

        Returns
        -------
        pd.DataFrame().
        """
        # Get the download URL of the csv
        urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-subocupacion-demandante"
        s = requests.get(urlPackage).content
        objJson = json.loads(s)
        resultado = objJson['result']['resources']
        selector = 1 if periodo == "Trimestral" else 0  # anything other than quarterly is treated as annual
        ultimoResultado = resultado[selector]
        urlDescarga = ultimoResultado['url']
        descripcion = ultimoResultado['description']
        print("Downloading: {}".format(descripcion))
        print("File: {}".format(urlDescarga))

        # Download the csv URL and build a pandas dataframe
        contenidoCVS = requests.get(urlDescarga).content
        flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
        df_temp = pd.read_csv(flujoCVS)
        # transform string to datetime
        df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')
        df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
        # set index
        df_temp.set_index('indice_tiempo', inplace=True)
        return df_temp

    def getTasaSubocupacionNoDemandante(self, periodo="Anual"):
        """
        Computed as: non-job-seeking underemployed population / PEA.

        Parameters
        ----------
        periodo : str, optional (can be "Anual" or "Trimestral")
            The default is "Anual".

        Returns
        -------
        pd.DataFrame().
        """
        # Get the download URL of the csv
        urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-principales-variables-ocupacionales-eph-continua-subocupacion-no-demandante"
        s = requests.get(urlPackage).content
        objJson = json.loads(s)
        resultado = objJson['result']['resources']
        selector = 1 if periodo == "Trimestral" else 0  # anything other than quarterly is treated as annual
        ultimoResultado = resultado[selector]
        urlDescarga = ultimoResultado['url']
        descripcion = ultimoResultado['description']
        print("Downloading: {}".format(descripcion))
        print("File: {}".format(urlDescarga))

        # Download the csv URL and build a pandas dataframe
        contenidoCVS = requests.get(urlDescarga).content
        flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
        df_temp = pd.read_csv(flujoCVS)
        # transform string to datetime
        df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')
        df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
        # set index
        df_temp.set_index('indice_tiempo', inplace=True)
        return df_temp

    def getIndiceSalariosBase2016(self):
        """
        An index that estimates the evolution of wages in the economy.
        Base: October 2016.

        Returns
        -------
        pd.DataFrame().
        """
        # Get the download URL of the csv
        urlPackage = "https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-salarios-base-octubre-2016"
        s = requests.get(urlPackage).content
        objJson = json.loads(s)
        resultado = objJson['result']['resources']
        selector = 0  # not quarterly or monthly, so always annual
        ultimoResultado = resultado[selector]
        urlDescarga = ultimoResultado['url']
        descripcion = ultimoResultado['description']
        print("Downloading: {}".format(descripcion))
        print("File: {}".format(urlDescarga))

        # Download the csv URL and build a pandas dataframe
        contenidoCVS = requests.get(urlDescarga).content
        flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
        df_temp = pd.read_csv(flujoCVS)
        # transform string to datetime
        df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')
        df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date
        # set index
        df_temp.set_index('indice_tiempo', inplace=True)
        return df_temp
3.4375
3
cs15211/KthLargestElementinaStream.py
JulyKikuAkita/PythonPrac
1
12799727
__source__ = 'https://leetcode.com/problems/kth-largest-element-in-a-stream/' # Time: O() # Space: O() # # Description: Leetcode # 703. Kth Largest Element in a Stream # # Design a class to find the kth largest element in a stream. # Note that it is the kth largest element in the sorted order, not the kth distinct element. # # Your KthLargest class will have a constructor which accepts an integer k and an integer array nums, # which contains initial elements from the stream. For each call to the method KthLargest.add, # return the element representing the kth largest element in the stream. # # Example: # # int k = 3; # int[] arr = [4,5,8,2]; # KthLargest kthLargest = new KthLargest(3, arr); # kthLargest.add(3); // returns 4 # kthLargest.add(5); // returns 5 # kthLargest.add(10); // returns 5 # kthLargest.add(9); // returns 8 # kthLargest.add(4); // returns 8 # Note: # You may assume that nums length >= k-1 and k >= 1. # import unittest class Solution(object): pass # your function here class TestMethods(unittest.TestCase): def test_Local(self): self.assertEqual(1, 1) if __name__ == '__main__': unittest.main() Java = ''' # Thought: # use PQ # 77ms 83.97% class KthLargest { private PriorityQueue<Integer> pq; private int k; public KthLargest(int k, int[] nums) { this.k = k; this.pq = new PriorityQueue<>(); for (int num : nums) { pq.offer(num); if (pq.size() > k) pq.poll(); } } public int add(int val) { pq.offer(val); if (pq.size() > k) pq.poll(); return pq.peek(); } } # use BST # 428ms 5.74% class KthLargest { TreeNode root; private int k; public KthLargest(int k, int[] nums) { this.k = k - 1; for (int n : nums) { root = insert(root, n); } } public int add(int val) { root = insert(root, val); return findKthLargest(k, root); } private int findKthLargest(int k, TreeNode root) { if (root == null) return -1; if (root.mRightSum == k) return root.mVal; if (root.mRightSum > k) { return findKthLargest(k, root.right); } else { return findKthLargest(k - root.mRightSum - 1, root.left); } } private TreeNode insert(TreeNode root, int val) { if (root == null) return new TreeNode(val, 0); if (val < root.mVal) { root.left = insert(root.left, val); } else { root.mRightSum++; root.right = insert(root.right, val); } return root; } private class TreeNode { int mVal; int mRightSum; TreeNode left; TreeNode right; TreeNode(int val, int rightSum) { mVal = val; mRightSum = rightSum; } } } /** * Your KthLargest object will be instantiated and called as such: * KthLargest obj = new KthLargest(k, nums); * int param_1 = obj.add(val); */ '''
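The Python Solution stub above is left empty; a minimal heap-based sketch of the same idea as the Java PriorityQueue version (keep a min-heap of size k, whose root is always the kth largest seen so far):

import heapq

class KthLargest(object):
    def __init__(self, k, nums):
        self.k = k
        self.heap = []
        for n in nums:
            heapq.heappush(self.heap, n)
            if len(self.heap) > k:
                heapq.heappop(self.heap)

    def add(self, val):
        heapq.heappush(self.heap, val)
        if len(self.heap) > self.k:
            heapq.heappop(self.heap)
        return self.heap[0]

# reproduces the example from the problem statement
kth = KthLargest(3, [4, 5, 8, 2])
assert [kth.add(v) for v in (3, 5, 10, 9, 4)] == [4, 5, 5, 8, 8]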
3.875
4
examples/nv-hpc-sdk_test.py
owainkenwayucl/gepy
0
12799728
<gh_stars>0 #!/usr/bin/env python3 # This script does a test of a particular nvidia compiler on Myriad import sys import os import copy import time import gepy import gepy.executor compiler_module = 'compilers/nvhpc/21.11' repo = 'https://github.com/UCL-RITS/pi_examples.git' if (len(sys.argv) > 1): compiler_module = sys.argv[1] print('Generating job scripts for compiler module: ' + compiler_module) template_job = gepy.job(name='nvtest') template_job.modules.append('personal-modules') template_job.modules.append('testing-modules') template_job.modules.append(compiler_module) template_job.add_resource('gpu','1') template_job.set_node_classes('EFL') tmp_dir = 'nvtest_'+str(time.time()) os.mkdir(tmp_dir) template_job.location=os.getcwd() + '/' + tmp_dir status = gepy.executor.run(['git', 'clone', repo, tmp_dir + '/pi_examples']) if (status.returncode != 0): sys.exit('Error cloning repo: ' + status.stderr) template_job.workload.append(gepy.serial_command('cd ', ['pi_examples'])) # Right, that's the repo cloned and a template job created. doconc_job = copy.deepcopy(template_job) cudaf_job = copy.deepcopy(template_job) openmp_job = copy.deepcopy(template_job) openacc_job = copy.deepcopy(template_job) # do concurrent test doconc_job.workload.append(gepy.serial_command('cd ', ['fortran_do_concurrent_pi_dir'])) doconc_job.workload.append(gepy.serial_command('make ', ['clean'])) doconc_job.workload.append(gepy.serial_command('make ', ['nvhpc'])) doconc_job.workload.append(gepy.serial_command('./pi', [])) doconc_job.name = template_job.name + 'doconc' # cuda fortran test cudaf_job.workload.append(gepy.serial_command('cd ', ['cudafortran_pi_dir'])) cudaf_job.workload.append(gepy.serial_command('make ', ['clean'])) cudaf_job.workload.append(gepy.serial_command('make ', [])) cudaf_job.workload.append(gepy.serial_command('./pi', [])) cudaf_job.name = template_job.name + 'cudaf' # openmp fortran test openmp_job.workload.append(gepy.serial_command('cd ', ['fortran_omp_pi_dir'])) openmp_job.workload.append(gepy.serial_command('make ', ['clean'])) openmp_job.workload.append(gepy.serial_command('make ', ['nvhpc_offload'])) openmp_job.workload.append(gepy.serial_command('./pi_gpu', [])) openmp_job.name = template_job.name + 'openmp' # openacc fortran test openacc_job.workload.append(gepy.serial_command('cd ', ['fortran_openacc_pi_dir'])) openacc_job.workload.append(gepy.serial_command('make ', ['clean'])) openacc_job.workload.append(gepy.serial_command('make ', ['-f', 'Makefile.myriad', 'pi'])) openacc_job.workload.append(gepy.serial_command('./pi', [])) openacc_job.name = template_job.name + 'openacc' print('Submitting jobs') j,t = gepy.executor.qsub(doconc_job.get_job_script()) j,t = gepy.executor.qsub(cudaf_job.get_job_script()) j,t = gepy.executor.qsub(openmp_job.get_job_script()) j,t = gepy.executor.qsub(openacc_job.get_job_script()) print('Done')
2.140625
2
robotino_ros/src/twist_motor_omni.py
aadi-mishra/robotino
2
12799729
<filename>robotino_ros/src/twist_motor_omni.py #!/usr/bin/env python import rospy import roslib import math from std_msgs.msg import Float32,Int32, UInt8MultiArray, Bool, String from geometry_msgs.msg import Twist from sensor_msgs.msg import Range import numpy as np from numpy import linalg as al class TwistToMotors(): def __init__(self): rospy.init_node("twist_to_motors") nodename = rospy.get_name() rospy.loginfo("%s started" % nodename) self.M_PI = math.pi self.motor_velocities = [] # Create publishers that publishes target velocity to the PID controller self.pub_lmotor = rospy.Publisher('lwheel_vtarget', Float32,queue_size=10) self.pub_rmotor = rospy.Publisher('rwheel_vtarget', Float32,queue_size=10) self.pub_bmotor = rospy.Publisher('bwheel_vtarget', Float32,queue_size=10) # Subscribe to the velocity commands from teleop rospy.Subscriber('/robotino/cmd_vel', Twist, self.twistCallback, queue_size=10) self.rate = rospy.get_param("~rate", 100) self.timeout_ticks = rospy.get_param("~timeout_ticks", 100) self.left = 0 self.right = 0 self.back = 0 def spin(self): r = rospy.Rate(self.rate) idle = rospy.Rate(100) self.ticks_since_target = self.timeout_ticks while not rospy.is_shutdown(): while not rospy.is_shutdown() and self.ticks_since_target < self.timeout_ticks: self.spinOnce() r.sleep() idle.sleep() def spinOnce(self): # Calculating the individual motor velocity for a motion command angle_mat = np.array([[math.cos(30*(self.M_PI/180)), math.cos(150*(self.M_PI/180)), math.cos(90*(self.M_PI/180))], [-math.sin(30*(self.M_PI/180)), -math.sin(150*(self.M_PI/180)),math.sin(90*(self.M_PI/180))], [1, 1, 1]]) angle_mat_inv = al.inv(angle_mat) [v_r, v_l, v_b] = np.dot(angle_mat_inv, np.array([self.dx, self.dy, self.dr])) # Assigning the calculated velocities to each motors self.right = v_r self.left = v_l self.back = v_b self.pub_lmotor.publish(self.left) self.pub_rmotor.publish(self.right) self.pub_bmotor.publish(self.back) self.ticks_since_target += 1 # Callback function def twistCallback(self,msg): self.ticks_since_target = 0 self.dx = msg.linear.x self.dr = msg.angular.z self.dy = msg.linear.y if __name__ == '__main__': """ main """ twistToMotors = TwistToMotors() twistToMotors.spin()
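A standalone check of the inverse-kinematics step above, solving the same 3x3 system directly for a pure forward command (1 m/s in x, no sideways or rotational motion); the numbers are only meant to show how the command splits across the three wheels:

import math
import numpy as np

pi = math.pi
A = np.array([[math.cos(pi / 6), math.cos(5 * pi / 6), math.cos(pi / 2)],
              [-math.sin(pi / 6), -math.sin(5 * pi / 6), math.sin(pi / 2)],
              [1.0, 1.0, 1.0]])
v_r, v_l, v_b = np.linalg.solve(A, np.array([1.0, 0.0, 0.0]))
print(v_r, v_l, v_b)  # roughly 0.289, -0.866, 0.577; the three speeds sum to zero (no net rotation)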
2.5
2
ai.py
AyushAryal/chess
9
12799730
<gh_stars>1-10
from stockfish import Stockfish


def get_best_move(board):
    stockfish = Stockfish()
    move_list = []
    for move in board.moves:
        move_list.append(move.to_long_algebraic())
    stockfish.set_position(move_list)
    return long_algebraic_to_coordinate(stockfish.get_best_move())


def long_algebraic_to_coordinate(move):
    # e.g. "e2e4" -> ((6, 4), (4, 4), '')
    initial, final, promotion_piece = move[:2], move[2:4], move[4:]
    i_x, i_y = initial[1], initial[0]
    f_x, f_y = final[1], final[0]
    i_y = ord(i_y) - ord("a")
    f_y = ord(f_y) - ord("a")
    i_x = 8 - int(i_x)
    f_x = 8 - int(f_x)
    return ((i_x, i_y), (f_x, f_y), promotion_piece)
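# A hypothetical usage sketch exercising only the pure conversion function
# (assumes the stockfish package is installed so the import above succeeds);
# expected tuples verified by hand from the rank/file arithmetic:
if __name__ == "__main__":
    assert long_algebraic_to_coordinate("e2e4") == ((6, 4), (4, 4), "")
    assert long_algebraic_to_coordinate("a7a8q") == ((1, 0), (0, 0), "q")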
2.75
3
src/service/start.py
Ellie-Yen/-Python-Tic-Tac-Toe-Game
0
12799731
<gh_stars>0
from ..gameModel import GameModel
from ..appLib.messageFormatters import msgWelcome


async def start(cls: GameModel) -> None:
    return msgWelcome()
1.554688
2
exercicio_py/ex0002_tabuada_multiplicacao/main_v2.py
danielle8farias/Exercicios-Python-3
0
12799732
<gh_stars>0
#!/usr/bin/env python3.8
########
# author: <EMAIL>
# repository: https://github.com/danielle8farias
# Description: The user types an integer and the program prints its multiplication table.
########

import sys
sys.path.append('/home/danielle8farias/hello-world-python3/meus_modulos')
from mensagem import *
from numeros import ler_num_nat

ler_cabecalho('tabuada de multiplicação')

while True:
    num = ler_num_nat('Digite um número: ')
    i = 1
    for i in range(i, 10):
        print(f'{num:4} x {i} = {num*i}')
    print()
    resposta = ' '
    while resposta not in 'SN':
        resposta = ler_resposta('\nDeseja rodar o programa de novo? [S/N] ')
    if resposta == 'N':
        break
    criar_linha()

criar_rodape()
4.0625
4
beverages/models.py
vchrisb/Kudo4Coffee
0
12799733
from django.db import models
from django.utils import timezone


# Create your models here.
class Beverage(models.Model):
    """( description)"""
    created_by = models.ForeignKey('auth.User', on_delete=models.SET_NULL, null=True)
    name = models.CharField(max_length=100)
    key = models.CharField(max_length=100)
    description = models.CharField(max_length=200)
    fill_quantity_min = models.IntegerField()
    fill_quantity_max = models.IntegerField()
    fill_quantity_steps = models.IntegerField()

    def __str__(self):
        return str(self.name)


class BeverageHistory(models.Model):
    bean_amount_choices = (
        ('VeryMild', 'Very mild'),
        ('Mild', 'Mild'),
        ('MildPlus', 'Mild +'),
    )
    temperature_choices = (
        ('88C', '88 °C'),
        ('90C', '90 °C'),
        ('92C', '92 °C'),
    )
    created_by = models.ForeignKey('auth.User', on_delete=models.SET_NULL, null=True)
    created_at = models.DateTimeField(default=timezone.now)
    bean_amount = models.CharField(max_length=100, choices=bean_amount_choices, default='Mild')
    temperature = models.CharField(max_length=100, choices=temperature_choices, default='90C')
    beverage = models.ForeignKey(Beverage, on_delete=models.CASCADE)
2.578125
3
glu/cli.py
chongkong/glu
7
12799734
<reponame>chongkong/glu
import argparse
import json
import yaml
import os
import logging
from collections import OrderedDict

from glu.util import OrderedDictYamlLoader
from glu import create_scope


def load_file(path):
    if path.lower().endswith('.yml') or path.lower().endswith('.yaml'):
        with open(path, 'r') as f:
            return yaml.load(f, Loader=OrderedDictYamlLoader)
    else:
        with open(path, 'r') as f:
            return json.load(f, object_pairs_hook=OrderedDict)


def load_scope_from_file(scope, file_uri):
    path = file_uri
    params = {}
    if '?' in file_uri:
        path, params = file_uri.split('?')
        params = dict(tuple(field_and_value.split('='))
                      for field_and_value in params.split('&'))
    scope.load(load_file(path), **params)


def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--source', required=True, dest='sources', nargs='+',
                        help='Path to the source file')
    parser.add_argument('-e', '--use-env', action='store_true',
                        help='Use environment variable')
    parser.add_argument('-t', '--template', required=True,
                        help='Path to the template file')
    parser.add_argument('-o', '--output', required=True,
                        help='Path for output file')
    return parser.parse_args()


def main():
    args = parse()
    scope = create_scope()
    if args.use_env:
        scope.load(dict(os.environ.items()), load_to='@env')
    for file_uri in args.sources:
        load_scope_from_file(scope, file_uri)
    template = load_file(args.template)
    res = scope.glue(template)
    logging.info('Result:\n{}'.format(json.dumps(res, indent=2)))
    with open(args.output, 'w') as f:
        json.dump(res, f, indent=2, separators=(',', ': '))
        f.write('\n')
2.328125
2
handover/evaluation/test_cases/test_58_std.py
CN-UPB/sharp
2
12799735
<gh_stars>1-10
from ._base import *


class TestCase003(TestCase):
    def __init__(self):
        TestCase.__init__(self,
                          id='003',
                          alt_id='fixed_pps_increasing_pps_58_bytes',
                          description='Fixed PPS. Increasing State Transfer Duration. 58 Byte Packets',
                          pps=1000,
                          packet_size=58,
                          state_duration=[0, 1, 0.1],
                          reports=STD_REPORTS)
1.820313
2
assignments/assignment2/layers.py
INeedTractorPlz/dlcourse_ai
1
12799736
<gh_stars>1-10
import numpy as np
from math import exp, log


def l2_regularization(W, reg_strength):
    """
    Computes L2 regularization loss on weights and its gradient

    Arguments:
      W, np array - weights
      reg_strength - float value

    Returns:
      loss, single value - l2 regularization loss
      gradient, np.array same shape as W - gradient of weight by l2 loss
    """
    # TODO: Copy from the previous assignment
    loss = reg_strength * sum(sum(W**2))
    grad = reg_strength * 2 * W
    return loss, grad


def cross_entropy_loss(probs, target_index):
    '''
    Computes cross-entropy loss

    Arguments:
      probs, np array, shape is either (N) or (batch_size, N) -
        probabilities for every class
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss: single value
    '''
    # TODO implement cross-entropy
    # print("probs:", probs)
    return -log(probs[target_index - 1])


def softmax_with_cross_entropy(predictions, target_index):
    """
    Computes softmax and cross-entropy loss for model predictions,
    including the gradient

    Arguments:
      predictions, np array, shape is either (N) or (N, batch_size) -
        classifier output
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss, single value - cross-entropy loss
      dprediction, np array same shape as predictions -
        gradient of predictions by loss value
    """
    # TODO: Copy from the previous assignment
    # TODO implement softmax with cross-entropy
    # One-dimension option
    if predictions.ndim == 1:
        predictions_ = predictions - np.max(predictions)
        dprediction = np.array(list(map(exp, predictions_)))
        summ = sum(dprediction)
        dprediction /= summ
        loss = cross_entropy_loss(dprediction, target_index)
        dprediction[target_index - 1] -= 1
        return loss, dprediction
    else:
        predictions_ = predictions - np.max(predictions, axis=1)[:, np.newaxis]
        exp_vec = np.vectorize(exp)
        # print("predictions_:", predictions_)
        dprediction = np.apply_along_axis(exp_vec, 1, predictions_)
        # print("dprediction before division: ", dprediction)
        summ = sum(dprediction.T)
        # print("summ: ", summ)
        dprediction /= summ[:, np.newaxis]
        # print("dprediction after division: ", dprediction)
        loss = np.array([cross_entropy_loss(x, y)
                         for x, y in zip(dprediction, target_index)])
        # print("loss: ", loss)
        # print("target_index - 1:", target_index - 1)
        it = np.nditer(target_index - 1, flags=['c_index'])
        while not it.finished:
            # print("it[0] = ", it[0])
            dprediction[it.index, it[0]] -= 1
            it.iternext()
        dprediction /= len(target_index)
        # print("dprediction after subtraction: ", dprediction)
        return loss.mean(), dprediction
    raise Exception("Not implemented!")


class Param:
    """
    Trainable parameter of the model
    Captures both parameter value and the gradient
    """
    def __init__(self, value):
        # self.init = value.copy()
        self.value = value
        self.grad = np.zeros_like(value)


class ReLULayer:
    def __init__(self):
        self.X = None

    def forward(self, X):
        # TODO: Implement forward pass
        # Hint: you'll need to save some information about X
        # to use it later in the backward pass
        self.X = X
        return (X > 0) * X

    def backward(self, d_out):
        """
        Backward pass

        Arguments:
          d_out, np array (batch_size, num_features) -
            gradient of loss function with respect to output

        Returns:
          d_result: np array (batch_size, num_features) -
            gradient with respect to input
        """
        # TODO: Implement backward pass
        # Your final implementation shouldn't have any loops
        return (self.X > 0) * d_out

    def params(self):
        # ReLU Doesn't have any parameters
        return {}


class FullyConnectedLayer:
    def __init__(self, n_input, n_output):
        self.W = Param(0.01 * np.random.randn(n_input, n_output))
        self.B = Param(0.01 * np.random.randn(1, n_output))
        self.X = None

    def forward(self, X):
        # TODO: Implement forward pass
        # Your final implementation shouldn't have any loops
        self.X = X
        # if np.any(self.W.init != self.W.value) or np.any(self.B.init != self.B.value):
        self.W.grad = np.zeros_like(self.W.value)
        self.B.grad = np.zeros_like(self.B.value)
        # self.W.init = self.W.value
        # self.B.init = self.B.value
        return np.dot(self.X, self.W.value) + self.B.value

    def backward(self, d_out):
        """
        Backward pass
        Computes gradient with respect to input and
        accumulates gradients within self.W and self.B

        Arguments:
          d_out, np array (batch_size, n_output) -
            gradient of loss function with respect to output

        Returns:
          d_result: np array (batch_size, n_input) -
            gradient with respect to input
        """
        # TODO: Implement backward pass
        # Compute both gradient with respect to input
        # and gradients with respect to W and B
        # Add gradients of W and B to their `grad` attribute
        # It should be pretty similar to linear classifier from
        # the previous assignment
        dW = np.dot(self.X.T, d_out)
        dB = np.dot(np.ones((1, d_out.shape[0])), d_out)
        d_input = np.dot(d_out, self.W.value.T)
        self.W.grad += dW
        self.B.grad += dB
        return d_input

    def params(self):
        return {'W': self.W, 'B': self.B}
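# A hypothetical smoke test for the layers above: random inputs, shape checks
# only, so it runs with nothing beyond numpy.
if __name__ == "__main__":
    np.random.seed(0)
    x = np.random.randn(4, 3)
    fc = FullyConnectedLayer(3, 5)
    relu = ReLULayer()
    out = relu.forward(fc.forward(x))  # (4, 5)
    d_in = fc.backward(relu.backward(np.ones_like(out)))
    assert d_in.shape == x.shape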
3.4375
3
senergy/fixed_assets/doctype/fixed_asset_request/fixed_asset_request.py
TridotsTech/senergy
0
12799737
<reponame>TridotsTech/senergy
# -*- coding: utf-8 -*-
# Copyright (c) 2020, TeamPRO and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils import cstr, flt, getdate, new_line_sep, nowdate, add_days, get_link_to_form


class FixedAssetRequest(Document):
    pass


def set_missing_values(source, target_doc):
    if target_doc.doctype == "Purchase Order" and getdate(target_doc.schedule_date) < getdate(nowdate()):
        target_doc.schedule_date = None
    target_doc.run_method("set_missing_values")
    target_doc.run_method("calculate_taxes_and_totals")


@frappe.whitelist()
def get_default_supplier_query(doctype, txt, searchfield, start, page_len, filters):
    doc = frappe.get_doc("Fixed Asset Request", filters.get("doc"))
    item_list = []
    for d in doc.items:
        item_list.append(d.item_code)
    return frappe.db.sql("""select default_supplier
        from `tabItem Default`
        where parent in ({0}) and default_supplier IS NOT NULL
        """.format(', '.join(['%s'] * len(item_list))), tuple(item_list))


@frappe.whitelist()
def make_supplier_quotation(source_name, target_doc=None):
    def postprocess(source, target_doc):
        set_missing_values(source, target_doc)

    doclist = get_mapped_doc("Fixed Asset Request", source_name, {
        "Fixed Asset Request": {
            "doctype": "Supplier Quotation",
            "validation": {
                "docstatus": ["=", 1],
                "material_request_type": ["=", "Purchase"]
            }
        },
        "Fixed Asset Request Item": {
            "doctype": "Supplier Quotation Item",
            "field_map": [
                ["name", "fixed_asset_request_item"],
                ["parent", "fixed_asset_request"],
                ["uom", "uom"],
                ["uom", "stock_uom"],
            ]
        }
    }, target_doc, postprocess)

    return doclist


@frappe.whitelist()
def make_request_for_quotation(source_name, target_doc=None):
    doclist = get_mapped_doc("Fixed Asset Request", source_name, {
        "Fixed Asset Request": {
            "doctype": "Request for Quotation",
            "validation": {
                "docstatus": ["=", 1],
                "material_request_type": ["=", "Purchase"]
            }
        },
        "Fixed Asset Request Item": {
            "doctype": "Request for Quotation Item",
            "field_map": [
                ["name", "fixed_asset_request_item"],
                ["parent", "fixed_asset_request"],
                ["uom", "stock_uom"],
                ["uom", "Nos"]
            ]
        }
    }, target_doc)

    return doclist


# def update_item(obj, target, source_parent):
#     target.conversion_factor = obj.conversion_factor
#     target.qty = flt(flt(obj.stock_qty) - flt(obj.ordered_qty)) / target.conversion_factor
#     target.stock_qty = (target.qty * target.conversion_factor)
#     if getdate(target.schedule_date) < getdate(nowdate()):
#         target.schedule_date = None


@frappe.whitelist()
def make_purchase_order(source_name, target_doc=None):
    def postprocess(source, target_doc):
        if frappe.flags.args and frappe.flags.args.default_supplier:
            # items only for given default supplier
            supplier_items = []
            for d in target_doc.items:
                default_supplier = get_item_defaults(d.item_code, target_doc.company).get('default_supplier')
                if frappe.flags.args.default_supplier == default_supplier:
                    supplier_items.append(d)
            target_doc.items = supplier_items
        set_missing_values(source, target_doc)

    # def select_item(d):
    #     frappe.errprint(d)
    #     return d.ordered_qty < d.stock_qty

    doclist = get_mapped_doc("Fixed Asset Request", source_name, {
        "Fixed Asset Request": {
            "doctype": "Purchase Order",
            "validation": {
                "docstatus": ["=", 1],
                "material_request_type": ["=", "Purchase"]
            }
        },
        "Fixed Asset Request Item": {
            "doctype": "Purchase Order Item",
            "field_map": [
                ["name", "fixed_asset_request_item"],
                ["parent", "fixed_asset_request"],
                ["uom", "stock_uom"],
                ["uom", "uom"],
                ["sales_order", "sales_order"],
                ["sales_order_item", "sales_order_item"]
            ],
            # "postprocess": update_item,
            # "condition": select_item
        }
    }, target_doc, postprocess)

    return doclist
1.703125
2
learn/decorator/1.py
PuddingWalker/py_small_projects
0
12799738
<reponame>PuddingWalker/py_small_projects
#########################################################################
# File Name: 1.py
# Author: Walker
# mail: <EMAIL>
# Created Time: Tue Dec 7 14:58:24 2021
#########################################################################
# !/usr/bin/env python3

import time
import threading
import json
import requests


def sum(*numbers):
    total = 0.0
    for number in numbers:
        total += number
    return total


x = 200


def print_value():
    global x
    x = 100
    print('x inside the function = {0}'.format(x))


print_value()
print('global x = {0}'.format(x))
# Output:
# x inside the function = 100
# global x = 100

arr = [n for n in range(100)]


def biger50(n):
    return True if n > 50 else False


print(list(filter(biger50, arr)))

r = requests.get('https://cn.bing.com/')
# print(r.status_code)
# print(r.content)
# print(r.content.decode())
# json.loads(r.content)


def thread_body():
    # current thread
    t = threading.current_thread()
    for n in range(5):
        # current iteration
        print('{0} th excute thread {1}.'.format(n, t.name))
        # thread sleep
        time.sleep(2)
    print('thread {0} Done.'.format(t.name))


# main thread
# create thread object 1
t1 = threading.Thread(target=thread_body)
# object 2
t2 = threading.Thread(target=thread_body, name="MyThread")
# activate thread t1
# t1.start()
# activate thread t2
# t2.start()


class SmallThread(threading.Thread):
    def __init__(self, name=None):
        super().__init__(name=name)

    def run(self):
        # current thread
        t = threading.current_thread()
        for n in range(5):
            # current iteration
            print("{0} th excute {1}".format(n, t.name))
            # sleep
            time.sleep(2)
        print(f'thread{t.name} done.')


# main
# create t1
# t1 = SmallThread("t1")
# create t2
# t2 = SmallThread("t2")
# launch
# t1.start()
# t2.start()

value = []


def thread_body():
    print(f'{threading.current_thread().name} starting...')
    for n in range(4):
        print(f'{n}_th {threading.current_thread().name} running...')
        value.append(n)
        time.sleep(2)
    print(f'{threading.current_thread().name} done.')


# print("main ....")
# t1 = threading.Thread(target=thread_body)
# t1.start()
# block
# t1.join()
# print('value = {0}'.format(value))
# print('main continue')

# thread stop
isrunning = True


# work thread body
def workthread_body():
    while isrunning:
        # thread beginning to work
        print(f'{threading.current_thread().name} is working...')
        # thread sleep
        time.sleep(5)
    print(f'{threading.current_thread().name} Finished its job.')


# control thread body
def controlthread_body():
    global isrunning
    while isrunning:
        # input stop cmd from standard input (keyboard)
        command = input('Input Stop Command: ')
        if command == 'exit':
            isrunning = False
    print('control thread done.')


# main
# create work thread object workthread
workthread = threading.Thread(target=workthread_body)
# workthread.start()
# create control thread object controlthread
controlthread = threading.Thread(target=controlthread_body)
controlthread.start()
2.6875
3
cantata/elements/synapse.py
kernfel/cantata
0
12799739
<filename>cantata/elements/synapse.py
import torch

from cantata import init
import cantata.elements as ce


class Synapse(ce.Module):
    '''
    Synapse with optional current, short- and long-term plasticity submodules
    Input: Delayed presynaptic spikes; postsynaptic spikes; postsyn voltage
    Output: Synaptic currents
    '''

    def __init__(self, W, signs_pre, delaymap=None, wmin=None, wmax=None,
                 current=None, stp=None, ltp=None, STDP_frac=None):
        super().__init__()
        self.active = W is not None
        if not self.active:
            return
        self.register_parabuf('W', W)
        self.register_buffer('W_signed', torch.empty_like(W), persistent=False)
        if delaymap is None:
            delaymap = torch.ones(1, *W.shape[-2:])
        self.register_buffer('delaymap', delaymap, persistent=False)
        self.register_buffer('signs_pre', signs_pre, persistent=False)
        self.register_buffer('wmin', wmin, persistent=False)
        self.register_buffer('wmax', wmax, persistent=False)

        if ltp is not None:
            if not torch.any(STDP_frac > 0):
                ltp = None
            else:
                self.register_buffer('STDP_frac', STDP_frac, persistent=False)

        self.shortterm = stp
        self.longterm = ltp
        self.current = current
        self.reset()

    @classmethod
    def configured(cls, projections, conf_pre, conf_post, batch_size, dt,
                   stp=None, ltp=None, current=None,
                   shared_weights=True, train_weight=True,
                   disable_training=False, **kwargs):
        active = len(projections[0]) > 0
        if not active:
            ret = cls(None, None, None)
            ret.projections = projections
            return ret

        nPre = init.get_N(conf_pre)
        nPost = nPre if conf_post is None else init.get_N(conf_post)
        delaymap = init.get_delaymap(projections, dt, conf_pre, conf_post)
        wmax = init.expand_to_synapses(projections, nPre, nPost, 'wmax')
        wmin = init.expand_to_synapses(projections, nPre, nPost, 'wmin')

        # Weights
        bw = 0 if shared_weights else batch_size
        w = init.build_connectivity(projections, nPre, nPost, bw)
        if train_weight and not disable_training:
            w = torch.nn.Parameter(w)
        signs_pre = init.expand_to_neurons(conf_pre, 'sign').to(torch.int8)

        if ltp is not None:
            STDP_frac = init.expand_to_synapses(
                projections, nPre, nPost, 'STDP_frac')
        else:
            STDP_frac = None

        ret = cls(w, signs_pre, delaymap=delaymap, wmin=wmin, wmax=wmax,
                  current=current, stp=stp, ltp=ltp, STDP_frac=STDP_frac)
        ret.projections = projections
        return ret

    def reset(self, keep_values=False):
        if self.active:
            self.align_signs()
            if self.shortterm is not None:
                self.shortterm.reset(keep_values)
            if self.longterm is not None:
                self.longterm.reset(self, keep_values)
            if self.current is not None:
                self.current.reset(keep_values)

    def forward(self, Xd, X=None, Vpost=None):
        '''
        Xd: (delay, batch, pre)
        X: (batch, post)
        Vpost: (batch, post)
        Output: Current (batch, post)
        '''
        if not self.active:
            return torch.zeros_like(Vpost)

        # LTP
        if self.longterm is not None:
            Wlong = self.longterm(Xd, X, Vpost)
            W = self.W_signed * (1-self.STDP_frac) + Wlong * self.STDP_frac
            WD = 'beo'
        else:
            W = self.W_signed
            WD = 'eo' if len(self.W.shape) == 2 else 'beo'

        # STP
        if self.shortterm is not None:
            Xd = Xd * (self.shortterm(Xd) + 1)  # dbe

        # Integrate
        output = self.internal_forward(WD, W, Xd)

        # Current filter
        if self.current is not None:
            output = self.current(output)

        return output

    def internal_forward(self, WD, W, Xd):
        return torch.einsum(
            f'{WD}, dbe, deo ->bo', W, Xd, self.delaymap)

    def load_state_dict(self, *args, **kwargs):
        super(Synapse, self).load_state_dict(*args, **kwargs)
        self.align_signs()

    def align_signs(self):
        if not self.active:
            return
        signs = self.signs_pre.unsqueeze(1).expand_as(self.W)
        # with torch.no_grad():
        self.W_signed = torch.empty_like(self.W)
        self.W_signed[signs == -1] = -torch.abs(self.W)[signs == -1]
        self.W_signed[signs == 0] = self.W[signs == 0]
        self.W_signed[signs == 1] = torch.abs(self.W)[signs == 1]

    def weight(self):
        return torch.abs(self.W)
2.1875
2
runtests.py
jmoujaes/dpaste
0
12799740
#!/usr/bin/env python
import sys

import django
from django.conf import settings
from django.test.runner import DiscoverRunner as TestRunner

SETTINGS = {
    'DATABASES': {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'dev.db',
        },
        # 'default': {
        #     'ENGINE': 'django.db.backends.mysql',
        #     'NAME': 'dpaste',
        #     'USER': 'root',
        #     'PASSWORD': '',
        # }
    },
    'TEMPLATES': [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True,
            'OPTIONS': {
                'context_processors': [
                    'django.template.context_processors.debug',
                    'django.template.context_processors.request',
                    'django.template.context_processors.i18n',
                    'dpaste.context_processors.dpaste_globals',
                ],
            },
        },
    ],
    'INSTALLED_APPS': [
        'django.contrib.sessions',
        'django.contrib.staticfiles',
        'dpaste',
    ],
    'MIDDLEWARE_CLASSES': (
        'django.contrib.sessions.middleware.SessionMiddleware',
    ),
    'STATIC_ROOT': '/tmp/dpaste_test_static/',
    'STATIC_URL': '/static/',
    'ROOT_URLCONF': 'dpaste.urls',
    'LANGUAGE_CODE': 'en',
    'LANGUAGES': (('en', 'English'),),
}


def runtests(*test_args):
    # Setup settings
    if not settings.configured:
        settings.configure(**SETTINGS)
    # app registry setup
    django.setup()
    # test runner
    test_runner = TestRunner(verbosity=1)
    failures = test_runner.run_tests(['dpaste'])
    if failures:
        sys.exit(failures)


if __name__ == '__main__':
    runtests(*sys.argv[1:])
1.953125
2
raspi_components/light/light.py
builderdev212/raspi_components
1
12799741
import RPi.GPIO as GPIO

from .light_errors import LedError


class Led:
    """
    This is a class used to control LED's directly connected to the GPIO
    via a given pin. See the documentation for an example of how to wire
    the LED.
    """

    def __init__(self, pin):
        """
        This initiates the LED on the given pin, setting it into output
        mode, making sure it is off, and setting up the PWM so that the
        LED can be dimmed.
        """
        try:
            self.pin = int(pin)
            GPIO.setmode(GPIO.BOARD)
            GPIO.setup(self.pin, GPIO.OUT)
            GPIO.output(self.pin, GPIO.LOW)
            self.led_dim = GPIO.PWM(self.pin, 500)
        except:
            raise LedError("Error during the initiation of the LED class.")

    def on(self, brightness=100):
        """
        Turns the defined LED on; the brightness is set by default to 100%.
        """
        try:
            self.led_dim.start(brightness)
        except:
            raise LedError("Error while turning the LED on.")

    def off(self):
        """
        Turns the defined LED off.
        """
        try:
            self.led_dim.stop()
        except:
            raise LedError("Error while turning the LED off.")

    def dim(self, brightness):
        """
        Dims the defined LED. Keep in mind that if you don't first turn
        the LED on, this will error out.
        """
        if brightness < 0:
            brightness = 0
        elif brightness > 100:
            brightness = 100
        else:
            pass
        try:
            self.led_dim.ChangeDutyCycle(brightness)
        except:
            raise LedError("Error while dimming the LED. Make sure you have turned the LED on.")
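# A hypothetical wiring check (pin 12 assumed; runs only on a Raspberry Pi):
# led = Led(12)
# led.on(50)   # start PWM at a 50% duty cycle
# led.dim(10)  # fade down
# led.off()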
4.1875
4
kimura/run.py
prprhyt/pacman
0
12799742
<reponame>prprhyt/pacman
# -*- coding: utf-8 -*-
# ! /usr/bin/python
# A quick attempt using NLP
# Reference: https://spjai.com/category-classification/#i-5

import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import joblib
import os.path

import kimura.nlp_tasks as nlp_tasks
from sklearn.neural_network import MLPClassifier  # use an MLP as the algorithm
from sklearn.metrics import accuracy_score, confusion_matrix

XSS_TRAIN_FILE = 'dataset/train_level_1.csv'
XSS_TEST_FILE = 'dataset/test_level_1.csv'
NORMAL_TRAIN_FILE = 'dataset/normal.csv'
NORMAL_TEST_FILE = 'dataset/normal.csv'


def train():
    classifier = MyMLPClassifier()
    classifier.train()


def predict():
    xss_test_data, xss_test_label = data_loader(XSS_TEST_FILE, 'xss')
    normal_test_data, normal_test_label = data_loader(NORMAL_TEST_FILE, 'normal')
    X_test = xss_test_data + normal_test_data
    y_test = xss_test_label + normal_test_label

    classifier = MyMLPClassifier()
    classifier.load_model()
    pred = classifier.predict(X_test)
    acc_score = accuracy_score(y_test, pred)
    conf_mat = confusion_matrix(
        pred,
        y_test,
        labels=['xss', 'normal']
    )
    print("acc: \n", acc_score)
    print("confusion matrix: \n", conf_mat)


class MyMLPClassifier():
    model = None
    model_name = "mlp"

    def load_model(self):
        if os.path.exists(self.get_model_path()) == False:
            raise Exception('no model file found!')
        self.model = joblib.load(self.get_model_path())
        self.classes = joblib.load(self.get_model_path('class')).tolist()
        self.vectorizer = joblib.load(self.get_model_path('vect'))
        self.le = joblib.load(self.get_model_path('le'))

    def get_model_path(self, type='model'):
        return 'kimura/models/' + self.model_name + "_" + type + '.pkl'

    def get_vector(self, text):
        return self.vectorizer.transform([text])

    def train(self):
        xss_train_data, xss_train_label = data_loader(XSS_TRAIN_FILE, 'xss')
        normal_train_data, normal_train_label = data_loader(NORMAL_TRAIN_FILE, 'normal')
        X_train = xss_train_data + normal_train_data
        X, vectorizer = nlp_tasks.get_vector_by_text_list(X_train)
        y_train = xss_train_label + normal_train_label

        # loading labels
        le = LabelEncoder()
        le.fit(y_train)
        Y = le.transform(y_train)

        model = MLPClassifier(max_iter=300, hidden_layer_sizes=(100,), verbose=10,)
        model.fit(X, Y)

        # save models
        joblib.dump(model, self.get_model_path())
        joblib.dump(le.classes_, self.get_model_path("class"))
        joblib.dump(vectorizer, self.get_model_path("vect"))
        joblib.dump(le, self.get_model_path("le"))
        self.model = model
        self.classes = le.classes_.tolist()
        self.vectorizer = vectorizer

    def predict(self, query):
        X = self.vectorizer.transform([query])
        key = self.model.predict(X)
        return self.classes[key[0]]


def data_loader(f_name, l_name):
    with open(f_name, mode='r', encoding='utf-8') as f:
        data = list(set(f.readlines()))
    label = [l_name for i in range(len(data))]
    return data, label


def run():
    train()
    predict()


if __name__ == '__main__':
    run()
3.1875
3
src/pynn/bin/average_state.py
enesyugan/yapay-nn
0
12799743
<reponame>enesyugan/yapay-nn<gh_stars>0
#!/usr/bin/env python3
# encoding: utf-8

# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License")

import os, glob
import copy
import argparse

import torch

from pynn.util import load_object_param

parser = argparse.ArgumentParser(description='pynn')
parser.add_argument('--model-path', help='model saving path', default='model')
parser.add_argument('--config', help='model config', default='model.cfg')
parser.add_argument('--states', help='model states', default='ALL')
parser.add_argument('--save-all', help='save configuration as well', action='store_true')

if __name__ == '__main__':
    args = parser.parse_args()

    model, cls, module, m_params = load_object_param(args.model_path + '/' + args.config)
    ext = copy.deepcopy(model)

    if args.states == 'ALL':
        states = [s for s in glob.glob("%s/epoch-*.pt" % args.model_path)]
    else:
        states = args.states.split(',')
        states = ["%s/epoch-%s.pt" % (args.model_path, s) for s in states]

    state = states[0]
    model.load_state_dict(torch.load(state, map_location='cpu'))
    params = list(model.parameters())

    for state in states[1:]:
        ext.load_state_dict(torch.load(state, map_location='cpu'))
        eparams = list(ext.parameters())
        for i in range(len(params)):
            params[i].data.add_(eparams[i].data)

    scale = 1.0 / len(states)
    for p in params:
        p.data.mul_(scale)

    state = model.state_dict()
    if not args.save_all:
        model_file = '%s/epoch-avg.pt' % args.model_path
        torch.save(state, model_file)
    else:
        dic = {'params': m_params, 'class': cls, 'module': module, 'state': state}
        torch.save(dic, '%s/epoch-avg.dic' % args.model_path)
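# Example invocation (epoch numbers assumed), using the flags defined above:
#   python3 average_state.py --model-path model --states 20,21,22
# averages model/epoch-20.pt, epoch-21.pt and epoch-22.pt into model/epoch-avg.pt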
1.796875
2
integrators.py
marghetis/npde
37
12799744
<reponame>marghetis/npde
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops

from abc import ABC, abstractmethod

float_type = tf.float64


class Integrator(ABC):
    """ Base class for integrators """
    def __init__(self, model):
        self.model = model

    @abstractmethod
    def forward(self):
        pass

    @abstractmethod
    def _step_func(self):
        pass

    @abstractmethod
    def _make_scan_func(self):
        pass


class ODERK4(Integrator):
    """ Runge-Kutta implementation for solving ODEs """
    def __init__(self, model):
        super().__init__(model)

    def forward(self, x0, ts):
        Nt = x0.shape[0]
        Xs = np.zeros(Nt, dtype=np.object)
        for i in range(Nt):
            time_grid = ops.convert_to_tensor(ts[i], preferred_dtype=float_type, name='t')
            y0 = ops.convert_to_tensor(x0[i, :].reshape((1, -1)), name='y0')
            time_delta_grid = time_grid[1:] - time_grid[:-1]
            scan_func = self._make_scan_func(self.model.f)
            y_grid = functional_ops.scan(scan_func, (time_grid[:-1], time_delta_grid), y0)
            y_s = array_ops.concat([[y0], y_grid], axis=0)
            Xs[i] = tf.reshape(tf.squeeze(y_s), [len(ts[i]), self.model.D])
        return Xs

    def _step_func(self, f, dt, t, y):
        dt = math_ops.cast(dt, y.dtype)
        k1 = f(y, t)
        k2 = f(y + dt*k1/2, t + dt/2)
        k3 = f(y + dt*k2/2, t + dt/2)
        k4 = f(y + dt*k3, t + dt)
        return math_ops.add_n([k1, 2*k2, 2*k3, k4]) * (dt / 6)

    def _make_scan_func(self, f):
        def scan_func(y, t_dt):
            t, dt = t_dt
            dy = self._step_func(f, dt, t, y)
            dy = math_ops.cast(dy, dtype=y.dtype)
            return y + dy
        return scan_func


class SDEEM(Integrator):
    """
    Euler-Maruyama implementation for solving SDEs
    dx = f(x)*dt + g*sqrt(dt)
    """
    def __init__(self, model, s=1):
        super().__init__(model)
        self.s = s

    def forward(self, x0, ts, Nw=1):
        Xs = np.zeros(len(ts), dtype=np.object)
        for i in range(len(ts)):
            t = np.linspace(0, np.max(ts[i]), (len(ts[i])-1)*self.s + 1)
            t = np.unique(np.sort(np.hstack((t, ts[i]))))
            idx = np.where(np.isin(t, ts[i]))[0]
            t = np.reshape(t, [-1, 1])
            time_grid = ops.convert_to_tensor(t, preferred_dtype=float_type, name='t')
            time_delta_grid = time_grid[1:] - time_grid[:-1]
            y0 = np.repeat(x0[i, :].reshape((1, -1)), Nw, axis=0)
            y0 = ops.convert_to_tensor(y0, name='y0')
            scan_func = self._make_scan_func(self.model.f, self.model.diffus.g)
            y_grid = functional_ops.scan(scan_func, (time_grid[:-1], time_delta_grid), y0)
            ys = array_ops.concat([[y0], y_grid], axis=0)
            Xs[i] = tf.transpose(tf.gather(ys, idx, axis=0), [1, 0, 2])
        return Xs

    def _step_func(self, f, g, t, dt, x):
        dt = math_ops.cast(dt, x.dtype)
        return f(x, t)*dt + g(x, t)*tf.sqrt(dt)

    def _make_scan_func(self, f, g):
        def scan_func(y, t_dt):
            t, dt = t_dt
            dy = self._step_func(f, g, t, dt, y)
            dy = math_ops.cast(dy, dtype=y.dtype)
            return y + dy
        return scan_func
2.515625
3
teamcomp/Window.py
khanfluence/team-comp
0
12799745
import os
import pickle
import re

import requests
import tkinter
from tkinter import ttk

from Hero import Hero
from SearchListbox import SearchListbox
from Team import Team


class Window(ttk.Frame):
    def __init__(self, root=None):
        super().__init__(root)
        self.root = root
        self.root.title("teamcomp")
        self.grid()
        self.root.unbind_class("Listbox", "<space>")  # how to rebind action to Enter?
        self.root.bind("<Key>", lambda event: self.search(event))

        # hero list
        self.heroes = dict()
        self.hero_frm = ttk.Frame(self, borderwidth=0)
        self.hero_lst = SearchListbox(self.hero_frm, height=20)
        self.hero_scl = ttk.Scrollbar(self.hero_frm)
        self.init_hero_list()

        # team lists
        self.team1 = Team()
        self.team2 = Team()
        self.team_frm = ttk.Frame(self, borderwidth=0)
        self.team1_lst = SearchListbox(self.team_frm, height=5)
        self.team2_lst = SearchListbox(self.team_frm, height=5)
        self.init_team_lists()

        # add/remove buttons
        self.add_rem_frm = ttk.Frame(self, borderwidth=0)
        self.team1_add_btn = ttk.Button(
            self.add_rem_frm,
            text="-->",
            command=lambda: self.add_hero(self.team1, self.team1_lst),
        )
        self.team1_rem_btn = ttk.Button(
            self.add_rem_frm,
            text="<--",
            command=lambda: self.remove_hero(self.team1, self.team1_lst),
        )
        self.team2_add_btn = ttk.Button(
            self.add_rem_frm,
            text="-->",
            command=lambda: self.add_hero(self.team2, self.team2_lst),
        )
        self.team2_rem_btn = ttk.Button(
            self.add_rem_frm,
            text="<--",
            command=lambda: self.remove_hero(self.team2, self.team2_lst),
        )
        self.init_add_rem_buttons()

        # stats list
        self.stats_frm = ttk.Frame(self, borderwidth=0)
        self.stats_lbl = ttk.Label(self.stats_frm, text="Counters")
        self.stats_lst = SearchListbox(
            self.stats_frm,
            height=20,
            width=26,
            font=("Courier", "10"),
        )
        self.stats_scl = ttk.Scrollbar(self.stats_frm)
        self.init_stats_list()

        # controls
        self.controls_lfrm = ttk.LabelFrame(self, text="Controls")
        self.show_rb_var = tkinter.StringVar()
        self.show_team1_rb = ttk.Radiobutton(
            self.controls_lfrm,
            text="Radiant",
            variable=self.show_rb_var,
            value="team1",
        )
        self.show_team2_rb = ttk.Radiobutton(
            self.controls_lfrm,
            text="Dire",
            variable=self.show_rb_var,
            value="team2",
        )
        self.show_hero_rb = ttk.Radiobutton(
            self.controls_lfrm,
            text="Hero",
            variable=self.show_rb_var,
            value="hero",
        )
        self.show_stats_btn = ttk.Button(
            self.controls_lfrm,
            text="Show",
            command=self.show_stats,
        )
        self.reset_teams_btn = ttk.Button(
            self.controls_lfrm,
            text="Clear",
            command=self.clear_teams,
        )
        self.clear_stats_btn = ttk.Button(
            self.controls_lfrm,
            text="Wipe",
            command=self.wipe_stats,
        )
        self.init_controls()

    def init_hero_list(self):
        if os.path.isfile("heroes.dat"):
            with open("heroes.dat", "rb") as f:
                self.heroes = pickle.load(f)
        else:
            self.init_heroes()
        for name in self.heroes.keys():
            self.hero_lst.append(name)
        self.hero_lst.config(yscrollcommand=self.hero_scl.set)
        self.hero_scl.config(command=self.hero_lst.yview)
        hero_lbl = ttk.Label(self.hero_frm, text="Heroes")
        self.hero_frm.grid(row=0, column=0, rowspan=2, sticky=tkinter.NS)
        self.hero_lst.grid(row=1, column=0)
        self.hero_scl.grid(row=1, column=1, sticky=tkinter.NS)
        hero_lbl.grid(row=0, column=0)

    def init_team_lists(self):
        team1_lbl = ttk.Label(self.team_frm, text="Radiant")
        team2_lbl = ttk.Label(self.team_frm, text="Dire")
        self.team_frm.grid(row=0, column=2, sticky=tkinter.N)
        team1_lbl.grid(row=0, column=3)
        self.team1_lst.grid(row=1, column=3, rowspan=5)
        self.team_frm.grid_rowconfigure(6, minsize=20)
        team2_lbl.grid(row=7, column=3)
        self.team2_lst.grid(row=8, column=3, rowspan=5)

    def init_add_rem_buttons(self):
        self.add_rem_frm.grid(row=0, column=1, sticky=tkinter.N)
        self.add_rem_frm.grid_rowconfigure(0, minsize=40)
        self.team1_add_btn.grid(row=1)
        self.team1_rem_btn.grid(row=2)
        self.team2_add_btn.grid(row=3)
        self.team2_rem_btn.grid(row=4)

    def init_stats_list(self):
        self.stats_lst.config(yscrollcommand=self.stats_scl.set)
        self.stats_scl.config(command=self.stats_lst.yview)
        self.stats_frm.grid(row=0, column=3, rowspan=2, sticky=tkinter.NS)
        self.stats_lst.grid(row=1, column=0)
        self.stats_scl.grid(row=1, column=1, sticky=tkinter.NS)
        self.stats_lbl.grid(row=0, column=0)

    def init_controls(self):
        self.controls_lfrm.grid_columnconfigure(0, weight=1)
        self.controls_lfrm.grid_columnconfigure(1, weight=1)
        self.controls_lfrm.grid_columnconfigure(2, weight=1)
        self.controls_lfrm.grid(row=1, column=1, columnspan=2, sticky=tkinter.NSEW)
        self.show_team1_rb.grid(row=0, column=0)
        self.show_team2_rb.grid(row=0, column=1)
        self.show_hero_rb.grid(row=0, column=2)
        self.show_stats_btn.grid(row=1, column=0)
        self.reset_teams_btn.grid(row=1, column=1)
        self.clear_stats_btn.grid(row=1, column=2)
        # team 1 selected by default
        self.show_team1_rb.invoke()

    def clear_teams(self):
        self.team1.reset()
        self.team2.reset()
        self.team1_lst.delete(0, tkinter.END)
        self.team2_lst.delete(0, tkinter.END)

    # wipe cached stats and fetch fresh stats for heroes on teams
    def wipe_stats(self):
        for hero in self.heroes.values():
            hero.stats = dict()
        for hero in self.team1.heroes + self.team2.heroes:
            self.heroes[hero.name].fetch_stats()
        self.stats_lst.delete(0, tkinter.END)

    # initialize hero dict and SearchListbox
    def init_heroes(self):
        page = requests.get(
            "https://www.dotabuff.com/heroes",
            headers={"user-agent": "Mozilla/5.0"}
        )
        self.hero_lst.delete(0, tkinter.END)
        self.heroes = dict()
        for hero_info in re.findall(
            r'<a href="/heroes/(.+?)">.+?<div class="name">(.+?)</div>',
            re.search(
                r'<div class="hero-grid">[\s\S]+</div></footer></section>',
                page.text
            ).group(),
        ):
            self.heroes[hero_info[1]] = Hero(hero_info[1], hero_info[0])
            self.hero_lst.append(hero_info[1])

    # unused, has no button; doable by deleting heroes.dat before run
    def refresh_heroes(self):
        self.init_heroes()
        self.wipe_stats()

    # button action
    def add_hero(self, team: Team, team_lst):
        hero: Hero = self.get_selected_hero(self.hero_lst)
        if hero is not None and team.add_hero(hero):
            team_lst.append(hero.name)

    # button action
    def remove_hero(self, team, team_lst):
        idx = team_lst.curselection()
        if not idx:
            return
        team.remove_hero(self.heroes[team_lst.get(idx[0])])
        team_lst.delete(idx[0])

    # get currently selected hero in hero list, fetching stats if necessary
    def get_selected_hero(self, lst: SearchListbox) -> Hero:
        idx = lst.curselection()
        hero: Hero = None  # use Optional? do something different?
        if idx:
            hero = self.heroes[lst.get(idx[0])]
            if not hero.stats:
                hero.fetch_stats()
        return hero

    # button action
    def show_stats(self):
        if self.show_rb_var.get() == "hero":
            # can select a hero from full list or teams
            for lst in [self.hero_lst, self.team1_lst, self.team2_lst]:
                hero: Hero = self.get_selected_hero(lst)
                if hero is not None:
                    self.update_stats_listbox(hero)
                    break
        else:
            self.update_stats_listbox(eval(f"self.{self.show_rb_var.get()}"))

    def update_stats_listbox(self, hero_or_team):  # better way to handle hero or team?
        self.stats_lst.delete(0, tkinter.END)
        for hero, stat in sorted(
            hero_or_team.stats.items(),
            key=lambda item: item[1],
            reverse=True,
        ):
            if isinstance(hero_or_team, Hero) or hero not in hero_or_team.heroes:
                self.stats_lst.append_stat(f"{hero:20} {stat:+.2f}")
        self.stats_lst.grid(row=1, column=0)

    # performed on window close
    def write_stats(self):
        with open("heroes.dat", "wb") as f:
            pickle.dump(self.heroes, f, protocol=pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def search(event):
        if (
            event.widget.winfo_class() == "Listbox"
            and (event.char.isalpha() or event.char == " ")
        ):
            event.widget.search(event.char)
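# A hypothetical launcher for the window above (the file itself defines no
# entry point); persisting stats on close via the existing write_stats method:
# if __name__ == "__main__":
#     root = tkinter.Tk()
#     win = Window(root)
#     root.protocol("WM_DELETE_WINDOW", lambda: (win.write_stats(), root.destroy()))
#     root.mainloop()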
2.953125
3
examples/surveillance_edge_constrained.py
agmangas/wotemu
2
12799746
<gh_stars>1-10
import json

from docker.types import RestartPolicy
from wotemu.enums import NetworkConditions
from wotemu.topology.models import (Broker, BuiltinApps, Network, Node,
                                    NodeApp, NodeResources, Service, Topology)

_ID_1 = "loc1"
_ID_2 = "loc2"
_THING_ID_DETECTOR = "urn:org:fundacionctic:thing:wotemu:detector"
_THING_ID_HISTORIAN = "urn:org:fundacionctic:thing:historian"


def _build_detector_cluster(
        cluster_id, network_edge, num_cameras, broker,
        camera_resources=None, detector_resources=None):
    network = Network(
        name=f"field_{cluster_id}",
        conditions=NetworkConditions.WIFI)

    nodes_camera = [
        Node(
            name=f"camera_{cluster_id}_{idx}",
            app=NodeApp(path=BuiltinApps.CAMERA, http=True),
            networks=[network],
            resources=camera_resources)
        for idx in range(num_cameras)
    ]

    camera_hostnames = [
        f"{item.name}.{network.name}"
        for item in nodes_camera
    ]

    param_cameras = json.dumps([
        {"servient_host": cam_name}
        for cam_name in camera_hostnames
    ])

    app_detector = NodeApp(
        path=BuiltinApps.DETECTOR,
        params={"cameras": param_cameras},
        mqtt=True)

    node_detector = Node(
        name=f"detector_{cluster_id}",
        app=app_detector,
        networks=[network, network_edge],
        resources=detector_resources,
        broker=broker,
        broker_network=network_edge)

    return nodes_camera, node_detector


def topology():
    network_edge_1 = Network(
        name=f"edge_2g_{_ID_1}",
        conditions=NetworkConditions.GPRS)

    network_edge_2 = Network(
        name=f"edge_3g_{_ID_2}",
        conditions=NetworkConditions.REGULAR_3G)

    network_cloud_user = Network(
        name="cloud_user",
        conditions=NetworkConditions.CABLE)

    broker = Broker(
        name=f"broker",
        networks=[network_edge_1, network_edge_2])

    camera_resources = NodeResources(
        target_cpu_speed=200,
        mem_limit="256M")

    detector_resources = NodeResources(
        target_cpu_speed=600,
        mem_limit="1G")

    nodes_camera_1, node_detector_1 = _build_detector_cluster(
        cluster_id=_ID_1,
        network_edge=network_edge_1,
        num_cameras=2,
        camera_resources=camera_resources,
        detector_resources=detector_resources,
        broker=broker)

    nodes_camera_2, node_detector_2 = _build_detector_cluster(
        cluster_id=_ID_2,
        network_edge=network_edge_2,
        num_cameras=6,
        camera_resources=camera_resources,
        detector_resources=detector_resources,
        broker=broker)

    mongo = Service(
        name="mongo",
        image="mongo:4",
        restart_policy=RestartPolicy(condition="on-failure"))

    historian_observed_things = [
        {
            "servient_host": f"{node_detector_1.name}.{network_edge_1.name}",
            "thing_id": _THING_ID_DETECTOR
        },
        {
            "servient_host": f"{node_detector_2.name}.{network_edge_2.name}",
            "thing_id": _THING_ID_DETECTOR
        }
    ]

    historian_app = NodeApp(
        path=BuiltinApps.MONGO_HISTORIAN,
        http=True,
        params={
            "mongo_uri": "mongodb://mongo",
            "observed_things": json.dumps(historian_observed_things)
        })

    node_historian = Node(
        name="cloud",
        app=historian_app,
        networks=[network_edge_1, network_edge_2, network_cloud_user])

    node_historian.link_service(mongo)

    user_app = NodeApp(
        path=BuiltinApps.CALLER,
        params={
            "servient_host": f"{node_historian.name}.{network_cloud_user.name}",
            "thing_id": _THING_ID_HISTORIAN,
            "params": json.dumps({"write": None, "list": None}),
            "lambd": 5
        })

    node_user = Node(
        name="user",
        app=user_app,
        networks=[network_cloud_user],
        scale=5)

    topology = Topology(nodes=[
        *nodes_camera_1,
        node_detector_1,
        *nodes_camera_2,
        node_detector_2,
        node_historian,
        node_user
    ])

    return topology
2.015625
2
bulletin/tools/plugins/api/job/views.py
rerb/django-bulletin
5
12799747
<gh_stars>1-10
from bulletin.api import permissions
from bulletin.api.views import PostList, PostDetail

import serializers

from bulletin.tools.plugins.models import Job


class JobList(PostList):
    queryset = Job.objects.all()
    serializer_class = serializers.JobSerializer
    permission_classes = (permissions.IsAdminUserOrReadOnly,)


class JobDetail(PostDetail):
    serializer_class = serializers.JobSerializer
    permission_classes = (permissions.IsAdminUserOrReadOnly,)
1.984375
2
learn_python/hexlet/lists/lessons/multiply_matrix.py
PavliukKonstantin/learn-python
0
12799748
<reponame>PavliukKonstantin/learn-python
# Multiplying two matrices A and B means computing a result matrix C
# in which each element C(ij) equals the sum of the products of the
# elements in the corresponding row of the first matrix A(ik) and the
# elements in the column of the second matrix B(kj).
#
# Two matrices can be multiplied only if the number of columns in the
# first matrix equals the number of rows in the second matrix, i.e. the
# first matrix must be conformable with the second one. Multiplying an
# M×N matrix by an N×K matrix yields an M×K matrix.
#
# src/solution.py
# Implement a function multiply that takes two matrices and
# returns a new matrix - the result of their product.
#
# Examples
# >>> from solution import multiply
# >>> A = [[1, 2], [3, 2]]
# >>> B = [[3, 2], [1, 1]]
# >>> multiply(A, B)
# [[5, 4], [11, 8]]
# >>>
# >>> C = [
# ...     [2, 5],
# ...     [6, 7],
# ...     [1, 8],
# ... ]
# >>> D = [
# ...     [1, 2, 1],
# ...     [0, 1, 0],
# ... ]
# >>> multiply(C, D)
# [[2, 9, 2], [6, 19, 6], [1, 10, 1]]


def multiply(matrix1, matrix2):
    # Transpose the second matrix so each result element is a dot product
    # of a row of matrix1 with a row of the transposed matrix2.
    t_matrix2 = list(map(list, zip(*matrix2)))
    result = [[0 for _ in range(len(t_matrix2))] for _ in range(len(matrix1))]
    for i in range(len(matrix1)):
        for j in range(len(t_matrix2)):
            for k in range(len(matrix1[0])):
                result[i][j] += matrix1[i][k] * t_matrix2[j][k]
    return result


C = [
    [2, 5],
    [6, 7],
    [1, 8],
]
D = [
    [1, 2, 1],
    [0, 1, 0],
]

print(multiply(C, D))


def test_multiply():
    assert multiply(
        [[2]],
        [[3]],
    ) == [[6]]

    a = [
        [1],
        [2],
    ]
    b = [
        [10, 20],
    ]
    assert multiply(a, b) == [
        [10, 20],
        [20, 40],
    ]

    a = [
        [1, 2, 1],
        [0, 1, 0],
        [2, 3, 4],
    ]
    b = [
        [2, 5],
        [6, 7],
        [1, 8],
    ]
    assert multiply(a, b) == [
        [15, 27],
        [6, 7],
        [26, 63],
    ]


test_multiply()
2.9375
3
generative/pix2sketch.py
judithfan/pix2svg
3
12799749
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import


if __name__ == '__main__':
    import os
    import argparse
    from PIL import Image

    import torch
    import torchvision.transforms as transforms
    from torch.autograd import Variable

    from beamsearch import SemanticBeamSearch

    parser = argparse.ArgumentParser(description="generate sketches")
    parser.add_argument('--image_path', type=str, help='path to image file')
    parser.add_argument('--distract_dir', type=str, help='directory to distractor image files')
    parser.add_argument('--sketch_dir', type=str, help='directory to store sketches')
    parser.add_argument('--n_samples', type=int, default=5,
                        help='number of samples per iteration')
    parser.add_argument('--n_iters', type=int, default=20,
                        help='number of iterations')
    parser.add_argument('--stdev', type=float, default=15.0,
                        help='standard deviation for Gaussian when sampling')
    parser.add_argument('--patience', type=int, default=5,
                        help='once the informativity measure stops improving, wait N epochs before quitting')
    parser.add_argument('--beam_width', type=int, default=2,
                        help='number of particles to preserve at each timestep')
    parser.add_argument('--embedding_layer', type=int, default=-1,
                        help='-1|0|1|...|7|8')
    parser.add_argument('--embedding_net', type=str, default='vgg19',
                        help='vgg19|resnet152')
    parser.add_argument('--distance_fn', type=str, default='cosine',
                        help='cosine|l1|l2')
    parser.add_argument('--fuzz', type=float, default=1.0,
                        help='hyperparameter for line rendering')
    args = parser.parse_args()

    # prep images
    natural = Image.open(args.image_path)
    distractors = []
    for i in os.listdir(args.distract_dir):
        distractor_path = os.path.join(args.distract_dir, i)
        distractor = Image.open(distractor_path)
        distractors.append(distractor)

    preprocessing = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])])

    # grab embeddings for the natural & distractor images
    natural = Variable(preprocessing(natural).unsqueeze(0))
    distractors = Variable(torch.cat([preprocessing(image).unsqueeze(0)
                                      for image in distractors]))

    explorer = SemanticBeamSearch(112, 112, 224, beam_width=args.beam_width,
                                  n_samples=args.n_samples,
                                  n_iters=args.n_iters,
                                  stdev=args.stdev, fuzz=1.0,
                                  embedding_net=args.embedding_net,
                                  embedding_layer=args.embedding_layer)

    natural_emb = explorer.vgg19(natural)
    distractor_embs = explorer.vgg19(distractors)

    for i in range(args.n_iters):
        sketch = explorer.train(i, natural_emb, distractor_items=distractor_embs)

    im = Image.fromarray(sketch)
    im.save(os.path.join(args.sketch_dir, 'sketch.png'))
2.296875
2
codes/2/4.py
BigShuang/python-introductory-exercises
0
12799750
def get_lcm(a, b):
    # Least common multiple: scan upward from max(a, b) for the first
    # number divisible by both; fall back to a*b if none is found sooner.
    multiple = a * b
    max_v = a
    if b > a:
        max_v = b
    for i in range(max_v, multiple):
        if i % a == 0 and i % b == 0:
            return i
    return multiple


print(get_lcm(6, 8))
print(get_lcm(12, 15))
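# For comparison, the same result follows from the identity
# lcm(a, b) = a * b // gcd(a, b):
# from math import gcd
# print(6 * 8 // gcd(6, 8))  # 24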
3.765625
4