max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
src/train/test.py | jiangqn/RNNLM | 1 | 12798251 | <filename>src/train/test.py<gh_stars>1-10
import os
import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader
from src.data_process.dataset import LMDataset
from src.train.eval import eval
from src.utils.constants import PAD_INDEX
from src.utils.logger import Logger


def test(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    base_path = os.path.join('./data', args.data)
    processed_base_path = os.path.join(base_path, 'processed')
    processed_test_path = os.path.join(processed_base_path, 'test.npz')
    save_path = os.path.join(processed_base_path, 'rnnlm.pkl')
    log_base_path = os.path.join(base_path, 'log')
    log_path = os.path.join(log_base_path, 'test_log.txt')
    logger = Logger(log_path)
    test_data = LMDataset(processed_test_path)
    test_loader = DataLoader(
        dataset=test_data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True
    )
    model = torch.load(save_path)
    model = model.cuda()
    criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX)
    test_loss, test_ppl = eval(model, test_loader, criterion)
    logger.log('test_loss: %.4f\ttest_ppl: %.4f' % (test_loss, test_ppl)) | 2.03125 | 2 |
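The test script above only defines `test(args)`; it expects an `args` namespace carrying `gpu`, `data` and `batch_size`. A minimal, hypothetical command-line wrapper that supplies those attributes might look like this (the module path mirrors the file layout shown; default values are purely illustrative):

```python
# hypothetical CLI wrapper for src/train/test.py; defaults are illustrative only
import argparse

from src.train.test import test

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate a trained RNN language model.')
    parser.add_argument('--gpu', type=int, default=0, help='CUDA device index')
    parser.add_argument('--data', type=str, default='ptb', help='dataset folder under ./data')
    parser.add_argument('--batch_size', type=int, default=64)
    test(parser.parse_args())
```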
database_build/dbpedia_run_log_http.py | derdav3/tf-sparql | 5 | 12798252 | <reponame>derdav3/tf-sparql<filename>database_build/dbpedia_run_log_http.py<gh_stars>1-10
from multiprocessing import Pool
import re, sys, requests, random, json
import time as timesleep
import numpy as np
from tqdm import *
from urlparse import urlparse, parse_qs
import urllib
from datetime import datetime, time
fallback_json = { "head": { "link": [], "vars": ["property", "propertyLabel", "propertyVal", "propertyValLabel"] },
"results": { "distinct": False, "ordered": True, "bindings": [ ] } }
def run_http_request(req):
'''Executes HTTP request to server and returns time
Keyword-args:
req -- sparql query in url formatting
'''
url = 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json'
t0 = datetime.utcnow()
# make call and measure time taken
resp = requests.get(url)
time1 = (datetime.utcnow() - t0).total_seconds()
return resp, time1
def cleanup_query(query):
'''Cleans log-url into readable sparql query
Keyword-args:
query -- log-url to clean
'''
line_no_tabs = re.sub(r'%09|%0B', '+', query)
line_single_spaces = re.sub(r'\++', '+', line_no_tabs)
line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces)
line_noprefix = re.sub(r'.*query=', '', line_no_formatting)
line_noquotes = re.sub(r'"', '', line_noprefix)
line_end_format = re.sub(r'(&.*?)$', '', line_noquotes)
return urllib.unquote_plus(line_end_format.encode('ascii'))
def get_result_size(response):
try:
result_size = len(response['results']['bindings'])
except:
# respJson = fallback_json
result_size = 0
return result_size
def run_log(query_line, last_timestamp):
# open queries and regex for links
url_ = re.findall('"GET (.*?) HTTP', query_line)
last_timestamp_new = datetime.utcnow()
if len(url_) == 1:
request_url = url_[0]
query_times = []
resp = ''
result_size = 0
try:
utcnow = datetime.utcnow()
midnight_utc = datetime.combine(utcnow.date(), time(0))
delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds()
for _ in range(11):
response, exec_time = run_http_request(request_url)
# if exec_time == -1.:
# break
query_times.append(exec_time)
# timesleep.sleep(random.random()*0.1)
last_timestamp_new = datetime.utcnow()
timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds())
respJson = response.json()
result_size = get_result_size(respJson)
except:
exec_time = -1
if exec_time != -1 and len(query_times) == 11: #and result_size > 0:
cold_exec_time = query_times[0]
warm_times = query_times[1:]
warm_mean = np.mean(warm_times, dtype=np.float64)
time_vec = [timestamp_query, delta_last_query]
query_clean = cleanup_query(request_url)
res = str(query_clean + '\t'+ str(time_vec) + '\t' + str(warm_mean) + '\t' + str(cold_exec_time) + '\t' + str(result_size) + '\n')
return (res, last_timestamp_new)
else:
return (-1., last_timestamp_new)
else:
return (-1., last_timestamp_new)
def main():
results = []
log_file = 'database.log'
# with open(log_file) as f:
# #Spawn pool of workers to execute http queries
# pool = Pool()
# results = pool.map_async(run_log, f,1)
# pool.close()
# while not results.ready():
# remaining = results._number_left
# print "Waiting for", remaining, "tasks to complete..."
# sys.stdout.flush()
# time.sleep(10)
with open(log_file) as in_, tqdm(total=40000) as pbar:
count = 0.
last_timestamp = datetime.utcnow()
for l_ in in_:
count += 1
res, last_timestamp = run_log(l_, last_timestamp)
if len(results) > 40000:
break
if count == 19:
count = 0
pbar.update(19)
sys.stdout.flush()
if res != -1.:
results.append(res)
with open(log_file + '-test2', 'a') as out:
for entry in results:
# for entry in results.get():
if entry is not None:
out.write(str(entry))
if __name__ == '__main__':
main()
| 2.234375 | 2 |
setup.py | finnish-heritage-agency/passari-web-ui | 1 | 12798253 | from setuptools import setup, find_packages
NAME = "passari_web_ui"
DESCRIPTION = (
"Web interface for Passari workflow"
)
LONG_DESCRIPTION = DESCRIPTION
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
setup(
name=NAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
packages=find_packages("src"),
include_package_data=True,
package_dir={"passari_web_ui": "src/passari_web_ui"},
install_requires=[
"Flask",
"Flask-Security-Too",
"click>=7", "click<8",
"SQLAlchemy",
"psycopg2",
"rq>=1",
"rq-dashboard>=0.6",
"toml",
"bcrypt",
"Flask-SQLAlchemy",
"Flask-WTF",
"flask-talisman",
"arrow"
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Framework :: Flask",
],
python_requires=">=3.6",
use_scm_version=True,
command_options={
"build_sphinx": {
"project": ("setup.py", NAME),
"source_dir": ("setup.py", "docs")
}
},
setup_requires=["setuptools_scm", "sphinx", "sphinxcontrib-apidoc"],
extras_require={
"sphinx": ["sphinxcontrib-apidoc"]
}
)
| 1.34375 | 1 |
classification.py | Gusyatnikova/argument-mining-rus | 0 | 12798254 | <reponame>Gusyatnikova/argument-mining-rus
import os
import pickle
import nltk as nltk
from nltk.classify import SklearnClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from data_manager import DataManager
correct_labels = ['Premise', 'Claim', 'MajorClaim']
correct_links = ['Support', 'Attacks']
class Classification:
def __init__(self):
self.divided_args = []
self.divided_links = []
pass
def set_data(self, data):
args = DataManager().filter_labels(data, correct_labels)
links = DataManager().filter_links(data)
self.divided_args = DataManager().divide_sentences(args)
self.divided_links = DataManager().divide_sentences(links)
def get_divided_args(self):
return self.divided_args
def get_divided_links(self):
return self.divided_links
@staticmethod
def save_pickle(classifier, pickle_name):
pickle_dir = 'pickle_files'
filename = os.path.join(pickle_dir, pickle_name)
with open(filename, 'wb') as f:
pickle.dump(classifier, f)
@staticmethod
def getFeatures(words):
content = DataManager().get_content(words)
features = nltk.FreqDist(content).keys()
return features
def set_naivebayes_classifier(self, train_args, train_links):
classifier = nltk.NaiveBayesClassifier.train(train_args)
Classification().save_pickle(classifier, 'args_naivebayes.pickle')
classifier = nltk.NaiveBayesClassifier.train(train_links)
Classification().save_pickle(classifier, 'links_naivebayes.pickle')
pass
def set_sklearn_classifier(self, train_args, train_links):
classifier = SklearnClassifier(MultinomialNB()).train(train_args)
self.save_pickle(classifier, 'args_sklearn.pickle')
classifier = SklearnClassifier(MultinomialNB()).train(train_links)
self.save_pickle(classifier, 'links_sklearn.pickle')
pass
def set_logisticregression_classifier(self, train_args, train_links):
classifier = SklearnClassifier(LogisticRegression()).train(train_args)
self.save_pickle(classifier, 'args_logisticregression.pickle')
classifier = SklearnClassifier(LogisticRegression()).train(train_links)
self.save_pickle(classifier, 'links_logisticregression.pickle')
pass
def train_classifiers(self, arguments_training_set, links_training_set):
self.set_naivebayes_classifier(arguments_training_set, links_training_set)
self.set_sklearn_classifier(arguments_training_set, links_training_set)
self.set_logisticregression_classifier(arguments_training_set, links_training_set)
pass
def load_classifier(self, filename):
classifier_file = open('pickle_files/'+filename, "rb")
classifier = pickle.load(classifier_file, encoding="latin1")
classifier_file.close()
return classifier
| 2.78125 | 3 |
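A rough sketch of how the `Classification` class above could be driven end to end. The labelled feature sets are toy placeholders (NLTK's trainers expect `(feature_dict, label)` pairs), the import path assumes the file sits at the repository root, and the `pickle_files` directory the class writes into is created up front; in the real project these sets would presumably be built via `DataManager`.

```python
# illustrative driver for Classification; training pairs below are toy placeholders
import os

from classification import Classification

os.makedirs('pickle_files', exist_ok=True)  # save_pickle() writes into this directory

# NLTK / scikit-learn wrappers train on (feature_dict, label) pairs
arguments_training_set = [
    ({'contains(should)': True, 'contains(because)': False}, 'Claim'),
    ({'contains(should)': False, 'contains(because)': True}, 'Premise'),
]
links_training_set = [
    ({'contains(therefore)': True}, 'Support'),
    ({'contains(however)': True}, 'Attacks'),
]

clf = Classification()
clf.train_classifiers(arguments_training_set, links_training_set)

# reload one of the persisted models and classify an unseen feature dict
nb = clf.load_classifier('args_naivebayes.pickle')
print(nb.classify({'contains(should)': True, 'contains(because)': False}))
```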
blog/urls.py | minielectron/portfolio | 0 | 12798255 | from django.urls import path

from . import views

app_name = "blog"  # Works as namespace

urlpatterns = [
    path('', views.blogs, name="blog"),
    path('<int:blog_id>', views.detail, name="detail")
] | 1.8125 | 2 |
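This URLconf references `views.blogs` and `views.detail`, which are not included in the dump. A minimal, hypothetical `blog/views.py` that would satisfy it could look like the following (the template paths and the `Blog` model usage are assumptions):

```python
# hypothetical blog/views.py matching the URLconf above; names are assumptions
from django.shortcuts import get_object_or_404, render

from .models import Blog


def blogs(request):
    return render(request, 'blog/blogs.html', {'blogs': Blog.objects.all()})


def detail(request, blog_id):
    blog = get_object_or_404(Blog, pk=blog_id)
    return render(request, 'blog/detail.html', {'blog': blog})
```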
ch16/app.py | rauhaanrizvi/code | 10 | 12798256 | from pyreact import setTitle, useEffect, useState, render, createElement as el
def App():
newTask, setNewTask = useState("")
editTask, setEditTask = useState(None)
taskList, setTaskList = useState([])
taskCount, setTaskCount = useState(0)
taskFilter, setTaskFilter = useState("all")
def handleSubmit(event):
event.preventDefault()
new_list = list(taskList) # Make a copy
if editTask is not None: # In edit mode
taskIndex = new_list.index(editTask) # Get list position
new_list[taskIndex].update({'name': newTask}) # Update name
else: # In add mode
new_list.append({'name': newTask, 'status': False}) # Add new item
setTaskList(new_list) # Update our state
setNewTask("") # Clear the new item value
setEditTask(None) # Clear the edit item value
def handleEdit(task):
setNewTask(task['name']) # Set the new item value
setEditTask(task) # Set the edit item value
def handleDelete(task):
new_list = list(taskList) # Make a copy
new_list.remove(task) # Remove the specified item
setTaskList(new_list) # Update our state
def handleChange(event):
target = event['target']
if target['name'] == 'taskFilter':
setTaskFilter(target['value'])
else:
setNewTask(target['value'])
def handleChangeStatus(event, task):
target = event['target']
new_list = list(taskList) # Make a copy
taskIndex = new_list.index(task) # Get list position
new_list[taskIndex].update({'status': target['checked']}) # Update
setTaskList(new_list) # Update our state
def ListItem(props):
task = props['task']
if taskFilter == "all" or \
(taskFilter == "open" and not task['status']) or \
(taskFilter == "closed" and task['status']):
return el('li', None,
task['name'] + " ",
el('button',
{'type': 'button',
'onClick': lambda: handleDelete(task)
}, "Delete"
),
el('button',
{'type': 'button',
'onClick': lambda: handleEdit(task)
}, "Edit"
),
el('label', {'htmlFor': 'status'}, " Completed:"),
el('input',
{'type': 'checkbox',
'id': 'status',
'onChange': lambda e: handleChangeStatus(e, task),
'checked': task['status']
}
),
)
else:
return None
def ListItems():
return [el(ListItem, {'key': task['name'], 'task': task}) for task in taskList]
def updateCount():
if taskFilter == 'open':
new_list = [task for task in taskList if not task['status']]
elif taskFilter == 'closed':
new_list = [task for task in taskList if task['status']]
else:
new_list = [task for task in taskList]
setTaskCount(len(new_list))
useEffect(lambda: setTitle("ToDo List"), [])
useEffect(updateCount, [taskList, taskFilter])
return el('form', {'onSubmit': handleSubmit},
el('div', None, f"Number of Tasks: {taskCount}"),
el('div', None,
el('label', {'htmlFor': 'all'}, "All Tasks:"),
el('input', {'type': 'radio',
'name': 'taskFilter',
'id': 'all',
'value': 'all',
'onChange': handleChange,
'checked': taskFilter == 'all'
}
),
el('label', {'htmlFor': 'open'}, " Active:"),
el('input', {'type': 'radio',
'name': 'taskFilter',
'id': 'open',
'value': 'open',
'onChange': handleChange,
'checked': taskFilter == 'open'
}
),
el('label', {'htmlFor': 'closed'}, " Completed:"),
el('input', {'type': 'radio',
'name': 'taskFilter',
'id': 'closed',
'value': 'closed',
'onChange': handleChange,
'checked': taskFilter == 'closed'
}
),
),
el('label', {'htmlFor': 'editBox'},
"Edit Task: " if editTask is not None else "Add Task: "
),
el('input', {'id': 'editBox',
'onChange': handleChange,
'value': newTask
}
),
el('input', {'type': 'submit'}),
el('ol', None,
el(ListItems, None)
),
)
render(App, None, 'root')
| 2.765625 | 3 |
setup.py | grantjenks/python-appstore | 1 | 12798257 | from io import open
from setuptools import setup
from setuptools.command.test import test as TestCommand
import appstore
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
errno = tox.cmdline(self.test_args)
exit(errno)
with open('README.rst', encoding='utf-8') as reader:
readme = reader.read()
setup(
name='appstore',
version=appstore.__version__,
description='App Store -- user-oriented front-end for pip.',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='http://www.grantjenks.com/docs/appstore/',
license='Apache 2.0',
packages=['appstore'],
tests_require=['tox'],
cmdclass={'test': Tox},
install_requires=[],
classifiers=(
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
),
)
| 1.90625 | 2 |
img_vec.py | janisoteps/imsim1 | 0 | 12798258 | <reponame>janisoteps/imsim1
# from keras.models import Model
# from keras.layers import Flatten, Dense, Input
# from keras.utils.data_utils import get_file
# from keras import backend as K
from keras.layers import Dense
from tensorflow.python.framework import ops
import tensorflow as tf
sess = tf.Session()
from keras import backend as K
K.set_session(sess)
# placeholders for the two 4096-d image feature vectors and their similarity label;
# x1_, x2_ and y_ are referenced by the loss/optimizer code further down
x1_ = tf.placeholder(tf.float32, shape=(None, 4096))
x2_ = tf.placeholder(tf.float32, shape=(None, 4096))
y_ = tf.placeholder(tf.float32, shape=(None,))
def loss(x1, x2, y):
    """Contrastive (siamese) loss: pull matching pairs together, push mismatches apart."""
    # Euclidean distance between x1, x2
    # (tf.sub/tf.mul were removed in TF 1.0; tf.subtract/tf.multiply are the current names)
    l2diff = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x1, x2)),
                                   reduction_indices=1))
    # you can try margin parameters
    margin = tf.constant(1.)
    labels = tf.to_float(y)
    match_loss = tf.square(l2diff, 'match_term')
    mismatch_loss = tf.maximum(0., tf.subtract(margin, tf.square(l2diff)), 'mismatch_term')
    # if label is 1, only match_loss will count, otherwise mismatch_loss
    loss = tf.add(tf.multiply(labels, match_loss), tf.multiply((1 - labels), mismatch_loss), 'loss_add')
    loss_mean = tf.reduce_mean(loss)
    return loss_mean
def initialize_parameters():
"""
Initializes parameters to build a neural network with tensorflow. The shapes are:
W1 : [25, 12288]
b1 : [25, 1]
W2 : [12, 25]
b2 : [12, 1]
W3 : [6, 12]
b3 : [6, 1]
Returns:
parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
### START CODE HERE ### (approx. 6 lines of code)
W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())
### END CODE HERE ###
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2,
"W3": W3,
"b3": b3}
return parameters
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
### START CODE HERE ### (approx. 5 lines) # Numpy Equivalents:
Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1, X) + b1
A1 = tf.nn.relu(Z1) # A1 = relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2
A2 = tf.nn.relu(Z2) # A2 = relu(Z2)
Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3
### END CODE HERE ###
return Z3
def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,
num_epochs=1500, minibatch_size=32, print_cost=True):
"""
Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
Arguments:
X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
Y_train -- test set, of shape (output size = 6, number of training examples = 1080)
X_test -- training set, of shape (input size = 12288, number of training examples = 120)
Y_test -- test set, of shape (output size = 6, number of test examples = 120)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep consistent results
seed = 3 # to keep consistent results
(n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)
n_y = Y_train.shape[0] # n_y : output size
costs = [] # To keep track of the cost
# Create Placeholders of shape (n_x, n_y)
### START CODE HERE ### (1 line)
X, Y = create_placeholders(n_x, n_y)
### END CODE HERE ###
# Initialize parameters
### START CODE HERE ### (1 line)
parameters = initialize_parameters()
### END CODE HERE ###
# Forward propagation: Build the forward propagation in the tensorflow graph
### START CODE HERE ### (1 line)
Z3 = forward_propagation(X, parameters)
### END CODE HERE ###
# Cost function: Add cost function to tensorflow graph
### START CODE HERE ### (1 line)
cost = compute_cost(Z3, Y)
### END CODE HERE ###
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
### START CODE HERE ### (1 line)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
### END CODE HERE ###
# Initialize all the variables
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
epoch_cost = 0. # Defines a cost related to an epoch
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y).
### START CODE HERE ### (1 line)
_, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
### END CODE HERE ###
epoch_cost += minibatch_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 100 == 0:
print("Cost after epoch %i: %f" % (epoch, epoch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(epoch_cost)
# lets save the parameters in a variable
parameters = sess.run(parameters)
print("Parameters have been trained!")
# Calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
return parameters
loss_ = loss(x1_, x2_, y_)
optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_)
# batchsize = 4
# x1 = np.random.rand(batchsize, dim)
# x2 = np.random.rand(batchsize, dim)
# y = np.array([0,1,1,0])
#
# l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y})
# sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y: minibatch_Y})
# Initialize all variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Run training loop
with sess.as_default():
optimizer.run(feed_dict={x1_:x1, x2_:x2, y_:y}) | 2.875 | 3 |
thanks/package_tools.py | vsprogrammer2909/thanks | 168 | 12798259 | <filename>thanks/package_tools.py
from functools import reduce
from itertools import chain, takewhile
import os
import pkg_resources
import re
class MetaDataNotFound(Exception):
pass
def get_local_dist(package_name):
working_set = dict(
(dist.project_name, dist) for dist in pkg_resources.WorkingSet()
)
return working_set[package_name]
def get_dist_metadata(dist):
metadata_path = get_local_dist_metadata_filepath(dist)
with open(metadata_path) as fh:
metadata = parse_metadata(fh.read())
return metadata
def get_funding_data(metadata):
return metadata.get('funding_url')
def get_local_dist_metadata_filepath(dist):
# Dist filename syntax
# name ["-" version ["-py" pyver ["-" required_platform]]] "." ext
# https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata
def valid_component(component):
return component[1]
# Stop taking filename components at the first missing/invalid component
filename_component = takewhile(valid_component, (
('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))),
('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))),
('-py', dist.py_version),
('-', dist.platform),
))
filename = ''.join(chain(*filename_component))
if isinstance(dist, pkg_resources.EggInfoDistribution):
ext = 'egg-info'
metadata_file = 'PKG-INFO'
elif isinstance(dist, pkg_resources.DistInfoDistribution):
ext = 'dist-info'
metadata_file = 'METADATA'
elif isinstance(dist, pkg_resources.Distribution):
ext = os.path.join('egg', 'EGG-INFO')
metadata_file = 'PKG-INFO'
else:
ext = None
metadata_file = None
filename = '{}.{}'.format(filename, ext)
path = os.path.join(dist.location, filename, metadata_file)
if ext:
return path
else:
return None
metadata_patterns = re.compile(r"""
(\s*Author:\s+(?P<author>.*)\s*)? # Author
(\s*Maintainer:\s+(?P<maintainer>.+)\s*)? # Maintainer
(\s*Project-URL:\sFunding,\s+(?P<funding_url>.+)\s*)? # Funding URL
""", re.VERBOSE)
def get_line_metadata(line):
return metadata_patterns.search(line).groupdict()
def filter_empty_metadata(metadata):
return dict((k, v) for k, v in metadata.items() if v)
def parse_metadata(metadata):
metadata = (
filter_empty_metadata(get_line_metadata(line))
for line in metadata.splitlines()
)
metadata = [m for m in metadata if m]
metadata = reduce(
lambda x, y: dict((k, v) for k, v in chain(x.items(), y.items())),
metadata,
{},
)
return metadata
def get_local_metadata(package_name):
try:
dist = get_local_dist(package_name)
metadata = get_dist_metadata(dist)
except FileNotFoundError:
# No metadata.json file locally
raise MetaDataNotFound()
return metadata
def get_local_funding_metadata(package_name):
try:
metadata = get_local_metadata(package_name)
funding_url = get_funding_data(metadata)
except KeyError:
# Package not available locally,
# or there isn't a 'Funding' entry in the project_urls
raise MetaDataNotFound()
return funding_url
| 2.234375 | 2 |
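A short, illustrative way to exercise the helpers above: ask for the funding URL of a locally installed distribution and handle the `MetaDataNotFound` case (raised when the metadata file cannot be read or contains no `Funding` project URL). The package name is just an example.

```python
# illustrative usage of the local-metadata helpers above
from thanks.package_tools import MetaDataNotFound, get_local_funding_metadata

try:
    url = get_local_funding_metadata('requests')  # any locally installed package name
    print('Funding URL:', url)
except MetaDataNotFound:
    print('No funding metadata found for this package.')
```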
kubernetes_manager/models/base.py | breimers/Django-Kubernetes-Manager | 13 | 12798260 | <reponame>breimers/Django-Kubernetes-Manager
import json
from tempfile import NamedTemporaryFile
from uuid import uuid4
from django.contrib.postgres.fields import JSONField
from django.db import models
from kubernetes import client, config
class KubernetesBase(models.Model):
"""
KubernetesBase
:type: model (abstract)
:description: Base parent model that all subsequent models inherit from.
:inherits: django_extensions.db.models.TitleSlugDescriptionModel
:fields: id, cluster, config, deployed, deleted
"""
id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text="UUID Auto field.")
title = models.CharField(max_length=128)
cluster = models.ForeignKey("TargetCluster", on_delete=models.SET_NULL, null=True, help_text="ForeignKey to TargetCluster object.")
config = JSONField(default=dict, null=True, blank=True, help_text="Pass in extra parameters here.")
deployed = models.DateTimeField(null=True, blank=True, help_text="Time when object is applied to cluster.")
removed = models.DateTimeField(null=True, blank=True, help_text="Time when object is removed from cluster.")
class Meta:
abstract = True
def slugify_function(self):
"""
:description: Overrides default slugify with custom logic.
"""
return self.title.replace("_", "-").replace(" ", "-").lower()
@property
def slug(self):
return self.slugify_function()
def get_client(self, API=client.CoreV1Api, **kwargs):
"""Gets a k8s api client
Args:
API (client.<type>) - Kubernetes Client Type
Returns:
object of type <API>
"""
if "persist_config" not in kwargs:
kwargs["persist_config"] = False
with NamedTemporaryFile() as ntf:
kwargs["config_file"] = ntf.name
cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config
with open(ntf.name, "w") as f:
f.write(cc)
return API(api_client=config.new_client_from_config(config_file=ntf.name))
class KubernetesMetadataObjBase(KubernetesBase):
"""
KubernetesMetadataObjBase
:type: model (abstract)
:description: Extends KubernetesBase to include metadata fields.
:inherits: kubernetes_manager.models.base.KubernetesBase
:fields: labels, annotations
"""
labels = JSONField(default=dict, help_text="Dictionary store equivalent to Labels in Kubernetes API")
annotations = JSONField(default=dict, null=True, blank=True, help_text="Dictionary store equivalent to Annotations in Kubernetes API")
class Meta:
abstract = True
class KubernetesNetworkingBase(KubernetesMetadataObjBase):
"""
KubernetesNetworkingBase
:type: model (abstract)
:description: Extends KubernetesMetadataObjBase to include network fields.
:inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase
:fields: labels, annotations
"""
api_version = models.CharField(max_length=16, default="v1", help_text="API version used to deploy child object.")
kind = models.CharField(max_length=16, help_text="String representation of Kubernetes object kind")
port = models.IntegerField(default=80, help_text="Port object will expose")
namespace = models.ForeignKey("KubernetesNamespace", on_delete=models.CASCADE, help_text="Live namespace the object is associated with.")
kuid = models.CharField(max_length=48, null=True, blank=True, help_text="Object's UID in the cluster")
class Meta:
abstract = True
| 1.953125 | 2 |
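The three models above are abstract, so they only become useful when subclassed. A hypothetical concrete model built on `KubernetesNetworkingBase` might look like the sketch below; the `KubernetesService` class, the service spec, and the assumption that `KubernetesNamespace` exposes a `slug` are illustrative rather than part of this package.

```python
# hypothetical concrete model using the abstract bases above
from kubernetes import client

from kubernetes_manager.models.base import KubernetesNetworkingBase


class KubernetesService(KubernetesNetworkingBase):
    """Example networking object; inherits cluster, config and metadata fields."""

    def deploy(self):
        # build a CoreV1Api client from the TargetCluster's stored kubeconfig
        api = self.get_client(API=client.CoreV1Api)
        body = client.V1Service(
            metadata=client.V1ObjectMeta(name=self.slug, labels=self.labels),
            spec=client.V1ServiceSpec(ports=[client.V1ServicePort(port=self.port)]),
        )
        # assumes the related KubernetesNamespace also exposes a slug
        return api.create_namespaced_service(namespace=self.namespace.slug, body=body)
```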
demos/video_demo.py | ALEXKIRNAS/tensorflow-fast-style-transfer | 0 | 12798261 | <reponame>ALEXKIRNAS/tensorflow-fast-style-transfer
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
from tqdm import tqdm
import cv2
from utils.demo_utils import StyleTransferDemo
import numpy as np
from typing import List
import click
def get_frames(video_path: str) -> List[np.ndarray]:
"""
Load frames from video.
:param video_path: path to video.
:return: loaded frames.
"""
video_reader = FFMPEG_VideoReader(video_path)
frames = []
for _ in tqdm(range(video_reader.nframes),
desc='Getting video frames'):
frames.append(video_reader.read_frame())
return frames
def generate_video_by_frames(path: str, frames: List[np.ndarray]):
"""
Generate video file by frames sequence.
:param path: path where to store resulting video.
:param frames: frames sequence.
"""
(height, width, _) = frames[0].shape
video = cv2.VideoWriter(path, -1, 30, (width, height))
for image in tqdm(frames, desc='Writing video'):
video.write(image)
video.release()
def combine_frames(left_frames: List[np.ndarray],
right_frames: List[np.ndarray]) -> List[np.ndarray]:
"""
Combine two sequences of frames into one by concatenating them.
:param left_frames: left side sequence.
:param right_frames: right side sequence.
:return: concatenated sequence.
"""
if len(left_frames) != len(right_frames):
raise ValueError('Sequences of frames must be same length!')
combined_frames = []
for left_frame, right_frame in zip(left_frames, right_frames):
combined_frame = np.concatenate([left_frame, right_frame], axis=1)
combined_frames.append(combined_frame)
return combined_frames
@click.command()
@click.option('--video_path',
help='Path to video that need to process.',
default='../data/videos/Africa.mp4')
@click.option('--result_path',
help='Path to file where to store results.',
default='../data/videos/Africa_styled.mp4')
@click.option('--model_path',
help='Path to model protobuf.',
default='../model/optimized_model.pb')
@click.option('--image_size',
help='Output image size.',
default='360,640')
@click.option('--batch_size',
help='Batch size.',
default='1')
def video_demo(video_path: str,
result_path: str,
model_path: str,
image_size: str,
batch_size: str):
image_size = [int(size) for size in image_size.split(',')]
batch_size = int(batch_size)
transformer = StyleTransferDemo(
model_path=model_path,
input_shape=image_size,
scope='style_transfer_cnn'
)
original_frames = get_frames(video_path=video_path)
original_frames = [
cv2.resize(frame, dsize=(image_size[1], image_size[0]))
for frame in original_frames
]
counter = tqdm(original_frames, desc='Processing frames')
num_frames = len(original_frames)
num_batches = num_frames // batch_size
num_batches += int(num_batches % batch_size != 0)
styled_frames = []
for i in range(num_batches):
begin = i * batch_size
end = min((i + 1) * batch_size, num_frames)
curr_frames = np.array(original_frames[begin:end])
out_frames = transformer(curr_frames)
if batch_size != 1:
styled_frames.extend(out_frames)
else:
styled_frames.append(out_frames)
counter.update(n=(end - begin))
resulting_images = combine_frames(
left_frames=original_frames,
right_frames=styled_frames
)
generate_video_by_frames(result_path, frames=resulting_images)
if __name__ == '__main__':
video_demo()
| 3.03125 | 3 |
logs/main.py | akshitdewan/cs61a-apps | 5 | 12798262 | <reponame>akshitdewan/cs61a-apps
from html import escape
from json import loads
from flask import Flask, abort
from common.oauth_client import create_oauth_client, get_user, is_staff, login
from common.shell_utils import sh
from common.url_for import url_for
from common.rpc.auth import is_admin
app = Flask(__name__, static_folder="", static_url_path="")
if __name__ == "__main__":
app.debug = True
create_oauth_client(app, "61a-logs")
@app.route("/")
def index():
if not is_staff("cs61a"):
return login()
email = get_user()["email"]
if not is_admin(course="cs61a", email=email):
abort(401)
service_list = "\n".join(
f"<p /><a href={url_for('create_secret', service=service)}>{service}</a>"
for service in list_services()
)
return f"""
<h1>Log Viewer</h1>
{service_list}
"""
@app.route("/service/<service>")
def create_secret(service):
if not is_staff("cs61a"):
return login()
email = get_user()["email"]
if not is_admin(course="cs61a", email=email):
abort(401)
if service not in list_services():
abort(404)
out = reversed(
[
entry["timestamp"] + " " + escape(entry["textPayload"])
for entry in loads(
sh(
"gcloud",
"logging",
"read",
f"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}",
"--limit",
"100",
"--format",
"json",
capture_output=True,
)
)
if "textPayload" in entry
]
)
return "<pre>" + "\n".join(map(str, out)) + "</pre>"
def list_services():
"""Returns the list of services from Google Cloud Run necessary to access app logs
:return: list of services
"""
return [
service["metadata"]["name"]
for service in loads(
sh(
"gcloud",
"run",
"services",
"list",
"--platform",
"managed",
"--region",
"us-west1",
"--format",
"json",
"-q",
capture_output=True,
)
)
]
if __name__ == "__main__":
app.run(debug=True)
| 2.25 | 2 |
nb_train_iib.py | icrdr/3D-UNet-Renal-Anatomy-Extraction | 0 | 12798263 | # %%
from trainer import Trainer
from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D
from loss import Dice, HybirdLoss, DiceLoss, FocalLoss
from data import CaseDataset
from torchvision.transforms import Compose
from transform import Crop, RandomCrop, ToTensor, CombineLabels, \
RandomBrightness, RandomContrast, RandomGamma, \
RandomRescale, RandomRescaleCrop, RandomMirror
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from datetime import datetime
model = ResUnet3D(out_channels=3).cuda()
optimizer = Adam(model.parameters(), lr=1e-4)
loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1)
metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1),
'focal': FocalLoss(weight_v=[1, 148, 191]),
'a_dsc': Dice(weight_v=[0, 1, 0]),
'v_dsc': Dice(weight_v=[0, 0, 1])}
scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25)
dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm')
patch_size = (128, 128, 128)
train_transform = Compose([
RandomRescaleCrop(0.1,
patch_size,
crop_mode='random'),
RandomMirror((0.5, 0.5, 0.5)),
RandomContrast(0.1),
RandomBrightness(0.1),
RandomGamma(0.1),
ToTensor()
])
valid_transform = Compose([
RandomCrop(patch_size),
ToTensor()
])
# ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt')
# model.load_state_dict(ckpt['model_state_dict'])
# optimizer.load_state_dict(ckpt['optimizer_state_dict'])
trainer = Trainer(
model=model,
optimizer=optimizer,
loss=loss,
metrics=metrics,
dataset=dataset,
scheduler=scheduler,
train_transform=train_transform,
valid_transform=valid_transform,
batch_size=2,
valid_split=0.0,
num_samples=200,
)
# %%
save_dir = "logs/DOC/iib-H-09-{}".format(datetime.now().strftime("%y%m%d%H%M"))
save_dir = 'logs/DOC/iib-H-09-2006150257'
trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt')
trainer.fit(
num_epochs=800,
use_amp=True,
save_dir=save_dir
)
# %%
| 1.585938 | 2 |
features/steps/levenshtein_steps.py | clibc/howabout | 2 | 12798264 | <filename>features/steps/levenshtein_steps.py
import random

from behave import given, when, then

from howabout import get_levenshtein


@given('two long strings')
def step_two_long_strings(context):
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    random_str = lambda size: ''.join(random.choice(alphabet) for _ in range(size))
    context.first = random_str(1024)
    context.second = random_str(1024)


@given('two empty strings')
def step_two_empty_strings(context):
    context.first = ''
    context.second = ''


@when('we compare them')
def step_compare_two_strings(context):
    context.distance = get_levenshtein(context.first, context.second)


@then('the interpreter should not overflow')
def step_assert_no_overflow(context):
    assert not context.failed


@given('"{string}" and the empty string')
def step_a_string_and_the_empty_string(context, string):
    context.first = string
    context.second = ''


@given('a string "{string}"')
def step_a_string(context, string):
    context.first = string

@when('we compare it to itself')
def step_compare_string_to_itself(context):
    # compare the stored string against itself (the original built a (str, str) tuple here)
    context.distance = get_levenshtein(context.first, context.first)

@then('the distance is {distance:d}')
def step_assert_distance(context, distance):
    assert context.distance == distance


@given('the first string "{first}" and the second string "{second}" starting with "{prefix}"')
def step_impl2(context, first, second, prefix):
    """
    :type context behave.runner.Context
    :type first str
    :type second str
    :type prefix str
    """
    context.first = first
    context.second = second | 3.046875 | 3 |
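These steps exercise `howabout.get_levenshtein`, whose implementation is not shown here. For reference, a standard dynamic-programming edit distance with the same call signature behaves the way the scenarios expect (identical inputs give 0, comparison against the empty string gives the other argument's length); the actual implementation in `howabout` may differ.

```python
# reference Levenshtein distance with the signature used by the steps above
def get_levenshtein(first, second):
    """Minimum number of single-element edits turning `first` into `second`."""
    if len(first) < len(second):
        first, second = second, first
    previous = list(range(len(second) + 1))
    for i, a in enumerate(first, start=1):
        current = [i]
        for j, b in enumerate(second, start=1):
            insert_cost = current[j - 1] + 1
            delete_cost = previous[j] + 1
            substitute_cost = previous[j - 1] + (a != b)
            current.append(min(insert_cost, delete_cost, substitute_cost))
        previous = current
    return previous[-1]
```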
apis/custom_errors.py | gusibi/Metis | 84 | 12798265 | <reponame>gusibi/Metis
# -*- coding: utf-8 -*-
###
### DO NOT CHANGE THIS FILE
###
### The code is auto generated, your change will be overwritten by
### code generating.
###
from sanic.exceptions import SanicException


def add_status_code(code):
    """
    Decorator used for adding exceptions to _sanic_exceptions.
    """
    def class_decorator(cls):
        cls.status_code = code
        return cls
    return class_decorator


class JSONException(SanicException):
    def __init__(self, code, message=None, errors=None, status_code=None):
        super().__init__(message)
        self.error_code = code
        self.message = message
        self.errors = errors
        if status_code is not None:
            self.status_code = status_code


@add_status_code(422)
class UnprocessableEntity(JSONException):
    pass


@add_status_code(401)
class Unauthorized(JSONException):
    pass


@add_status_code(403)
class Forbidden(JSONException):
    pass


@add_status_code(500)
class ServerError(JSONException):
    pass | 1.96875 | 2 |
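A brief sketch of how these classes are typically consumed in a Sanic application: raise them inside handlers and translate them to JSON in a single exception handler. The response shape below is an assumption for illustration, not necessarily what Metis itself returns.

```python
# illustrative use of the custom error classes above in a Sanic app
from sanic import Sanic
from sanic.response import json

from apis.custom_errors import JSONException, Unauthorized

app = Sanic('demo')


@app.exception(JSONException)
async def handle_json_exception(request, exception):
    body = {
        'error_code': exception.error_code,
        'message': exception.message,
        'errors': exception.errors,
    }
    return json(body, status=exception.status_code)


@app.route('/secret')
async def secret(request):
    if request.token is None:
        raise Unauthorized(40100, message='Missing access token')
    return json({'ok': True})
```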
telewavesim/utils.py | mtoqeerpk/Telewavesim | 0 | 12798266 | <filename>telewavesim/utils.py
# Copyright 2019 <NAME>
# This file is part of Telewavesim.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Utility functions to interact with ``telewavesim`` modules.
'''
import sys
import itertools
import numpy as np
import pyfftw
from scipy.signal import hilbert
from obspy.core import Trace, Stream
from obspy.signal.rotate import rotate_ne_rt
from telewavesim import conf as cf
from telewavesim import elast as es
from telewavesim.rmat_f import conf as cf_f
from telewavesim.rmat_f import plane as pw_f
def set_iso_tensor(a, b):
"""
Function to generate tensor for isotropic material.
Args:
a (float): P-wave velocity (km/s)
b (float): S-wave velocity (km/s)
Returns:
(np.ndarray): cc: Elastic tensor (GPa /density) \
(shape ``(3, 3, 3, 3)``)
"""
a = a*1.e3
b = b*1.e3
C = es.iso_tensor(a, b)
# Convert Voigt to full tensor
cc = voigt2cc(C)
return cc
def set_tri_tensor(a, b, tr, pl, ani):
"""
Function to generate tensor for transverse isotropy. The
tensor is rotated using the trend and plunge of the symmetry
axis.
Args:
a (float): P-wave velocity (km/s)
b (float): S-wave velocity (km/s)
tr (float): Trend angle of symmetry axis (degree)
pl (float): Plunge angle of symmetry axis (degree)
ani (float): Percent anisotropy
Returns:
(np.ndarray): cc: Elastic tensor (GPa /density) \
(shape ``(3, 3, 3, 3)``)
"""
# Trend and plunge of symmetry axis
tr = -tr*np.pi/180.
pl = (90. - pl)*np.pi/180.
# Percent anisotropy
da = (a*1.e3)*ani/100.
db = (b*1.e3)*ani/100.
# Set up matrix elements
AA = (a*1.e3 - da/2.)**2
CC = (a*1.e3 + da/2.)**2
LL = (b*1.e3 + db/2.)**2
NN = (b*1.e3 - db/2.)**2
AC = (a*1.e3)**2
FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL) +
(AA + LL)*(CC + LL))
# eta = FF/(AA - 2.*LL)
# Get tensor with horizontal axis
cc = es.tri_tensor(AA, CC, FF, LL, NN)
# Rotate tensor using trend and plunge
cc = rot_tensor(cc, pl, tr, 0.)
# Return tensor
return cc
def set_aniso_tensor(tr, pl, typ='atg'):
"""
Function to generate tensor for anisotropic minerals. The \
tensor is rotated using the trend and plunge of the symmetry \
axis.
Args:
tr (float): Trend angle of symmetry axis (degree)
pl (float): Plunge angle of symmetry axis (degree)
type (str, optional): Type of elastic material
Returns:
(tuple): Tuple containing:
* cc (np.ndarray): Elastic tensor (GPa /density)\
(shape ``(3, 3, 3, 3)``)
* rho (float): Density (kg/m^3)
"""
# Trend and plunge of symmetry axis
tr = -tr*np.pi/180.
pl = (90. - pl)*np.pi/180.
# Get tensor with horizontal axis
# Minerals
if typ=='atg':
C, rho = es.antigorite()
elif typ=='bt':
C, rho = es.biotite()
elif typ=='cpx':
C, rho = es.clinopyroxene_92()
elif typ=='dol':
C, rho = es.dolomite()
elif typ=='ep':
C, rho = es.epidote()
elif typ=='grt':
C, rho = es.garnet()
elif typ=='gln':
C, rho = es.glaucophane()
elif typ=='hbl':
C, rho = es.hornblende()
elif typ=='jade':
C, rho = es.jadeite()
elif typ=='lws':
C, rho = es.lawsonite()
elif typ=='lz':
C, rho = es.lizardite()
elif typ=='ms':
C, rho = es.muscovite()
elif typ=='ol':
C, rho = es.olivine()
elif typ=='opx':
C, rho = es.orthopyroxene()
elif typ=='plag':
C, rho = es.plagioclase_06()
elif typ=='qtz':
C, rho = es.quartz()
elif typ=='zo':
C, rho = es.zoisite()
# Rocks
elif typ=='BS_f':
C, rho = es.blueschist_felsic()
elif typ=='BS_m':
C, rho = es.blueschist_mafic()
elif typ=='EC_f':
C, rho = es.eclogite_foliated()
elif typ=='EC_m':
C, rho = es.eclogite_massive()
elif typ=='HB':
C, rho = es.harzburgite()
elif typ=='SP_37':
C, rho = es.serpentinite_37()
elif typ=='SP_80':
C, rho = es.serpentinite_80()
elif typ=='LHZ':
C, rho = es.lherzolite()
else:
print('type of mineral/rock not implemented')
return
# Convert Voigt to full tensor
cc = voigt2cc(C)*1.e9/rho
# Rotate tensor using trend and plunge
cc = rot_tensor(cc, pl, tr, 0.)
# Return tensor
return cc, rho
def full_3x3_to_Voigt_6_index(i, j):
"""
Conversion of tensor to Voigt notation for indices
"""
if i == j:
return i
return 6-i-j
def voigt2cc(C):
"""
Convert the Voigt representation of the stiffness matrix to the full
3x3x3x3 tensor representation.
Args:
C (np.ndarray): Stiffness matrix (shape ``(6, 6)``)
Returns:
(np.ndarray): cc: Elastic tensor (shape ``(3, 3, 3, 3)``)
"""
C = np.asarray(C)
cc = np.zeros((3,3,3,3), dtype=float)
for i, j, k, l in itertools.product(range(3), range(3), range(3), range(3)):
Voigt_i = full_3x3_to_Voigt_6_index(i, j)
Voigt_j = full_3x3_to_Voigt_6_index(k, l)
cc[i, j, k, l] = C[Voigt_i, Voigt_j]
return cc
def cc2voigt(cc):
"""
Convert from the full 3x3x3x3 tensor representation
to the Voigt notation of the stiffness matrix.
Args:
cc (np.ndarray): Elastic tensor (shape ``(3, 3, 3, 3)``)
Returns:
(np.ndarray): C: Stiffness matrix (shape ``(6, 6)``)
"""
Voigt_notation = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]
tol = 1e-3
cc = np.asarray(cc)
C = np.zeros((6,6))
for i in range(6):
for j in range(6):
k, l = Voigt_notation[i]
m, n = Voigt_notation[j]
C[i,j] = cc[k,l,m,n]
return C
def VRH_average(C):
"""
Performs a Voigt-Reuss-Hill average of the anisotropic
stifness matrix to the bulk modulus K and the shear modulus
G.
Args:
C (np.ndarray): Stiffness matrix (shape ``(6, 6)``)
Returns:
(tuple): Tuple containing:
* Kvoigt (float): Voigt average bulk modulus (GPa)
* Gvoigt (float): Voigt average shear modulus (GPa)
* Kreuss (float): Reuss average bulk modulus (GPa)
* Greuss (float): Reuss average shear modulus (GPa)
* Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa)
* Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa)
Example
-------
>>> from telewavesim import utils
>>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg')
>>> C = utils.cc2voigt(cc)
>>> utils.VRH_average(C*rho)
(75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658,
68450631050.26149, 38474215710.088997)
"""
# Compliance matrix
S = np.linalg.inv(C)
# Voigt averaging
Kvoigt = (C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9.
Gvoigt = (C[0,0] + C[1,1] + C[2,2] - C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3] + \
3.*C[4,4] + 3.*C[5,5])/15.
# Reuss averaging
Kreuss = 1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2])
Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \
4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5])
# Voigt-Reuss-Hill average
Kvrh = (Kvoigt + Kreuss)/2.
Gvrh = (Gvoigt + Greuss)/2.
return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh
def mod2vel(K,G,rho):
"""
Calculates the isotropic P and S wave velocities from given
bulk (K) and shear (G) moduli and density (rho) in kg/m^3
Args:
K (float): Bulk modulus (GPa)
G (float): Shear modulus (GPa)
rho (float): Density (kg/m^3)
Returns:
(tuple): tuple containing:
* Vp (float): P-wave velocity (m/s)
* Vs (float): S-wave velocity (m/s)
Example
-------
>>> from telewavesim import utils
>>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg')
>>> C = utils.cc2voigt(cc)
>>> K, G = utils.VRH_average(C*rho)[4:6]
>>> utils.mod2vel(K, G, rho)
(6760.617471753726, 3832.0771334254896)
"""
Vp = np.sqrt((K + 4.*G/3.)/rho)
Vs = np.sqrt(G/rho)
return Vp, Vs
def rot_tensor(a,alpha,beta,gam):
"""
Performs a rotation of the tensor cc (c_ijkl) about three angles (alpha,
beta, gamma)
Args:
a (np.ndarray): Elastic tensor with shape ``(3, 3, 3, 3)``
alpha (float): Angle in radians
beta (float): Angle in radians
gam (float): Angle in radians
Returns:
(np.ndarray): aa: Rotated tensor with shape ``(3, 3, 3, 3)``
.. note::
The three angles (``alpha``, ``beta``, ``gam``) correspond to rotation about the
x_2, x_3, x_1 axes. Note that the sequence of the rotation is important:
(AB ~= BA). In this case we rotate about x_2 first, x_3 second and x_1 third.
For trend and plunge of symmetry axis (e.g., tri_tensor):
``alpha`` = plunge
``beta`` = trend
"""
rot = np.zeros((3,3))
aa = np.zeros((3,3,3,3))
rot[0,0] = np.cos(alpha)*np.cos(beta)
rot[0,1] = np.sin(beta)
rot[0,2] = np.sin(alpha)*np.cos(beta)
rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \
np.sin(gam)*np.sin(alpha)
rot[1,1] = np.cos(gam)*np.cos(beta)
rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \
np.sin(gam)*np.cos(alpha)
rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \
np.cos(gam)*np.sin(alpha)
rot[2,1] = -np.sin(gam)*np.cos(beta)
rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \
np.cos(gam)*np.cos(alpha)
#
# c_ijkl ---> c_mnrs
#
for m in range(3):
for n in range(3):
for r in range(3):
for s in range(3):
asum=0.0
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l]
asum = asum + rr*a[i,j,k,l]
aa[m,n,r,s] = asum
return aa
def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5):
"""
Rotates traces from `Z-R-T` orientation to `P-SV-SH` wave mode.
Args:
trZ (obspy.trace): Vertical component
trR (obspy.trace): Radial component
trT (obspy.trace): Transverse component
vp (float, optional): P-wave velocity used for rotation
vs (float, optional): S-wave velocity used for rotation
Returns:
(tuple): tuple containing:
* trP (obspy.trace): Compressional (P) wave mode
* trV (obspy.trace): Vertically polarized shear (SV) wave mode
* trH (obspy.trace): Horizontally polarized shear (SH) wave mode
"""
# Copy traces
trP = trZ.copy()
trV = trR.copy()
trH = trT.copy()
# Vertical slownesses
qp = np.sqrt(1/vp/vp - cf.slow*cf.slow)
qs = np.sqrt(1/vs/vs - cf.slow*cf.slow)
# Elements of rotation matrix
m11 = cf.slow*vs*vs/vp
m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp)
m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs)
m22 = cf.slow*vs
# Rotation matrix
rot = np.array([[-m11, m12], [-m21, m22]])
# Vector of Radial and Vertical
r_z = np.array([trR.data,trZ.data])
# Rotation
vec = np.dot(rot, r_z)
# Extract P and SV components
trP.data = vec[0,:]
trV.data = vec[1,:]
trH.data = -trT.data/2.
return trP, trV, trH
def stack_all(st1, st2, pws=False):
"""
Stacks all traces in two ``Stream`` objects.
Args:
st1 (obspy.stream): Stream 1
st2 (obspy.stream,): Stream 2
pws (bool, optional): Enables Phase-Weighted Stacking
Returns:
(tuple): tuple containing:
* stack1 (obspy.trace): Stacked trace for Stream 1
* stack2 (obspy.trace): Stacked trace for Stream 2
"""
print()
print('Stacking ALL traces in streams')
# Copy stats from stream
str_stats = st1[0].stats
# Initialize arrays
tmp1 = np.zeros(len(st1[0].data))
tmp2 = np.zeros(len(st2[0].data))
weight1 = np.zeros(len(st1[0].data), dtype=complex)
weight2 = np.zeros(len(st2[0].data), dtype=complex)
# Stack all traces
for tr in st1:
tmp1 += tr.data
hilb1 = hilbert(tr.data)
phase1 = np.arctan2(hilb1.imag, hilb1.real)
weight1 += np.exp(1j*phase1)
for tr in st2:
tmp2 += tr.data
hilb2 = hilbert(tr.data)
phase2 = np.arctan2(hilb2.imag, hilb2.real)
weight2 += np.exp(1j*phase2)
# Normalize
tmp1 = tmp1/np.float(len(st1))
tmp2 = tmp2/np.float(len(st2))
# Phase-weighting
if pws:
weight1 = weight1/np.float(len(st1))
weight2 = weight2/np.float(len(st2))
weight1 = np.real(abs(weight1))
weight2 = np.real(abs(weight2))
else:
weight1 = np.ones(len(st1[0].data))
weight2 = np.ones(len(st1[0].data))
# Put back into traces
stack1 = Trace(data=weight1*tmp1,header=str_stats)
stack2 = Trace(data=weight2*tmp2,header=str_stats)
return stack1, stack2
def calc_ttime(slow):
"""
Calculates total propagation time through model. The
bottom layer is irrelevant in this calculation.
.. note::
The ``conf`` global variables need to be set for this calculation
to succeed. This is typically ensured through reading of the
model file from the function ``utils.read_model(modfile)``,
and setting the variable ``conf.wvtype``
Args:
slow (float): Slowness value (s/km)
Returns:
(float): t1: Time in seconds
Example
-------
>>> from telewavesim import conf
>>> from telewavesim import utils
>>> import numpy as np
>>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg')
>>> # Define two-layer model model with identical material
>>> conf.nlay = 2
>>> conf.a = np.zeros((3,3,3,3,conf.nlay))
>>> conf.rho = np.zeros((conf.nlay))
>>> conf.thickn = np.zeros((conf.nlay))
>>> # Pass variables to the `conf` module
>>> # Only topmost layer is useful for travel time calculation
>>> conf.isoflg = ['atg']
>>> conf.a[:,:,:,:,0] = cc
>>> conf.rho[0] = rho
>>> conf.thickn[0] = 10.
>>> conf.wvtype = 'P'
>>> slow = 0.06 # s/km
>>> utils.calc_ttime(slow)
0.0013519981570791182
"""
t1 = 0.
for i in range(cf.nlay-1):
if cf.isoflg[i] == 'iso':
a0 = cf.a[2,2,2,2,i]
b0 = cf.a[1,2,1,2,i]
else:
cc = cc2voigt(cf.a[:,:,:,:,i])
rho = cf.rho[i]
K1,G1,K2,G2,K,G = VRH_average(cc*rho)
a0, b0 = mod2vel(K,G,rho)
a0 = a0**2
b0 = b0**2
if cf.wvtype=='P':
t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2)
elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH':
t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2)
return t1
def read_model(modfile):
"""
Reads model parameters from file that are passed
through the configuration module ``conf``.
Returns:
None: Parameters are now global variables shared
between all other modules
"""
h = []; r = []; a = []; b = []; fl = []; ani = []; tr = []; pl = []
# Read file line by line and populate lists
try:
open(modfile)
except:
raise(Exception('model file cannot be opened: ',modfile))
with open(modfile) as fileobj:
for line in fileobj:
if not line.rstrip().startswith('#'):
model = line.rstrip().split()
h.append(np.float64(model[0])*1.e3)
r.append(np.float64(model[1]))
a.append(np.float64(model[2]))
b.append(np.float64(model[3]))
fl.append(model[4])
ani.append(np.float64(model[5]))
tr.append(np.float64(model[6]))
pl.append(np.float64(model[7]))
# Pass configuration parameters
cf.nlay = len(h)
cf.thickn = h
cf.rho = r
cf.isoflg = fl
cf.a = np.zeros((3,3,3,3,cf.nlay))
cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex)
cf.evals = np.zeros((6,cf.nlay),dtype=complex)
cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex)
cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex)
cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex)
cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex)
mins = ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\
'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo']
rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80']
for j in range(cf.nlay):
if fl[j]=='iso':
cc = set_iso_tensor(a[j],b[j])
cf.a[:,:,:,:,j] = cc
elif fl[j]=='tri':
cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j])
cf.a[:,:,:,:,j] = cc
elif fl[j] in mins or fl[j] in rocks:
cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j])
cf.a[:,:,:,:,j] = cc
cf.rho[j] = rho
else:
print('\nFlag not defined: use either "iso", "tri" or one among\n')
print(mins,rocks)
print()
raise(Exception())
return
def check_cf(obs=False):
"""
Checks whether or not all required global variables are set and throws an Exception if not.
Args:
obs (bool, optional): Whether the analysis is done for an OBS case or not.
:raises ExceptionError: Throws ExceptionError if not all variables are set.
"""
lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz]
check = [f is None for f in lst]
if sum(check)/len(check)>0.:
raise Exception("global variables not all set. Set all of the following variables through the conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'")
if obs:
lst = [cf.dp, cf.c, cf.rhof]
check = [f is None for f in lst]
if sum(check)/len(check)>0.:
raise Exception("global variables not all set for OBS case. Set all of the following variables through the conf module: 'dp', 'c', 'rhof'")
def model2for():
"""
Passes global model variables to Fortran ``conf`` module.
Returns:
None
Variables to pass are ``a``, ``rho``, ``thickn``, ``isoflg``
"""
nlaymx = cf_f.nlaymx
cf_f.a = np.zeros((3,3,3,3,nlaymx))
cf_f.rho = np.zeros((nlaymx))
cf_f.thickn = np.zeros((nlaymx))
cf_f.isoflg = np.zeros((nlaymx), dtype='int')
for i in range(cf.nlay):
cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i]
cf_f.rho[i] = cf.rho[i]
cf_f.thickn[i] = cf.thickn[i]
if cf.isoflg[i]=='iso':
cf_f.isoflg[i] = 1
def wave2for():
"""
Passes global wavefield variables to Fortran ``conf`` module.
Returns:
None
Variables to pass are ``dt``, ``slow``, ``baz``
"""
cf_f.dt = cf.dt
cf_f.slow = cf.slow
cf_f.baz = cf.baz
def obs2for():
"""
Passes global OBS-related variables to Fortran ``conf`` module.
Returns:
None
Variables to pass are ``dp``, ``c``, ``rhof``
"""
cf_f.dp = cf.dp
cf_f.c = cf.c
cf_f.rhof = cf.rhof
def run_plane(obs=False):
"""
Function to run the ``plane`` module and return 3-component seismograms as an ``obspy``
``Stream`` object.
.. note::
The ``conf`` global variables need to be set for this calculation
to succeed. This function first checks to make sure the variables are all set
before executing the main ``telewavesim.rmat_f.plane_****`` function.
Args:
fortran (book, option): Whether or not the Fortran modules are used
obs (bool, optional): Whether or not the analysis is done for an OBS stations
Returns:
(obspy.stream): trxyz: Stream containing 3-component displacement seismograms
"""
# Check if all variables are set. If not, throw an Exception and stop
check_cf(obs)
# Pass variables to Fortran conf
model2for()
wave2for()
# Run the ``plane`` module depending on land or OBS case.
if obs:
# If OBS, then further pass OBS-related paramters to Fortran conf
obs2for()
# Get the Fourier transform of seismograms for ``obs``case
yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c'))
else:
# Get the Fourier transform of seismograms for ``land`` case
yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c'))
# Transfer displacement seismograms to an ``obspy`` ``Stream`` object.
trxyz = get_trxyz(yx, yy, yz)
return trxyz
def get_trxyz(yx, yy, yz):
"""
Function to store displacement seismograms into ``obspy`` ``Trace`` obsjects and
then an ``obspy`` ``Stream`` object.
Args:
ux (np.ndarray): x-component displacement seismogram
uy (np.ndarray): y-component displacement seismogram
uz (np.ndarray): z-component displacement seismogram
Returns:
(obspy.stream): trxyz: Stream containing 3-component displacement seismograms
"""
# Get displacements in time domain
ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx))
uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy))
uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz))
# Store in traces
tux = Trace(data=ux)
tuy = Trace(data=uy)
tuz = Trace(data=uz)
# Update trace header
tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz)
tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz)
tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz)
# Append to stream
trxyz = Stream(traces=[tux, tuy, tuz])
return trxyz
def tf_from_xyz(trxyz, pvh=False):
"""
Function to generate transfer functions from displacement traces.
Args:
trxyz (obspy.stream): Obspy ``Stream`` object in cartesian coordinate system
pvh (bool, optional): Whether to rotate from Z-R-T coordinate system to P-SV-SH wave mode
Returns:
(obspy.stream): tfs: Stream containing Radial and Transverse transfer functions
"""
# Extract East, North and Vertical
ntr = trxyz[0]
etr = trxyz[1]
ztr = trxyz[2]
baz = cf.baz
# Copy to radial and transverse
rtr = ntr.copy()
ttr = etr.copy()
# Rotate to radial and transverse
rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz)
a = pyfftw.empty_aligned(len(rtr.data), dtype='float')
# print(rtr.data, ttr.data)
if pvh:
vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3
vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3
trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs)
tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data))
tft = trH.copy(); tft.data = np.zeros(len(tft.data))
ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data)
ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data)
ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data)
if cf.wvtype=='P':
# Transfer function
tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp))))
tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp))))
elif cf.wvtype=='Si':
tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv))))
tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh))))
elif cf.wvtype=='SV':
tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv))))
elif cf.wvtype=='SH':
tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh))))
else:
tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data))
tft = ttr.copy(); tft.data = np.zeros(len(tft.data))
ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data)
ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data)
ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data)
if cf.wvtype=='P':
# Transfer function
tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz))))
tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz))))
elif cf.wvtype=='Si':
tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr))))
tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft))))
elif cf.wvtype=='SV':
tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr))))
elif cf.wvtype=='SH':
tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft))))
# Store in stream
tfs = Stream(traces=[tfr, tft])
# Return stream
return tfs
def update_stats(tr, nt, dt, slow, baz):
"""
Updates the ``stats`` doctionary from an obspy ``Trace`` object.
Args:
tr (obspy.trace): Trace object to update
nt (int): Number of samples
dt (float): Sampling rate
slow (float): Slowness value (s/km)
baz (float): Back-azimuth value (degree)
Returns:
(obspy.trace): tr: Trace with updated stats
"""
tr.stats.delta = dt
tr.stats.slow = slow
tr.stats.baz = baz
return tr | 1.742188 | 2 |
pdx_beer_finder/beer_googles/models.py | mgborgman/pcg2015_mgb | 0 | 12798267 | <filename>pdx_beer_finder/beer_googles/models.py
from django.db import models
from django.contrib.auth.models import User
class Beer(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
slug = models.SlugField(unique=True, default='')
def __unicode__(self):
return self.name
class Meta:
verbose_name = "Beer"
verbose_name_plural = "Beers"
class Bar(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
beers = models.ManyToManyField(Beer, related_name='bars')
slug = models.SlugField(unique=True, default='')
def __unicode__(self):
return self.name
class Meta:
verbose_name = "Bar"
verbose_name_plural = "Bars"
class BarRating(models.Model):
rating = models.IntegerField()
bar = models.ForeignKey(Bar)
user = models.ForeignKey(User)
def __unicode__(self):
return unicode(self.rating)
class BeerRating(models.Model):
rating = models.IntegerField(default=0)
comment = models.TextField()
beer = models.ForeignKey(Beer)
user = models.ForeignKey(User)
def __unicode__(self):
return unicode(self.user)
class BarComment(models.Model):
comment = models.TextField()
bar = models.ForeignKey(Bar)
user = models.ForeignKey(User)
def __unicode__(self):
return unicode(self.user)
class BeerComment(models.Model):
Comment = models.TextField()
bar = models.ForeignKey(Bar)
user = models.ForeignKey(User)
def __unicode__(self):
return unicode(self.user)
class UserProfile(models.Model):
user = models.ForeignKey(User, null=True, blank=True)
def __unicode__(self):
return self.user.username
| 2.328125 | 2 |
JoystickInput/JoystickServer.py | Mnenmenth/RobotCode | 0 | 12798268 | from serial import Serial
import time
import platform
import socket
serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600)
time.sleep(2)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('', 2222))
server.listen(1)
while True:
(client, address) = server.accept()
print('Connected')
while True:
data = client.recv(6)#.decode()
if 'CLOSE' in data: break
#print(data)
serialPort.write(data)
| 2.71875 | 3 |
app/api/v1/stu/teacher_class.py | Hansybx/guohe3 | 1 | 12798269 | <filename>app/api/v1/stu/teacher_class.py
"""
-*- coding: utf-8 -*-
Time : 2019/7/27 16:15
Author : Hansybx
"""
from flask import request, jsonify
from app.api.v1.stu import stu
from app.models.error import PasswordFailed
from app.models.res import Res
from app.utils.teacher_class.teacher_class_utils import get_teacher_class
@stu.route('/class/teacher', methods=['POST'])
def teacher_class_get():
username = request.form['username']
password = request.form['password']
semester = request.form['semester']
academy = request.form['academy']
zc = request.form['zc']
try:
result = get_teacher_class(username, password, semester, academy, zc)
code = 200
msg = '查询成功'
info = result
except PasswordFailed:
code = 401
msg = '查询失败'
info = {
'result': '账号或密码错误'
}
except Exception:
code = 500
msg = '查询失败'
info = [
{
'result': '未知异常'
}
]
res_json = Res(code, msg, info)
return jsonify(res_json.__dict__)
| 2.640625 | 3 |
process_html.py | weepingwillowben/web-script-wars | 1 | 12798270 | <reponame>weepingwillowben/web-script-wars<gh_stars>1-10
import base64
from jinja2 import Template
import sys
import os
import urllib.request
def encodebase64(filename):
fin = open(filename, 'rb')
contents = fin.read()
data_url = base64.b64encode(contents)
fin.close()
return data_url.decode("utf-8")
def make_data_url_png(filename):
prefix = 'data:image/png;base64,'
return prefix + encodebase64(filename)
def make_data_url_of_type(filename,type):
prefix = 'data:{};base64,'.format(type)
return prefix + encodebase64(filename)
def render_template():
src_file = "template.tmpl"
dest_file = "index.html"
template = Template(get_local_file(src_file))
result = template.render(
get_local_file=get_local_file,
get_remote_file=get_remote_file,
make_data_url_png=make_data_url_png,
make_data_url_of_type=make_data_url_of_type,
encodebase64=encodebase64,
)
save_file(result,dest_file)
def get_local_file(filename):
return open(filename).read()
def save_file(data,filename):
open(filename,'w').write(data)
def get_remote_file(url):
basename = url.split("/")[-1]
if os.path.exists(basename):
return get_local_file(basename)
else:
with urllib.request.urlopen(url) as response:
html = response.read().decode("utf-8")
save_file(html,basename)
return html
if __name__ == "__main__":
render_template()
# gets tensorflow for s3 upload
get_remote_file("https://cdnjs.cloudflare.com/ajax/libs/tensorflow/0.14.1/tf.min.js")
| 2.703125 | 3 |
setup.py | litrin/MACD | 13 | 12798271 | <filename>setup.py
from distutils.core import setup
setup(
name='MACD',
version='1.0',
py_modules=["Average", "MACD"],
license='MIT',
author='<NAME>',
author_email='<EMAIL>'
)
| 1.070313 | 1 |
hplusminus/sid.py | bio-phys/hplusminus | 1 | 12798272 | <filename>hplusminus/sid.py
# Copyright (c) 2020 <NAME>, Max Planck Institute of Biophysics, Frankfurt am Main, Germany
# Released under the MIT Licence, see the file LICENSE.txt.
import os
import numpy as np
from scipy.stats import gamma as gamma_dist
import scipy
def _get_package_gsp():
"""
Return the directory path containing gamma spline parameter files that come bundled with the package.
-------
gsp_dir: str
directory path containing gamma spline parameter files
"""
package_dir = os.path.dirname(os.path.abspath(__file__))
gsp_dir = os.path.join(package_dir, "gsp")
if not os.path.exists(gsp_dir):
raise RuntimeError("gamma spline parameter directory not found at " + gsp_dir)
else:
return gsp_dir
def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']):
"""
Load knots and coefficients for B-splines representing :math:`\alpha`, :math:`\beta`, :math:`\matcal{I}_o` paramters of the shifted gamma disributions as functions of :math:`\log_{10} N`, where :math:`N` is the number of data points.
Parameters
----------
ipath: str
Input path.
tests: List of str (optional)
Names of tests, for which paramaters are read in. Names identify the corresponding files.
Returns
-------
spline_par: dict
Dictionary containing knots and coefficients of B-splines for all tests and parameters of the shifted gamma disributions.
"""
spline_par = {}
for k in tests:
spline_par[k] = {}
for na in ["alpha", "beta", "I0"]:
spline_par[k][na] = {}
for tmp in ["knots", "coeffs"]:
iname = "%s_%s_%s.npy" % (tmp, k, na)
spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname))
return spline_par
def cumulative_SID_gamma(SI, alpha, beta, I0):
"""
Returns cumulative distribution function of the Shannon information given by gamma distribution.
Parameters
----------
SI: float or array-like
Shannon information
alpha: float
Shape parameter of the gamma disribution.
beta: float
Inverser scale parameter of the gamma disribution.
I0: float
Shift (location) parameter of the gamma distribution.
Returns
-------
cdf: float
Value of Shannon information
"""
cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0)
return cdf
def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']):
"""
Returns spline function objects for the data size dependence of the parameters of the gamma distributions representing cumulative Shannon information distribution functions.
Parameters
----------
spline_par: dict
Dictionary containing knots and coefficients of B-splines for all tests and parameters of the shifted gamma disributions. Ouput of load_spline_parameters().
tests: List of str (optional)
Names of tests.
Returns
-------
spline_func: dict
Dictionary of spline functions.
"""
nam = ["alpha", "beta", "I0"]
spline_func = {}
for k in tests:
spline_func[k] = {}
for i in range(3):
spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]]["knots"], c=spline_par[k][nam[i]]["coeffs"], k=3)
return spline_func
def get_gamma_parameters(Ns, test, spline_func):
"""
Returns parameters of shifted gamma distributions for given number of data points.
Parameters
----------
Ns: int
Number of data points.
test: str
Name of test.
spline_func: dict
Dictionary of spline functions. Output of get_spline() or init().
Returns
-------
alpha: float
Shape parameter of the gamma disribution.
beta: float
Inverser scale parameter of the gamma disribution.
I0: float
Shift (location) parameter of the gamma distribution.
"""
log_Ns = np.log10(Ns)
alpha = spline_func[test]["alpha"](log_Ns)
beta = spline_func[test]["beta"](log_Ns)
I0 = spline_func[test]["I0"](log_Ns)
return alpha, beta, I0
def init(gamma_params_ipath=_get_package_gsp()):
"""
Initialises spline function object.
Parameters
----------
gamma_params_ipath: str
Input path.
Returns
-------
spline_func: dict
Dictionary of spline functions. Output of get_spline() or init().
"""
spline_par = load_spline_parameters(gamma_params_ipath)
spline_func = get_spline(spline_par)
return spline_func
def cumulative(SI, number_data_points, test, spline_func):
"""
Calculate p-values for given test using gamma disribuiton approximation of Shannon information distribution.
Parameters
----------
SI: float
Shannon information value.
number_data_points: int
Number of data points.
test: str
Name of statistical test.
spline_func: dict
Dictionary of spline functions. Output of get_spline() or init().
Returns
-------
p-value: float
P-value for given test.
"""
#tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp']
if test == "chi2":
alpha = 0.5
beta = 1.
I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points))
p_value = cumulative_SID_gamma(SI, alpha, beta, I0)
elif test == "h":
alpha, beta, I0 = get_gamma_parameters(number_data_points, "h_simple", spline_func)
p_value = cumulative_SID_gamma(SI, alpha, beta, I0)
elif test == "hpm":
alpha, beta, I0 = get_gamma_parameters(number_data_points, "h", spline_func)
p_value = cumulative_SID_gamma(SI, alpha, beta, I0)
elif test == "chi2_h":
alpha, beta, I0 = get_gamma_parameters(number_data_points, "both_simple", spline_func)
p_value = cumulative_SID_gamma(SI, alpha, beta, I0)
elif test == "chi2_hpm":
alpha, beta, I0 = get_gamma_parameters(number_data_points, "both", spline_func)
p_value = cumulative_SID_gamma(SI, alpha, beta, I0)
else:
print("Error: Test \"%s\" not available!")
print("Exiting. Returning -1.")
return -1.
return p_value
def get_p_value(SI, number_data_points, test, spline_func):
"""
Calculate p-values for given test using the gamma distribution approximation of the Shannon information distribution.
Wrapper function for function cumulative(SI, number_data_points, test, spline_func)
Parameters
----------
SI: float
Shannon information value.
number_data_points: int
Number of data points.
test: str
Name of statistical test.
spline_func: dict
Dictionary of spline functions. Output of get_spline() or init().
Returns
-------
p-value: float
P-value for given test.
"""
p_value = cumulative(SI, number_data_points, test, spline_func)
return p_value
| 2.375 | 2 |
book/form.py | GyanendraMaurya/book-recommendation-system | 0 | 12798273 | from django.forms import ModelForm, Textarea, TextInput
from .models import Review
from django import forms
from django.contrib.auth.models import User
# Form to take display to take user's review
class ReviewForm(ModelForm):
class Meta:
model = Review
fields = ['rating', 'comment']
#user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'}))
widgets = {
'comment' : Textarea(attrs={'cols':35, 'rows':10}),
#'user_name' : TextInput(attrs={'placeholder':User.username,})
}
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(attrs={'class':'forminput'}))
password = forms.CharField(widget=forms.PasswordInput)
| 2.25 | 2 |
app.py | DanielNery/vagas-emprego-flask-api | 0 | 12798274 | <gh_stars>0
from flask import Flask
from flask_restful import Api
from resources.vagas import VagasEmpregoResource
App = Flask(__name__)
App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'
App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
Api = Api(App)
# Vai criar o banco e todas aas suas tabelas
@App.before_first_request
def cria_banco():
banco.create_all()
def set_routes(Api):
Api.add_resource(
VagasEmpregoResource,
"/vagas"
)
if __name__ == '__main__':
from sql_alchemy import banco
banco.init_app(App)
set_routes(Api)
App.run(debug=True) | 2.4375 | 2 |
item_engine/textbase/__init__.py | GabrielAmare/ItemEngine | 0 | 12798275 | from item_engine import *
from .constants import *
from .items import *
from .functions import *
from .base_materials import *
from .materials import *
from .operators import *
from .display import *
from .setup import *
| 1.101563 | 1 |
output/models/sun_data/ctype/derivation_method/derivation_method00102m/derivation_method00102m2_xsd/derivation_method00102m2.py | tefra/xsdata-w3c-tests | 1 | 12798276 | <reponame>tefra/xsdata-w3c-tests
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "derivationMethod"
@dataclass
class A1:
class Meta:
name = "A"
value: Optional[int] = field(
default=None,
metadata={
"required": True,
}
)
t: Optional[int] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class A(A1):
class Meta:
name = "a"
namespace = "derivationMethod"
| 2.234375 | 2 |
models.py | ethanabrooks/jax-rl | 0 | 12798277 | from flax import nn
import jax
from haiku._src.typing import PRNGKey
from jax import random
import jax.numpy as jnp
import numpy as onp
from typing import List, Tuple
from utils import gaussian_likelihood
class TD3Actor(nn.Module):
def apply(self, x, action_dim, max_action):
x = nn.Dense(x, features=256)
x = nn.relu(x)
x = nn.Dense(x, features=256)
x = nn.relu(x)
x = nn.Dense(x, features=action_dim)
return max_action * nn.tanh(x)
class TD3Critic(nn.Module):
def apply(self, state, action, Q1=False):
state_action = jnp.concatenate([state, action], axis=1)
q1 = nn.Dense(state_action, features=256)
q1 = nn.relu(q1)
q1 = nn.Dense(q1, features=256)
q1 = nn.relu(q1)
q1 = nn.Dense(q1, features=1)
if Q1:
return q1
q2 = nn.Dense(state_action, features=256)
q2 = nn.relu(q2)
q2 = nn.Dense(q2, features=256)
q2 = nn.relu(q2)
q2 = nn.Dense(q2, features=1)
return q1, q2
class DoubleCritic(nn.Module):
def apply(self, state, action, Q1=False):
state_action = jnp.concatenate([state, action], axis=1)
q1 = nn.Dense(state_action, features=500)
q1 = nn.LayerNorm(q1)
q1 = nn.tanh(q1)
q1 = nn.Dense(q1, features=500)
q1 = nn.elu(q1)
q1 = nn.Dense(q1, features=1)
if Q1:
return q1
q2 = nn.Dense(state_action, features=500)
q2 = nn.LayerNorm(q2)
q2 = nn.tanh(q2)
q2 = nn.Dense(q2, features=500)
q2 = nn.elu(q2)
q2 = nn.Dense(q2, features=1)
return q1, q2
class GaussianPolicy(nn.Module):
def apply(
self,
x,
action_dim,
max_action,
key=None,
MPO=False,
sample=False,
log_sig_min=-20,
log_sig_max=2,
):
x = nn.Dense(x, features=200)
x = nn.LayerNorm(x)
x = nn.tanh(x)
x = nn.Dense(x, features=200)
x = nn.elu(x)
x = nn.Dense(x, features=2 * action_dim)
mu, log_sig = jnp.split(x, 2, axis=-1)
log_sig = nn.softplus(log_sig)
log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max)
if MPO:
return mu, log_sig
if not sample:
return max_action * nn.tanh(mu), log_sig
else:
pi = mu + random.normal(key, mu.shape) * jnp.exp(log_sig)
log_pi = gaussian_likelihood(pi, mu, log_sig)
pi = nn.tanh(pi)
log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6), axis=1)
return max_action * pi, log_pi
class Constant(nn.Module):
def apply(self, start_value, dtype=jnp.float32):
value = self.param("value", (1,), nn.initializers.ones)
return start_value * jnp.asarray(value, dtype)
def build_constant_model(start_value, init_rng):
constant = Constant.partial(start_value=start_value)
_, init_params = constant.init(init_rng)
return nn.Model(constant, init_params)
def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng):
actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action)
_, init_params = actor.init_by_shape(init_rng, input_shapes)
return nn.Model(actor, init_params)
def build_td3_critic_model(input_shapes, init_rng):
critic = TD3Critic.partial()
_, init_params = critic.init_by_shape(init_rng, input_shapes)
return nn.Model(critic, init_params)
def build_model(module: nn.Module, key: PRNGKey, input_shapes):
_, init_params = module.init_by_shape(key, input_shapes)
return nn.Model(module, init_params)
def build_double_critic_model(input_shapes, init_rng):
critic = DoubleCritic.partial()
_, init_params = critic.init_by_shape(init_rng, input_shapes)
return nn.Model(critic, init_params)
def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng):
actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action)
_, init_params = actor.init_by_shape(init_rng, input_shapes)
return nn.Model(actor, init_params)
| 2.09375 | 2 |
bootcamp/authentication/admin.py | ChowBu/bootcamp | 0 | 12798278 | <reponame>ChowBu/bootcamp
from django.contrib import admin
from bootcamp.authentication.models import Profile
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
# 不能添加tags 会出错,暂时不管
# list_display = ('id','user','location', 'url','job_title',)
# search_fields = ('user','location', 'url','job_title',)
# 设置列表页展示条目数比较小,可提高打开列表页性能
list_per_page = 10
# list_filter是性能杀手,尽量不要开启
# list_filter = ('user', 'post',)
# 开始没有加id进入list_display导致第一项title没法编辑,出现以上问题,后来加了个id在前面解决
# list_editable = ('user','location', 'url','job_title',) | 2.09375 | 2 |
tests/test_anagram.py | npcasler/anagram | 0 | 12798279 | # Anagram Utility
# License: MIT
""" Tests for anagram"""
import unittest
import errno
import shutil
from os.path import join
import anagram.anagram as anagram
class TestAnagram(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = anagram.args_options()
cls.mock_path = '/path/to/folder'
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree('path')
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
def test_incorrect_dict_path(self):
""" Test read from non-existant file """
args = ['--input', 'tests/samples/test']
with self.assertRaises(IOError):
anagram.main(self.parser.parse_args(args))
def test_negative_threshold(self):
""" Test an negative value for the character threshold """
args = ['--count', '-1']
with self.assertRaises(ValueError):
anagram.main(self.parser.parse_args(args))
def test_empty_output(self):
""" Test an empty output """
args = ['--input', 'tests/samples/empty']
self.assertEquals(anagram.main(self.parser.parse_args(args)),
'Input dataset is empty')
| 3.328125 | 3 |
moulin/builders/android_kernel.py | Deedone/moulin | 0 | 12798280 | # SPDX-License-Identifier: Apache-2.0
# Copyright 2021 EPAM Systems
"""
Android kernel builder module
"""
import os.path
from typing import List
from moulin.yaml_wrapper import YamlValue
from moulin import ninja_syntax
def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str],
generator: ninja_syntax.Writer):
"""
Return configured AndroidKernel class
"""
return AndroidKernel(conf, name, build_dir, src_stamps, generator)
def gen_build_rules(generator: ninja_syntax.Writer):
"""
Generate yocto build rules for ninja
"""
cmd = " && ".join([
"export $env",
"cd $build_dir",
"build/build.sh",
])
generator.rule("android_kernel_build",
command=f'bash -c "{cmd}"',
description="Invoke Android Kernel build script",
pool="console")
generator.newline()
class AndroidKernel:
"""
AndroidBuilder class generates Ninja rules for given Android build configuration
"""
def __init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps: List[str],
generator: ninja_syntax.Writer):
self.conf = conf
self.name = name
self.generator = generator
self.src_stamps = src_stamps
self.build_dir = build_dir
def gen_build(self):
"""Generate ninja rules to build AOSP"""
env_node = self.conf.get("env", None)
if env_node:
env_values = [x.as_str for x in env_node]
else:
env_values = []
env = " ".join(env_values)
variables = {
"build_dir": self.build_dir,
"env": env,
}
targets = self.get_targets()
self.generator.build(targets, "android_kernel_build", self.src_stamps, variables=variables)
self.generator.newline()
return targets
def get_targets(self):
"Return list of targets that are generated by this build"
return [os.path.join(self.build_dir, t.as_str) for t in self.conf["target_images"]]
def capture_state(self):
"""
This method should capture Android Kernel state for a reproducible builds.
Luckily, there is nothing to do, as Android state is controlled solely by
its repo state. And repo state is captured by repo fetcher code.
"""
| 2.125 | 2 |
backend/lk/logic/websites.py | Purus/LaunchKitDocker | 2,341 | 12798281 | <gh_stars>1000+
# encoding: utf-8
#
# Copyright 2016 Cluster Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
from django.conf import settings
from django.db import transaction
from backend.lk.logic import appstore_fetch
from backend.lk.models import AppWebsiteScreenshot
from backend.lk.models import AppWebsitePage
from backend.util import dnsutil
from backend.util import text
def check_domain_for_cname_record(domain):
cname, error_message = dnsutil.get_cname_for_domain(domain)
if error_message:
return False, error_message
if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME:
return False, 'The CNAME value is set but incorrect'
return True, None
def _short_description(long_description):
if not long_description:
return long_description
return '%s...' % long_description[:180]
def example_from_itunes_id(itunes_id, country):
info = appstore_fetch.app_info_with_id(itunes_id, country)
app_name, app_tagline = text.app_name_tagline(info.name)
example_website = {
'id': 'example',
'appName': app_name,
'tagline': app_tagline,
'longDescription': info.description,
'shortDescription': _short_description(info.description),
'itunesId': info.itunes_id,
'images': {
'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]},
'icon': {'url': info.icon_512},
}
}
return example_website
def get_fancy_cluster_example():
return {
'id': 'example',
'domain': 'cluster.co',
'template': '',
'appName': 'Cluster',
'tagline': 'Privately share special moments with friends and family',
'shortDescription': 'Cluster gives you a private space to share photos and memories with the people you choose, away from social media. Make your own groups and share pics, videos, comments, and chat!',
'longDescription': u'Cluster makes it possible to create private groups where you share moments through photos and videos with the people you care about. Create a group with family, a group of friends, coworkers, people from your home town, or anyone else!\r\n\r\nGreat for:\r\n\u2022 New Moms! Share photos of a new baby with close friends and family without spamming everyone on other social networks\r\n\u2022 College Students! Share memories with friends not appropriate for Facebook\r\n\u2022 Families! Keep in touch even if you\u2019re not in the same place.\r\n\r\nTons of people already trust Cluster. Here\u2019s why:\r\n\r\n\u2022 Private & secure: Only invited members of the group can see what you post.\r\n\u2022 An app for everyone: Access Cluster through gorgeous mobile apps and the web.\r\n\u2022 Relevant notifications: Know when people you invited post new things to the group.',
'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby',
'itunesId': '596595032',
'playStoreId': 'com.getcluster.android',
'supportLink': 'http://cluster.co/help',
'termsLink': 'http://cluster.co/terms',
'privacyLink': 'http://cluster.co/privacy',
'primaryColor': '#0092F2',
'font': 'Lato',
'frameScreenshots': 'white',
'images': {
'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'},
'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'},
'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'},
'screenshots':
{'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'},
{'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'},
{'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'},
{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'},
{'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'},
],
}
},
}
@transaction.atomic
def update_website_screenshots(website, screenshot_images, platform):
existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order'))
screenshot_image_ids = set([i.id for i in screenshot_images])
screenshots_to_delete = [s for s in existing_screenshots
if s.image_id not in screenshot_image_ids]
for screenshot in screenshots_to_delete:
screenshot.image.decrement_ref_count()
screenshot.delete()
existing_by_image_id = {i.image_id: i for i in existing_screenshots}
for i, image in enumerate(screenshot_images):
order = i + 1
if image.id in existing_by_image_id:
screenshot = existing_by_image_id[image.id]
if screenshot.order != order:
screenshot.order = order
screenshot.save()
else:
image.increment_ref_count()
screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order)
screenshot.save()
@transaction.atomic
def create_or_update_hosted_page(website, slug, body):
hosted_page_titles = {
'terms' : 'Terms and Conditions',
'privacy' : 'Privacy Policy',
'support' : 'Support',
}
page = AppWebsitePage.objects.filter(website=website, slug=slug).first()
if page and body:
page.body = body
page.save()
elif not page and body:
AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug])
elif page and not body:
page.delete()
@transaction.atomic
def delete_website(website):
screenshots = list(website.screenshots.all())
for screenshot in screenshots:
screenshot.image.decrement_ref_count()
screenshot.delete()
if website.icon:
website.icon.decrement_ref_count()
website.icon = None
if website.logo:
website.logo.decrement_ref_count()
website.logo = None
if website.background:
website.background.decrement_ref_count()
website.background = None
# TODO(Taylor): Mark as deleted instead of actually deleting potentially huge number of rows
# AppWebsiteView.objects.filter(website_id=website.id).delete()
website.domain = None
website.delete_time = datetime.now()
website.save()
| 1.5625 | 2 |
plumeria/config/common.py | sk89q/plumeria | 18 | 12798282 | <filename>plumeria/config/common.py<gh_stars>10-100
"""A list of common configuration options that might be used by plugins."""
from functools import wraps
from plumeria import config
from plumeria.command import CommandError
from plumeria.config.types import boolstr, dateformatstr
from plumeria.core.scoped_config import scoped_config
allow_games = config.create("common", "allow_games", type=boolstr, fallback=False,
comment="Whether to allow game functions",
scoped=True, private=False)
nsfw = config.create("common", "nsfw", type=boolstr, fallback=False, comment="Whether to allow NSFW functions",
scoped=True, private=False)
short_date_time_format = config.create("common", "date_time_short", type=dateformatstr,
fallback="%b %m, %Y %I:%M %p %Z", comment="Short date and time format",
scoped=True, private=False)
config.add(allow_games)
config.add(nsfw)
config.add(short_date_time_format)
def games_allowed_only(f):
@wraps(f)
async def wrapper(message, *args, **kwargs):
if not scoped_config.get(allow_games, message.channel):
raise CommandError(
"Games aren't allowed here! Enable games by setting the `common/allow_games` config setting to `yes` for the channel or server.")
if not message.channel.is_private:
return await f(message, *args, **kwargs)
return wrapper
| 2.3125 | 2 |
tests/test_totalchance.py | sponsfreixes/dice_stats | 2 | 12798283 | from fractions import Fraction
from dice_stats import Dice
def test_totalchance():
d6 = Dice.from_dice(6)
for c in [
Fraction(1),
Fraction(1, 2),
Fraction(1, 6),
]:
assert d6 @ c * 2 == 2 * d6 @ c
def test_nested_d6():
d6 = Dice.from_dice(6)
d6_a = Dice.sum(v * d6 for v, c in d6.items())
d6_b = Dice.sum([1 * d6, 2 * d6, 3 * d6, 4 * d6, 5 * d6, 6 * d6])
assert d6_a == d6_b
assert d6_a._total_chance == Fraction(6)
def test_nested_d6_chance():
d6 = Dice.from_dice(6)
d6_a = Dice.sum(v * d6 @ c for v, c in d6.items())
c = Fraction(1, 6)
d6_b = Dice.sum(
[1 * d6 @ c, 2 * d6 @ c, 3 * d6 @ c, 4 * d6 @ c, 5 * d6 @ c, 6 * d6 @ c,]
)
assert d6_a == d6_b
assert d6_a._total_chance == Fraction(1)
def test_nested_d6_chance_squared():
d6 = Dice.from_dice(6)
d6_a = Dice.sum(v * d6 @ c for v, c in d6.items())
c = Fraction(1, 6)
d6_b = Dice.sum(
[d6 @ c * 1, d6 @ c * 2, d6 @ c * 3, d6 @ c * 4, d6 @ c * 5, d6 @ c * 6,]
)
assert d6_a == d6_b
def test_applyfunction():
d6 = Dice.from_dice(6)
result = d6.apply_functions({(1,): lambda d: d6 @ d}, lambda d: d)
expected = Dice.from_full(
{
1: Fraction(1, 36),
2: Fraction(7, 36),
3: Fraction(7, 36),
4: Fraction(7, 36),
5: Fraction(7, 36),
6: Fraction(7, 36),
}
)
assert result == expected
assert result._total_chance == Fraction(1)
def test_applyfunction_old():
d6 = Dice.from_dice(6)
result = d6.apply_functions({(1,): lambda _: d6}, lambda d: d)
expected = Dice.from_external(
{
1: Fraction(1, 6),
2: Fraction(1, 3),
3: Fraction(1, 3),
4: Fraction(1, 3),
5: Fraction(1, 3),
6: Fraction(1, 3),
},
Fraction(11, 6),
)
assert result == expected
assert result._total_chance == Fraction(11, 6)
def test_applydice():
result = Dice.from_dice(3).apply_dice(
{(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6),
)
expected = Dice.from_external(
{
1: Fraction(5, 18),
2: Fraction(5, 18),
3: Fraction(5, 18),
4: Fraction(5, 18),
5: Fraction(1, 9),
6: Fraction(1, 9),
},
Fraction(4, 3),
)
assert result == expected
def test_sum_to_one():
result = Dice.sum(
[Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 / 2),]
)
expected = Dice.from_external(
{
1: Fraction(1, 4),
2: Fraction(1, 4),
3: Fraction(1, 4),
4: Fraction(1, 12),
5: Fraction(1, 12),
6: Fraction(1, 12),
},
Fraction(1),
)
assert result == expected
def test_sum_to_half():
result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),])
expected = Dice.from_external(
{
1: Fraction(1, 3),
2: Fraction(1, 3),
3: Fraction(1, 3),
4: Fraction(1, 6),
5: Fraction(1, 6),
6: Fraction(1, 6),
},
Fraction(3, 2),
)
assert result == expected
| 3.28125 | 3 |
datasets/sult_model/fetch_smiles.py | ssirimulla/openDMPK | 17 | 12798284 | <gh_stars>10-100
# Script to fetch smiles using chembl id
# Source: https://www.ebi.ac.uk/chembl/ws
# For monkey patching (necessary?)
# import gevent.monkey
# gevent.monkey.patch_all()
# from requests.packages.urllib3.util.ssl_ import create_urllib3_context
# create_urllib3_context()
from chembl_webresource_client.new_client import new_client
molecule = new_client.molecule
def get_smiles(_id):
mol = molecule.get(_id)
return mol["molecule_structures"]["canonical_smiles"]
print(get_smiles("CHEMBL300797"))
| 2.359375 | 2 |
python/sqlflow_submitter/pai/model.py | pake35/sqlflow | 1 | 12798285 | # Copyright 2019 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import pickle
import tarfile
import odps
import tensorflow as tf
from tensorflow.python.platform import gfile
from sqlflow_submitter import db
import os
def save(oss_model_dir, *meta):
'''
Save model descriptions like the training SQL statements to OSS directory.
Data are saved using pickle.
Args:
oss_model_dir: OSS URI that the model will be saved to.
*meta: python objects to be saved.
Return:
None
'''
uri_parts = oss_model_dir.split("?")
if len(uri_parts) != 2:
raise ValueError("error oss_model_dir: ", oss_model_dir)
oss_path = "/".join([uri_parts[0].rstrip("/"), "sqlflow_model_desc"])
writer = gfile.GFile(oss_path, mode='w')
pickle.dump(list(meta), writer)
writer.flush()
writer.close()
def load(oss_model_dir):
'''
Load and restore a directory and metadata that are saved by `model.save`
from a MaxCompute table
Args:
oss_model_dir: OSS URI that the model will be saved to.
Return:
A list contains the saved python objects
'''
uri_parts = oss_model_dir.split("?")
if len(uri_parts) != 2:
raise ValueError("error oss_model_dir: ", oss_model_dir)
oss_path = "/".join([uri_parts[0].rstrip("/"), "sqlflow_model_desc"])
reader = gfile.GFile(oss_path, mode='r')
return pickle.load(reader)
| 2.125 | 2 |
houdini_manage/library.py | NiklasRosenstein/houdini-manage | 7 | 12798286 | # Copyright (C) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import datetime
import json
import os
import operator
import shlex
import subprocess
from . import __version__
from .config import config
def get_houdini_environment_path(hou=None):
hou = hou or config.get('houdinienv', 'houdini16.0')
if not '/' in hou and not os.sep in hou:
hou = os.path.expanduser('~/Documents/' + hou + '/houdini.env')
return os.path.normpath(hou)
def get_houdini_user_prefs_directories():
directory = os.path.expanduser('~/Documents')
if not os.path.isdir(directory):
return []
result = []
for name in os.listdir(directory):
envfile = os.path.join(directory, name, 'houdini.env')
if name.startswith('houdini') and os.path.isfile(envfile):
result.append((name, envfile))
result.sort(key=operator.itemgetter(0), reverse=True)
return result
def load_library_config(directory):
config_file = os.path.join(directory, 'houdini-library.json')
if not os.path.isfile(config_file):
raise NotALibraryError('missing library configuration file: {}'.format(config_file))
with open(config_file) as fp:
return json.load(fp)
def install_library(env, directory, overwrite=False):
# Open the librarie's configuration file.
config = load_library_config(directory)
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
version = __version__
# Initialize the default section. It's purpose is to make sure that
# Houdini's default paths do not get messed up.
section = env.get_named_section('DEFAULT')
if not section:
section = env.add_named_section('DEFAULT', '', before=env.get_first_named_section())
else:
section.clear()
section.add_comment(' Automatically generated by houdini-manage v{}'.format(version))
section.add_comment(' Last update: {}'.format(now))
#for info in HOUDINI_PATH_ENVVARS:
# # Houdini will use the default value of the variable when it sees
# # the ampersand.
# section.add_variable(info['var'], '&')
section.add_variable('HOUDINI_PATH', '&')
section.add_variable('PYTHONPATH', '&')
# Create or update the section for this library.
directory = os.path.normpath(os.path.abspath(directory))
section = env.get_named_section('library:' + config['libraryName'])
if not section:
previous = False
section = env.add_named_section('library:' + config['libraryName'], '')
else:
previous = True
if not overwrite:
raise PreviousInstallationFoundError(config['libraryName'])
section.clear()
section.add_comment(' Automatically generated by houdini-manage v{}'.format(version))
section.add_comment(' Last update: {}'.format(now))
#for info in HOUDINI_PATH_ENVVARS:
# if not info['dir']: continue
# vardir = os.path.join(directory, info['dir'])
# if not os.path.isdir(vardir): continue
# section.add_variable(info['var'], '$' + info['var'], vardir)
section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory)
section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python'))
section.add_variable('HLIBPATH_' + config['libraryName'], directory)
section.add_variable('HLIBVERSION_' + config['libraryName'], config['libraryVersion'])
if config.get('environment'):
section.add_comment('Environment variables specified by the library:')
for line in config['environment']:
section.add_line(line)
def remove_library(env, name):
section = env.get_library(name)
if section:
env.remove_section(section)
return True
return False
def get_houdini_application_dir():
install_dir = config.get('houdiniapp')
if install_dir:
return install_dir
if os.name == 'nt':
import winreg
key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\shell\\open\\command')
path = shlex.split(winreg.QueryValue(key, None))[0]
path = os.path.dirname(os.path.dirname(path))
else:
path = ''
return path
def build_dso(hou_app_dir, library_dir):
hcustom = os.path.join(hou_app_dir, 'bin\\hcustom.exe' if os.name == 'nt' else 'bin/hcustom')
library_dir = os.path.abspath(library_dir)
config = load_library_config(library_dir)
dso_source = os.path.join(library_dir, config.get('dsoSource', 'dso_source'))
if not os.path.isdir(dso_source):
return 0, True
dso_dir = os.path.join(library_dir, 'dso')
if not os.path.isdir(dso_dir):
os.makedirs(dso_dir)
files = []
for name in os.listdir(dso_source):
ext = os.path.splitext(name)[1].lower()
if ext in ('.c', '.cc', '.cxx', '.cpp'):
files.append(os.path.join(dso_source, name))
if not files:
return 0, True
command = [hcustom]
if config.get('dsoDebug'):
command += '-g'
for path in config.get('dsoInclude', []):
command += ['-I', os.path.join(library_dir, path)]
for path in config.get('dsoLibdir', []):
command += ['-L', os.path.join(library_dir, path)]
for lib in config.get('dsoLibs', []):
command += ['-l', lib]
command += ['-i', dso_dir]
print('Building DSOs for "{}" ...'.format(config['libraryName']))
ok = True
for filename in files:
current_command = command + [filename]
print()
print(' {} ...'.format(os.path.basename(filename)))
print()
res = subprocess.call(current_command, cwd=dso_dir)
if res != 0:
print('Error: hcustom failed with exit code', res)
ok = False
print('Done.')
return len(files), ok
class InstallError(Exception):
pass
class NotALibraryError(InstallError):
pass
class PreviousInstallationFoundError(InstallError):
def __init__(self, library_name):
self.library_name = library_name
| 1.882813 | 2 |
code/waldo/prepare/summarize.py | amarallab/waldo | 0 | 12798287 | from __future__ import print_function, absolute_import, unicode_literals, division
import six
from six.moves import (zip, filter, map, reduce, input, range)
# standard library
import functools
# third party
# project specific
from waldo.conf import settings
from waldo import wio
from . import secondary
from . import primary
__all__ = ['summarize']
CALLBACK_LOAD_FRAC = 0.02
CALLBACK_PRIMARY_FRAC = 0.90
CALLBACK_SECONDARY_FRAC = 0.08
# TODO remove ex_id from parameters. rely solely on experiment
def summarize(ex_id, experiment=None, verbose=False, callback=None):
"""
intermediate summary data.
"""
if verbose:
talk = print
else:
talk = lambda *a, **k: None
if callback:
def cb_load(p):
callback(CALLBACK_LOAD_FRAC * p)
def cb_pri(p):
callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p)
def cb_sec(p):
callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC +
CALLBACK_SECONDARY_FRAC * p)
def cb_pri_steps(p, step, num_steps):
cb_pri((step + p) / num_steps)
else:
cb_load = cb_pri = cb_sec = cb_pri_steps = None
talk('preparing blob files')
if experiment is None:
# load experiment
experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load)
talk('Loaded experiment ID: {}'.format(experiment.id))
def save_processed_data(data, experiment):
talk(' - Saving to CSVs...')
dumped_keys = []
for key, value in six.iteritems(data):
talk(' - {}'.format(key))
experiment.prepdata.dump(data_type=key, dataframe=value, index=False)
dumped_keys.append(key)
# free up memory once this is saved
for key in dumped_keys:
del data[key]
# process the basic blob data
talk(' - Summarizing raw data...')
data = {}
for i, df_type in enumerate(['bounds', 'terminals', 'sizes']):
if callback:
cb = lambda x: cb_pri_steps(x, i, 3)
else:
cb = None
print(' - Summarizing {df} data...'.format(df=df_type))
data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb)
save_processed_data(data, experiment)
# TODO: remove this commented method. it keeps failing.
# data = primary.summarize(experiment, callback=cb_pri)
# generate secondary data
talk(' - Generating secondary data...')
# data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds'])
data['roi'] = secondary.in_roi(experiment=experiment, bounds=None)
if callback:
cb_sec(0.4)
save_processed_data(data, experiment)
if callback:
cb_sec(0.6)
# data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes'])
data['moved'] = secondary.bodylengths_moved(experiment=experiment)
if callback:
cb_sec(0.8)
save_processed_data(data, experiment)
if callback:
cb_sec(1)
# dump it out | 1.984375 | 2 |
ismore/invasive/sim_passive_movement.py | DerekYJC/bmi_python | 0 | 12798288 | import numpy as np
import socket, struct
from ismore import settings, udp_feedback_client
import time
from ismore import common_state_lists, ismore_bmi_lib
import pandas as pd
import pickle
import os
class Patient(object):
def __init__(self, targets_matrix_file):
self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR]
self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
self.n_dofs = [range(3), range(3, 7)]
self.plant_types = ['ArmAssist', 'ReHand']
self.aa_p = range(3) #common_state_lists.aa_pos_states
self.rh_p = range(4) #common_state_lists.rh_pos_states
self.rh_v = range(4, 8) #common_state_lists.rh_vel_states
self.aa_v = range(3, 6)
#self.aa = udp_feedback_client.ArmAssistData()
#self.rh = udp_feedback_client.ReHandData()
#self.aa.start()
#self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p]
#self.last_aa_pos_t = time.time()
#self.rh.start()
assister_kwargs = {
'call_rate': 20,
'xy_cutoff': 5,
}
self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs)
self.targets_matrix = pickle.load(open(targets_matrix_file))
def send_vel(self, vel):
for i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)):
self._send_command('SetSpeed %s %s\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock)
def pack_vel(self, vel, n_dof):
format_str = "%f " * len(n_dof)
return format_str % tuple(vel)
def _send_command(self, command, addr, sock):
sock.sendto(command, addr)
def _get_current_state(self):
#aa_data = self.aa.get()['data']
with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f:
lines = f.read().splitlines()
last_line = lines[-2]
aa_data = np.array([float(i) for i in last_line.split(',')])
with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f:
lines = f.read().splitlines()
last_line = lines[-2]
rh_data = np.array([float(i) for i in last_line.split(',')])
#daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)])
#aa_vel = daa/(time.time() - self.last_aa_pos_t)
#self.last_aa_pos = aa_data[self.aa_p]
#rh_data = self.rh.get()['data']
pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] ))
vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] ))
return np.hstack((pos, vel))
def get_to_target(self, target_pos):
current_state = np.mat(self._get_current_state()).T
target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T
assist_kwargs = self.assister(current_state, target_state, 1., mode=None)
self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14])))
return np.sum((np.array(current_state)-np.array(target_state))**2)
def go_to_target(self, target_name, tix=0):
if len(self.targets_matrix[target_name].shape) > 1:
targ = self.targets_matrix[target_name][tix]
else:
targ = self.targets_matrix[target_name]
d = 100
while d > 20:
d = self.get_to_target(targ)
print d
| 2.078125 | 2 |
batchglm/train/tf/base_glm_all/estimator_graph.py | SabrinaRichter/batchglm | 0 | 12798289 | <reponame>SabrinaRichter/batchglm
from typing import Union
import logging
import tensorflow as tf
import numpy as np
import xarray as xr
from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM
from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM
from .external import op_utils
from .external import pkg_constants
logger = logging.getLogger(__name__)
class FullDataModelGraph(FullDataModelGraphGLM):
"""
    Computational graph to evaluate GLM metrics on the full data set.
"""
def __init__(
self,
sample_indices: tf.Tensor,
fetch_fn,
batch_size: Union[int, tf.Tensor],
model_vars,
constraints_loc,
constraints_scale,
train_a,
train_b,
noise_model: str,
dtype
):
"""
        :param sample_indices:
            Tensor of observation indices that defines the full data set to evaluate.
        :param fetch_fn:
            Function mapping observation indices to the corresponding data,
            returning (indices, (X, design_loc, design_scale, size_factors)).
        :param batch_size: int
            Size of mini-batches used.
        :param model_vars: ModelVars
            Variables of the model. Contains the tf.Variables which are optimized.
        :param constraints_loc: tensor (all parameters x independent parameters)
            Tensor that encodes how the complete parameter set, which includes dependent
            parameters, arises from the independent parameters: all = <constraints, indep>.
            This tensor describes this relation for the mean model.
            This form of constraints is used in vector generalized linear models (VGLMs).
        :param constraints_scale: tensor (all parameters x independent parameters)
            Tensor that encodes how the complete parameter set, which includes dependent
            parameters, arises from the independent parameters: all = <constraints, indep>.
            This tensor describes this relation for the dispersion model.
            This form of constraints is used in vector generalized linear models (VGLMs).
        :param train_a: bool
            Whether to train the mean model. If False, the initialisation is kept.
        :param train_b: bool
            Whether to train the dispersion model. If False, the initialisation is kept.
        :param noise_model: str
            Name of the noise model; currently only "nb" (negative binomial) is handled here.
        :param dtype: Precision used in TensorFlow.
"""
if noise_model == "nb":
from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM
else:
raise ValueError("noise model not rewcognized")
self.noise_model = noise_model
dataset = tf.data.Dataset.from_tensor_slices(sample_indices)
batched_data = dataset.batch(batch_size)
batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS)
batched_data = batched_data.prefetch(1)
def map_model(idx, data) -> BasicModelGraph:
X, design_loc, design_scale, size_factors = data
model = BasicModelGraph(
X=X,
design_loc=design_loc,
design_scale=design_scale,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
a_var=model_vars.a_var,
b_var=model_vars.b_var,
dtype=dtype,
size_factors=size_factors)
return model
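        # Build one model graph over the complete set of sample indices to expose
        # per-observation tensors (X, design matrices, mu, r, probabilities) below.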
model = map_model(*fetch_fn(sample_indices))
with tf.name_scope("log_likelihood"):
log_likelihood = op_utils.map_reduce(
last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1),
data=batched_data,
map_fn=lambda idx, data: map_model(idx, data).log_likelihood,
parallel_iterations=1,
)
norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype)
norm_neg_log_likelihood = - norm_log_likelihood
with tf.name_scope("loss"):
loss = tf.reduce_sum(norm_neg_log_likelihood)
with tf.name_scope("hessians"):
# Hessian of full model for reporting.
hessians_full = Hessians(
batched_data=batched_data,
sample_indices=sample_indices,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.HESSIAN_MODE,
noise_model=noise_model,
iterator=True,
hess_a=True,
hess_b=True,
dtype=dtype
)
# Hessian of submodel which is to be trained.
if train_a or train_b:
if not train_a or not train_b:
hessians_train = Hessians(
batched_data=batched_data,
sample_indices=sample_indices,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.HESSIAN_MODE,
noise_model=noise_model,
iterator=True,
hess_a=train_a,
hess_b=train_b,
dtype=dtype
)
else:
hessians_train = hessians_full
else:
hessians_train = None
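            # Fisher information matrix of full model for reporting.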
fim_full = FIM(
batched_data=batched_data,
sample_indices=sample_indices,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.HESSIAN_MODE,
noise_model=noise_model,
iterator=True,
update_a=True,
update_b=True,
dtype=dtype
)
# Fisher information matrix of submodel which is to be trained.
if train_a or train_b:
if not train_a or not train_b:
fim_train = FIM(
batched_data=batched_data,
sample_indices=sample_indices,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.HESSIAN_MODE,
noise_model=noise_model,
iterator=True,
update_a=train_a,
update_b=train_b,
dtype=dtype
)
else:
fim_train = fim_full
else:
fim_train = None
with tf.name_scope("jacobians"):
# Jacobian of full model for reporting.
jacobian_full = Jacobians(
batched_data=batched_data,
sample_indices=sample_indices,
batch_model=None,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.JACOBIAN_MODE,
noise_model=noise_model,
iterator=True,
jac_a=True,
jac_b=True,
dtype=dtype
)
# Jacobian of submodel which is to be trained.
if train_a or train_b:
if not train_a or not train_b:
jacobian_train = Jacobians(
batched_data=batched_data,
sample_indices=sample_indices,
batch_model=None,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.JACOBIAN_MODE,
noise_model=noise_model,
iterator=True,
jac_a=train_a,
jac_b=train_b,
dtype=dtype
)
else:
jacobian_train = jacobian_full
else:
jacobian_train = None
self.X = model.X
self.design_loc = model.design_loc
self.design_scale = model.design_scale
self.batched_data = batched_data
self.mu = model.mu
self.r = model.r
self.sigma2 = model.sigma2
self.probs = model.probs
self.log_probs = model.log_probs
        # Attributes specific to the full-data model graph.
self.sample_indices = sample_indices
self.log_likelihood = log_likelihood
self.norm_log_likelihood = norm_log_likelihood
self.norm_neg_log_likelihood = norm_neg_log_likelihood
self.loss = loss
self.jac = jacobian_full.jac
self.jac_train = jacobian_train
self.hessians = hessians_full
self.hessians_train = hessians_train
self.fim = fim_full
self.fim_train = fim_train
class BatchedDataModelGraph(BatchedDataModelGraphGLM):
"""
    Computational graph to evaluate GLM metrics on a batched (mini-batch) data set.
"""
def __init__(
self,
num_observations,
fetch_fn,
batch_size: Union[int, tf.Tensor],
buffer_size: int,
model_vars,
constraints_loc,
constraints_scale,
train_a,
train_b,
noise_model: str,
dtype
):
"""
        :param num_observations: int
            Number of observations in the full data set; mini-batch indices are drawn from these.
        :param fetch_fn:
            Function mapping observation indices to the corresponding data,
            returning (indices, (X, design_loc, design_scale, size_factors)).
        :param batch_size: int
            Size of mini-batches used.
        :param buffer_size: int
            Number of mini-batches to prefetch in the input pipeline.
        :param model_vars: ModelVars
            Variables of the model. Contains the tf.Variables which are optimized.
        :param constraints_loc: tensor (all parameters x independent parameters)
            Tensor that encodes how the complete parameter set, which includes dependent
            parameters, arises from the independent parameters: all = <constraints, indep>.
            This tensor describes this relation for the mean model.
            This form of constraints is used in vector generalized linear models (VGLMs).
        :param constraints_scale: tensor (all parameters x independent parameters)
            Tensor that encodes how the complete parameter set, which includes dependent
            parameters, arises from the independent parameters: all = <constraints, indep>.
            This tensor describes this relation for the dispersion model.
            This form of constraints is used in vector generalized linear models (VGLMs).
        :param train_a: bool
            Whether to train the mean model. If False, the initialisation is kept.
        :param train_b: bool
            Whether to train the dispersion model. If False, the initialisation is kept.
        :param noise_model: str
            Name of the noise model; currently only "nb" (negative binomial) is handled here.
        :param dtype: Precision used in TensorFlow.
"""
if noise_model == "nb":
from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM
else:
raise ValueError("noise model not rewcognized")
self.noise_model = noise_model
with tf.name_scope("input_pipeline"):
data_indices = tf.data.Dataset.from_tensor_slices((
tf.range(num_observations, name="sample_index")
))
training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size))
training_data = training_data.batch(batch_size, drop_remainder=True)
training_data = training_data.map(tf.contrib.framework.sort) # sort indices
training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS)
training_data = training_data.prefetch(buffer_size)
iterator = training_data.make_one_shot_iterator()
batch_sample_index, batch_data = iterator.get_next()
(batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data
with tf.name_scope("batch"):
batch_model = BasicModelGraph(
X=batch_X,
design_loc=batch_design_loc,
design_scale=batch_design_scale,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
a_var=model_vars.a_var,
b_var=model_vars.b_var,
dtype=dtype,
size_factors=batch_size_factors
)
            # Define the jacobian on the batched model for newton-raphson:
# (note that these are the Jacobian matrix blocks
# of the trained subset of parameters).
if train_a or train_b:
batch_jac = Jacobians(
batched_data=batch_data,
sample_indices=batch_sample_index,
batch_model=batch_model,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.JACOBIAN_MODE,
noise_model=noise_model,
iterator=False,
jac_a=train_a,
jac_b=train_b,
dtype=dtype
)
else:
batch_jac = None
            # Define the hessian on the batched model for newton-raphson:
# (note that these are the Hessian matrix blocks
# of the trained subset of parameters).
if train_a or train_b:
batch_hessians = Hessians(
batched_data=batch_data,
sample_indices=batch_sample_index,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.HESSIAN_MODE,
noise_model=noise_model,
iterator=False,
hess_a=train_a,
hess_b=train_b,
dtype=dtype
)
else:
batch_hessians = None
# Define the IRLS components on the batched model:
# (note that these are the IRLS matrix blocks
# of the trained subset of parameters).
if train_a or train_b:
batch_fim = FIM(
batched_data=batch_data,
sample_indices=batch_sample_index,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
model_vars=model_vars,
mode=pkg_constants.HESSIAN_MODE,
noise_model=noise_model,
iterator=False,
update_a=train_a,
update_b=train_b,
dtype=dtype
)
else:
batch_fim = None
self.X = batch_model.X
self.design_loc = batch_model.design_loc
self.design_scale = batch_model.design_scale
self.batched_data = batch_data
self.mu = batch_model.mu
self.r = batch_model.r
self.sigma2 = batch_model.sigma2
self.probs = batch_model.probs
self.log_probs = batch_model.log_probs
self.sample_indices = batch_sample_index
self.log_likelihood = batch_model.log_likelihood
self.norm_log_likelihood = batch_model.norm_log_likelihood
self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood
self.loss = batch_model.loss
self.jac_train = batch_jac
self.hessians_train = batch_hessians
self.fim_train = batch_fim
class EstimatorGraphAll(EstimatorGraphGLM):
"""
"""
mu: tf.Tensor
sigma2: tf.Tensor
def __init__(
self,
fetch_fn,
feature_isnonzero,
num_observations,
num_features,
num_design_loc_params,
num_design_scale_params,
num_loc_params,
num_scale_params,
constraints_loc: xr.DataArray,
constraints_scale: xr.DataArray,
graph: tf.Graph = None,
batch_size: int = None,
init_a=None,
init_b=None,
train_loc: bool = True,
train_scale: bool = True,
provide_optimizers: Union[dict, None] = None,
termination_type: str = "global",
extended_summary=False,
noise_model: str = None,
dtype="float32"
):
"""
:param fetch_fn:
TODO
:param feature_isnonzero:
Whether all observations of a feature are zero. Features for which this
is the case are not fitted.
:param num_observations: int
Number of observations.
:param num_features: int
Number of features.
:param num_design_loc_params: int
Number of parameters per feature in mean model.
:param num_design_scale_params: int
Number of parameters per feature in scale model.
:param graph: tf.Graph
:param batch_size: int
Size of mini-batches used.
:param init_a: nd.array (mean model size x features)
Initialisation for all parameters of mean model.
:param init_b: nd.array (dispersion model size x features)
Initialisation for all parameters of dispersion model.
:param constraints_loc: tensor (all parameters x dependent parameters)
Tensor that encodes how complete parameter set which includes dependent
            parameters arises from independent parameters: all = <constraints, indep>.
This tensor describes this relation for the mean model.
This form of constraints is used in vector generalized linear models (VGLMs).
:param constraints_scale: tensor (all parameters x dependent parameters)
Tensor that encodes how complete parameter set which includes dependent
            parameters arises from independent parameters: all = <constraints, indep>.
This tensor describes this relation for the dispersion model.
This form of constraints is used in vector generalized linear models (VGLMs).
:param train_loc: bool
Whether to train mean model. If False, the initialisation is kept.
:param train_scale: bool
Whether to train dispersion model. If False, the initialisation is kept.
:param provide_optimizers:
:param termination_type:
:param extended_summary:
:param dtype: Precision used in tensorflow.
"""
if noise_model == "nb":
from .external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM
else:
raise ValueError("noise model not recognized")
self.noise_model = noise_model
EstimatorGraphGLM.__init__(
self=self,
num_observations=num_observations,
num_features=num_features,
num_design_loc_params=num_design_loc_params,
num_design_scale_params=num_design_scale_params,
num_loc_params=num_loc_params,
num_scale_params=num_scale_params,
graph=graph,
batch_size=batch_size,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
dtype=dtype
)
# initial graph elements
with self.graph.as_default():
with tf.name_scope("model_vars"):
self.model_vars = ModelVars(
dtype=dtype,
init_a=init_a,
init_b=init_b,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale
)
self.idx_nonconverged = np.where(self.model_vars.converged == False)[0]
# ### performance related settings
buffer_size = 4
with tf.name_scope("batched_data"):
logger.debug(" ** Build batched data model")
self.batched_data_model = BatchedDataModelGraph(
num_observations=self.num_observations,
fetch_fn=fetch_fn,
batch_size=batch_size,
buffer_size=buffer_size,
model_vars=self.model_vars,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
train_a=train_loc,
train_b=train_scale,
noise_model=noise_model,
dtype=dtype
)
with tf.name_scope("full_data"):
logger.debug(" ** Build full data model")
# ### alternative definitions for custom observations:
sample_selection = tf.placeholder_with_default(
tf.range(num_observations),
shape=(None,),
name="sample_selection"
)
self.full_data_model = FullDataModelGraph(
sample_indices=sample_selection,
fetch_fn=fetch_fn,
batch_size=batch_size * buffer_size,
model_vars=self.model_vars,
constraints_loc=constraints_loc,
constraints_scale=constraints_scale,
train_a=train_loc,
train_b=train_scale,
noise_model=noise_model,
dtype=dtype
)
self._run_trainer_init(
termination_type=termination_type,
provide_optimizers=provide_optimizers,
train_loc=train_loc,
train_scale=train_scale,
dtype=dtype
)
# Define output metrics:
self._set_out_var(
feature_isnonzero=feature_isnonzero,
dtype=dtype
)
self.loss = self.full_data_model.loss
self.log_likelihood = self.full_data_model.log_likelihood
self.hessians = self.full_data_model.hessians.hessian
self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim?
# Summary statistics on feature-wise model gradients:
self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1)
with tf.name_scope('summaries'):
tf.summary.histogram('a_var', self.model_vars.a_var)
tf.summary.histogram('b_var', self.model_vars.b_var)
tf.summary.scalar('loss', self.batched_data_model.loss)
tf.summary.scalar('learning_rate', self.learning_rate)
if extended_summary:
pass
self.saver = tf.train.Saver()
self.merged_summary = tf.summary.merge_all()
| 2.5 | 2 |
tno/mpc/encryption_schemes/utils/utils.py | TNO-MPC/encryption_schemes.utils | 0 | 12798290 | """
Useful functions for creating encryption schemes.
"""
from math import gcd
from typing import Tuple
import sympy
from ._check_gmpy2 import USE_GMPY2
if USE_GMPY2:
import gmpy2
def randprime(low: int, high: int) -> int:
"""
Generate a random prime number in the range [low, high). Returns GMPY2 MPZ integer if available.
:param low: Lower bound (inclusive) of the range.
:param high: Upper bound (exclusive) of the range.
:return: Random prime number.
:raise ValueError: the lower bound should be strictly lower than the upper bound
"""
if low >= high:
        raise ValueError(
            "the lower bound should be strictly lower than the upper bound"
        )
if USE_GMPY2:
return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high))
# else
return sympy.ntheory.generate.randprime(low, high)
def pow_mod(base: int, exponent: int, modulus: int) -> int:
"""
Compute base**exponent % modulus. Uses GMPY2 if available.
:param base: base
:param exponent: exponent
:param modulus: modulus
:return: base**exponent % modulus
"""
if USE_GMPY2:
return gmpy2.powmod(base, exponent, modulus)
# else
return pow(base, exponent, modulus)
def mod_inv(value: int, modulus: int) -> int:
"""
Compute the inverse of a number, given the modulus of the group.
Note that the inverse might not exist. Uses GMPY2 if available.
:param value: The number to be inverted.
:param modulus: The group modulus.
:raise ZeroDivisionError: Raised when the inverse of the value does not exist.
:return: The inverse of a under the modulus.
"""
value %= modulus
if USE_GMPY2:
return gmpy2.invert(value, modulus)
# else
gcd_, inverse, _ = extended_euclidean(value, modulus)
if gcd_ != 1:
raise ZeroDivisionError(f"Inverse of {value} mod {modulus} does not exist.")
return inverse
def extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int, int]:
"""
Perform the extended euclidean algorithm on the input numbers.
The method returns gcd, x, y, such that a*x + b*y = gcd.
:param num_a: First number a.
:param num_b: Second number b.
:return: Tuple containing gcd, x, and y, such that a*x + b*y = gcd.
"""
# a*x + b*y = gcd
x_old, x_cur, y_old, y_cur = 0, 1, 1, 0
while num_a != 0:
quotient, num_b, num_a = num_b // num_a, num_a, num_b % num_a
y_old, y_cur = y_cur, y_old - quotient * y_cur
x_old, x_cur = x_cur, x_old - quotient * x_cur
return num_b, x_old, y_old
def lcm(num_a: int, num_b: int) -> int:
"""
Compute the least common multiple of two input numbers. Uses GMPY2 if available.
:param num_a: First number a.
:param num_b: Second number b.
:return: Least common multiple of a and b.
"""
if USE_GMPY2:
return gmpy2.lcm(num_a, num_b)
# else
return num_a * num_b // gcd(num_a, num_b)
def is_prime(number: int) -> bool:
"""
Check if the input number is a prime number. Uses GMPY2 if available
:param number: The number to check
:return: Whether the input is prime or not
"""
if USE_GMPY2:
return gmpy2.mpz(number).is_prime()
# else
return sympy.isprime(number)
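# A minimal, illustrative self-check of the helpers above (not part of the
# original module); the chosen bounds and values are arbitrary assumptions.
if __name__ == "__main__":
    p = randprime(2 ** 15, 2 ** 16)   # random prime in [2**15, 2**16)
    assert is_prime(p)
    inv = mod_inv(3, p)               # 3 * inv == 1 (mod p) since gcd(3, p) == 1
    assert (3 * inv) % p == 1
    assert pow_mod(2, p - 1, p) == 1  # Fermat's little theorem for the odd prime p
    assert lcm(4, 6) == 12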
| 3.90625 | 4 |
src/mbed_cloud/_backends/enrollment/models/bulk_create_response.py | GQMai/mbed-cloud-sdk-python | 12 | 12798291 | <reponame>GQMai/mbed-cloud-sdk-python
# coding: utf-8
"""
Enrollment API
Mbed Cloud Connect Enrollment Service allows users to claim the ownership of a device which is not yet assigned to an account. A device without an assigned account can be a device purchased from the open market (OEM dealer) or a device transferred from an account to another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document.
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BulkCreateResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_id': 'str',
'completed_at': 'datetime',
'created_at': 'datetime',
'errors_count': 'int',
'errors_report_file': 'str',
'etag': 'str',
'full_report_file': 'str',
'id': 'str',
'object': 'str',
'processed_count': 'int',
'status': 'str',
'total_count': 'int'
}
attribute_map = {
'account_id': 'account_id',
'completed_at': 'completed_at',
'created_at': 'created_at',
'errors_count': 'errors_count',
'errors_report_file': 'errors_report_file',
'etag': 'etag',
'full_report_file': 'full_report_file',
'id': 'id',
'object': 'object',
'processed_count': 'processed_count',
'status': 'status',
'total_count': 'total_count'
}
def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None):
"""
BulkCreateResponse - a model defined in Swagger
"""
self._account_id = account_id
self._completed_at = completed_at
self._created_at = created_at
self._errors_count = errors_count
self._errors_report_file = errors_report_file
self._etag = etag
self._full_report_file = full_report_file
self._id = id
self._object = object
self._processed_count = processed_count
self._status = status
self._total_count = total_count
self.discriminator = None
@property
def account_id(self):
"""
Gets the account_id of this BulkCreateResponse.
ID
:return: The account_id of this BulkCreateResponse.
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""
Sets the account_id of this BulkCreateResponse.
ID
:param account_id: The account_id of this BulkCreateResponse.
:type: str
"""
if account_id is None:
raise ValueError("Invalid value for `account_id`, must not be `None`")
self._account_id = account_id
@property
def completed_at(self):
"""
Gets the completed_at of this BulkCreateResponse.
The time of completing the bulk creation task.
:return: The completed_at of this BulkCreateResponse.
:rtype: datetime
"""
return self._completed_at
@completed_at.setter
def completed_at(self, completed_at):
"""
Sets the completed_at of this BulkCreateResponse.
The time of completing the bulk creation task.
:param completed_at: The completed_at of this BulkCreateResponse.
:type: datetime
"""
self._completed_at = completed_at
@property
def created_at(self):
"""
Gets the created_at of this BulkCreateResponse.
The time of receiving the bulk creation task.
:return: The created_at of this BulkCreateResponse.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""
Sets the created_at of this BulkCreateResponse.
The time of receiving the bulk creation task.
:param created_at: The created_at of this BulkCreateResponse.
:type: datetime
"""
if created_at is None:
raise ValueError("Invalid value for `created_at`, must not be `None`")
self._created_at = created_at
@property
def errors_count(self):
"""
Gets the errors_count of this BulkCreateResponse.
The number of enrollment identities with failed processing.
:return: The errors_count of this BulkCreateResponse.
:rtype: int
"""
return self._errors_count
@errors_count.setter
def errors_count(self, errors_count):
"""
Sets the errors_count of this BulkCreateResponse.
The number of enrollment identities with failed processing.
:param errors_count: The errors_count of this BulkCreateResponse.
:type: int
"""
if errors_count is None:
raise ValueError("Invalid value for `errors_count`, must not be `None`")
self._errors_count = errors_count
@property
def errors_report_file(self):
"""
Gets the errors_report_file of this BulkCreateResponse.
:return: The errors_report_file of this BulkCreateResponse.
:rtype: str
"""
return self._errors_report_file
@errors_report_file.setter
def errors_report_file(self, errors_report_file):
"""
Sets the errors_report_file of this BulkCreateResponse.
:param errors_report_file: The errors_report_file of this BulkCreateResponse.
:type: str
"""
self._errors_report_file = errors_report_file
@property
def etag(self):
"""
Gets the etag of this BulkCreateResponse.
etag
:return: The etag of this BulkCreateResponse.
:rtype: str
"""
return self._etag
@etag.setter
def etag(self, etag):
"""
Sets the etag of this BulkCreateResponse.
etag
:param etag: The etag of this BulkCreateResponse.
:type: str
"""
if etag is None:
raise ValueError("Invalid value for `etag`, must not be `None`")
if etag is not None and not re.search('[A-Za-z0-9]{0,256}', etag):
raise ValueError("Invalid value for `etag`, must be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`")
self._etag = etag
@property
def full_report_file(self):
"""
Gets the full_report_file of this BulkCreateResponse.
:return: The full_report_file of this BulkCreateResponse.
:rtype: str
"""
return self._full_report_file
@full_report_file.setter
def full_report_file(self, full_report_file):
"""
Sets the full_report_file of this BulkCreateResponse.
:param full_report_file: The full_report_file of this BulkCreateResponse.
:type: str
"""
self._full_report_file = full_report_file
@property
def id(self):
"""
Gets the id of this BulkCreateResponse.
Bulk ID
:return: The id of this BulkCreateResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BulkCreateResponse.
Bulk ID
:param id: The id of this BulkCreateResponse.
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
if id is not None and not re.search('^[A-Za-z0-9]{32}', id):
raise ValueError("Invalid value for `id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`")
self._id = id
@property
def object(self):
"""
Gets the object of this BulkCreateResponse.
:return: The object of this BulkCreateResponse.
:rtype: str
"""
return self._object
@object.setter
def object(self, object):
"""
Sets the object of this BulkCreateResponse.
:param object: The object of this BulkCreateResponse.
:type: str
"""
if object is None:
raise ValueError("Invalid value for `object`, must not be `None`")
allowed_values = ["enrollment-identity-bulk-uploads"]
if object not in allowed_values:
raise ValueError(
"Invalid value for `object` ({0}), must be one of {1}"
.format(object, allowed_values)
)
self._object = object
@property
def processed_count(self):
"""
Gets the processed_count of this BulkCreateResponse.
The number of enrollment identities processed until now.
:return: The processed_count of this BulkCreateResponse.
:rtype: int
"""
return self._processed_count
@processed_count.setter
def processed_count(self, processed_count):
"""
Sets the processed_count of this BulkCreateResponse.
The number of enrollment identities processed until now.
:param processed_count: The processed_count of this BulkCreateResponse.
:type: int
"""
if processed_count is None:
raise ValueError("Invalid value for `processed_count`, must not be `None`")
self._processed_count = processed_count
@property
def status(self):
"""
Gets the status of this BulkCreateResponse.
The state of the process is 'new' at the time of creation. If the creation is still in progress, the state is shown as 'processing'. When the request has been fully processed, the state changes to 'completed'.
:return: The status of this BulkCreateResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this BulkCreateResponse.
The state of the process is 'new' at the time of creation. If the creation is still in progress, the state is shown as 'processing'. When the request has been fully processed, the state changes to 'completed'.
:param status: The status of this BulkCreateResponse.
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
allowed_values = ["new", "processing", "completed"]
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}"
.format(status, allowed_values)
)
self._status = status
@property
def total_count(self):
"""
Gets the total_count of this BulkCreateResponse.
Total number of enrollment identities found in the input CSV.
:return: The total_count of this BulkCreateResponse.
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""
Sets the total_count of this BulkCreateResponse.
Total number of enrollment identities found in the input CSV.
:param total_count: The total_count of this BulkCreateResponse.
:type: int
"""
if total_count is None:
raise ValueError("Invalid value for `total_count`, must not be `None`")
self._total_count = total_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BulkCreateResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 2.03125 | 2 |
nets/vgg16.py | bubbliiiing/classification-pytorch | 88 | 12798292 | import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
model_urls = {
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def freeze_backbone(self):
for param in self.features.parameters():
param.requires_grad = False
def Unfreeze_backbone(self):
for param in self.features.parameters():
param.requires_grad = True
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
# 224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512
# 14,14,512 -> 14,14,512 -> 7,7,512
cfgs = {
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
}
def vgg16(pretrained=False, progress=True, num_classes=1000):
model = VGG(make_layers(cfgs['D']))
if pretrained:
state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data',
progress=progress)
model.load_state_dict(state_dict,strict=False)
if num_classes!=1000:
model.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
return model
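# A minimal usage sketch (illustrative only; the class count and dummy input
# below are arbitrary assumptions, and no pretrained weights are downloaded):
if __name__ == "__main__":
    net = vgg16(pretrained=False, num_classes=10)
    dummy = torch.randn(1, 3, 224, 224)   # batch of one 224x224 RGB image
    out = net(dummy)
    print(out.shape)                       # expected: torch.Size([1, 10])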
| 2.59375 | 3 |
data-detective-airflow/data_detective_airflow/test_utilities/test_helper.py | dmitriy-e/metadata-governance | 5 | 12798293 | <reponame>dmitriy-e/metadata-governance
"""Helper for creating DAG tests
"""
import logging
from typing import Any, Union
import petl
from airflow.models import BaseOperator, TaskInstance
from airflow.utils import timezone
from pandas import DataFrame
from pytest_mock.plugin import MockerFixture
from data_detective_airflow.dag_generator import TDag
from data_detective_airflow.operators.tbaseoperator import TBaseOperator
from data_detective_airflow.test_utilities.assertions import assert_frame_equal
def run_task(task: Union[TBaseOperator, BaseOperator], context: dict = None):
"""Run a task"""
# Removing launch restrictions based on the status of previous operators
task.trigger_rule = 'dummy'
task.render_template_fields(task.generate_context())
task.pre_execute(context)
task.execute(context)
task.post_execute(context)
def mock_task_inputs(task, dataset, mocker):
for i, uptask in enumerate(task.upstream_list):
task.upstream_list[i].result.read = mocker.MagicMock(return_value=dataset[uptask.task_id])
def run_and_read(task: Union[TBaseOperator, BaseOperator], context: dict = None) -> DataFrame:
"""Run the task and return the DataFrame from the BaseResult instance."""
logging.info(f'Running task {task.task_id}')
run_task(task, context)
return task.read_result(context)
def run_and_assert_task(
task: Union[TBaseOperator, BaseOperator],
dataset: dict[str, Any],
mocker: MockerFixture = None,
exclude_cols: list = None,
**kwargs
):
"""Run the task, get the result and compare
:param task: Id of the running task
:param dataset: Dictionary with comparison examples. Output and input datasets are needed.
:param exclude_cols: Columns excluded from comparison
:param mocker: MockerFixture fixture
"""
task_instance = TaskInstance(task=task, execution_date=timezone.utcnow())
context = task_instance.get_template_context()
if mocker and dataset:
mock_task_inputs(task, dataset, mocker)
actual = run_and_read(task=task, context=context)
if actual is not None:
expected = dataset[task.task_id]
if isinstance(expected, DataFrame):
if task.include_columns:
actual = actual[list(task.include_columns)]
expected = expected[list(task.include_columns)]
if isinstance(actual, tuple):
# pylint: disable=no-member
actual = petl.wrap(actual).todataframe()
exclude_cols = exclude_cols or []
e_cols = list(task.exclude_columns) + exclude_cols
actual = actual.drop(e_cols, axis=1, errors='ignore')
expected = expected.drop(e_cols, axis=1, errors='ignore')
assert_frame_equal(actual, expected, **kwargs)
else:
assert actual == expected
def run_and_assert(
dag: TDag, task_id: str, test_datasets: dict, mocker: MockerFixture, exclude_cols: list = None
): # pylint: disable=inconsistent-return-statements
"""Using run_and_assert_task
Run the task and if it is TBaseOperator then get the result and compare it with the example
Also if the task is PgReplacePartitions then the target table will be cleared first,
and then after the launch, compare the contents of the target table with the example
:param dag: TDag
:param task_id: Id of the running task
:param test_datasets: Dictionary with examples
:param exclude_cols: Columns excluded from comparison
:param mocker: MockerFixture fixture
"""
task: TBaseOperator = dag.task_dict[task_id]
run_and_assert_task(task, dataset=test_datasets, mocker=mocker, exclude_cols=exclude_cols)
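# Illustrative pytest sketch (assumptions: the "dag" fixture, the task id
# "transform_task" and the "test_datasets" dict are hypothetical and not part
# of this module):
#
# def test_transform_task(dag, test_datasets, mocker):
#     run_and_assert(dag, "transform_task", test_datasets, mocker,
#                    exclude_cols=["updated_at"])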
| 2.03125 | 2 |
fast_tmp/apps/api/__init__.py | Chise1/fast-tmp2 | 1 | 12798294 | from datetime import timedelta
from fastapi import Form
from fastapi.security import OAuth2PasswordRequestForm
from fastapi import Depends, HTTPException
from pydantic import BaseModel
from starlette import status
from starlette.requests import Request
from fast_tmp.apps.api.schemas import LoginR
from fast_tmp.depends import get_current_active_user
from fast_tmp.func import get_site_from_permissionschema, init_permission
from fast_tmp.models import Permission, User
from fast_tmp.amis_router import AmisRouter
from fast_tmp.conf import settings
from fast_tmp.depends import authenticate_user
from fast_tmp.responses import Success, LoginError
from fast_tmp.templates_app import templates
from fast_tmp.utils.token import create_access_token
from fastapi.responses import JSONResponse
ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA
app = AmisRouter(title="fast_tmp", prefix="/auth", tags=["auth"])
INIT_PERMISSION = False
@app.post("/token", response_class=JSONResponse)
async def login(form_data: OAuth2PasswordRequestForm = Depends()):
"""
    Only used for testing the response on the docs page.
"""
user = await authenticate_user(form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username, "id": user.pk},
expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
@app.post("/get-token")
async def login(form_data: LoginR):
"""
    Standard request endpoint.
"""
user = await authenticate_user(form_data.username, form_data.password)
if not user:
raise LoginError()
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username, "id": user.pk},
expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer"}
async def get_pages(user: User):
global INIT_PERMISSION
app = settings.app
    # initialize permissions
if not INIT_PERMISSION:
await init_permission(app.site_schema, list(await Permission.all()))
INIT_PERMISSION = True
permissions = await user.perms
site = get_site_from_permissionschema(app.site_schema, permissions, "",
user.is_superuser)
if site:
return [site]
else:
return []
@app.get("/index", summary="Main page")
async def index(request: Request):
return templates.TemplateResponse(
"gh-pages/index.html",
{"request": request, },
)
class L(BaseModel):
username: str
    password: str
@app.post("/index", summary="Login")
async def index(request: Request, u: L):
user = await authenticate_user(u.username, u.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token = create_access_token(
data={"sub": user.username},
expires_delta=timedelta(minutes=settings.EXPIRES_DELTA)
)
return Success(data={"access_token": access_token})
@app.get("/site", summary="Get site menu")
async def get_site(user: User = Depends(get_current_active_user)):
"""
    Get the left-hand navigation bar.
:param user:
:return:
"""
global INIT_PERMISSION
app = settings.app
# 初始化permission
if not INIT_PERMISSION:
await init_permission(app.site_schema, list(await Permission.all()))
INIT_PERMISSION = True
permissions = await user.perms
site = get_site_from_permissionschema(app.site_schema, permissions, "",
user.is_superuser)
if site:
return {"pages": [site]}
else:
return {"pages": []}
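# Illustrative client-side sketch (host and credentials are assumptions; the
# routes are the ones defined above, mounted under the "/auth" prefix):
#
# import requests
# resp = requests.post("http://localhost:8000/auth/get-token",
#                      json={"username": "admin", "password": "secret"})
# token = resp.json()["access_token"]
# pages = requests.get("http://localhost:8000/auth/site",
#                      headers={"Authorization": f"Bearer {token}"}).json()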
| 2.15625 | 2 |
algorithms/dqn.py | GambuzX/sokoban-rl | 0 | 12798295 | import gym
import gym_sokoban
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.deepq.policies import MlpPolicy
from stable_baselines import DQN
def run():
# hyperparameters
gamma = 0.99 #discount factor
learning_rate = 0.00025 #learning rate for adam optimizer
buffer_size = 50000 #size of the replay buffer
exploration_fraction=0.1 #fraction of entire training period over which the exploration rate is annealed
exploration_final_eps=0.02 #final value of random action probability
exploration_initial_eps=1.0 #initial value of random action probability
train_freq=1 #update the model every train_freq steps. set to None to disable printing
batch_size=32 #size of a batched sampled from replay buffer for training
double_q=True #whether to enable Double-Q learning or not.
learning_starts=100 #how many steps of the model to collect transitions for before learning starts
timesteps = 1000#2000
verbose = 1
env = gym.make('Boxoban-Train-v1')
model = DQN(MlpPolicy, env,
gamma=gamma,
learning_rate=learning_rate,
buffer_size=buffer_size,
exploration_fraction=exploration_fraction,
exploration_final_eps=exploration_final_eps,
exploration_initial_eps=exploration_initial_eps,
train_freq=train_freq,
batch_size=batch_size,
double_q=double_q,
learning_starts=learning_starts,
verbose=1)
model.learn(total_timesteps=timesteps)
model.save("trained_models/dqn_sokoban_model")
# Enjoy trained agent
obs = env.reset()
print(model.action_probability(obs))
while True:
action, _states = model.predict(obs)
obs, rewards, done, info = env.step(action)
env.render()
if __name__ == "__main__":
run() | 2.390625 | 2 |
Python/hardware/Arms.py | marcostrullato/RoobertV2 | 0 | 12798296 | #!/usr/bin/env python
# Roobert V2 - second version of home robot project
# ________ ______ _____
# ___ __ \______________ /_______________ /_
# __ /_/ / __ \ __ \_ __ \ _ \_ ___/ __/
# _ _, _// /_/ / /_/ / /_/ / __/ / / /_
# /_/ |_| \____/\____//_.___/\___//_/ \__/
#
# Project website: http://roobert.springwald.de
#
# ########
# # Arms #
# ########
#
# Licensed under MIT License (MIT)
#
# Copyright (c) 2018 <NAME> | <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import division
import time, sys, os
my_file = os.path.abspath(__file__)
my_path ='/'.join(my_file.split('/')[0:-1])
sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/multitasking")
sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/hardware")
from MultiProcessing import *
from array import array
from SharedInts import SharedInts
from SharedFloats import SharedFloats
from LX16AServos import LX16AServos
from SmartServoManager import SmartServoManager
import atexit
clear = lambda: os.system('cls' if os.name=='nt' else 'clear')
class Arms():
_servoManager = None;
_released = False;
_armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]]
_lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]]
_wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]]
_wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]]
_stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]]
#_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]]
def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430):
self._servoManager = smartServoManager
self._leftHandOpen = leftHandOpen
self._leftHandClose = leftHandClose
self._rightHandOpen = rightHandOpen
self._rightHandClose = rightHandClose
self.DefineArms()
#self.SetArm(gesture=Arms._armHanging, left=False);
#self.SetHand(opened=True, left=False);
#self.SetArm(gesture=Arms._armHanging, left=True);
#self.SetHand(opened=True, left=True);
#self.WaitTillTargetsReached();
def DefineArms(self):
# right arm
self._servoManager.AddMasterServo(servoId=1, centeredValue=370);
self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608);
self._servoManager.AddMasterServo(servoId=3, centeredValue=685);
self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352);
self._servoManager.AddMasterServo(servoId=5, centeredValue=510);
self._servoManager.AddMasterServo(servoId=6, centeredValue=460);
self._servoManager.AddMasterServo(servoId=7, centeredValue=495);
self._servoManager.AddMasterServo(servoId=8, centeredValue=500);
# left arm
self._servoManager.AddMasterServo(servoId=11, centeredValue=545);
self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459);
self._servoManager.AddMasterServo(servoId=13, centeredValue=329);
self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700);
self._servoManager.AddMasterServo(servoId=15, centeredValue=477);
self._servoManager.AddMasterServo(servoId=16, centeredValue=486);
self._servoManager.AddMasterServo(servoId=17, centeredValue=501);
self._servoManager.AddMasterServo(servoId=18, centeredValue=503);
def PrintRightArmValues(self):
for id in range(1,8):
self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);
self._servoManager.Start()
while(True):
self._servoManager.PrintReadOnlyServoValues()
time.sleep(0.1)
def PrintLeftArmValues(self):
for id in range(11,18):
self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);
self._servoManager.Start()
while(True):
self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False)
time.sleep(0.1)
def MirrorRightArmToLeftStart(self):
for id in range(1,8):
self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);
#self._servoManager.Start()
def MirrorRightArmToLeftUpdate(self):
for id in [1,3,5,6,7,8]:
value = self._servoManager.ReadServo(id);
#print (str(id) + ":" +str(value))
value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10)
self._servoManager.MoveServo(id+10, pos=value);
def MirrorRightArmToLeftEnd(self):
for id in range(1,8):
self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False);
def SetArm(self, gesture, left):
for p in range(0,len(gesture)):
id = gesture[p][0]
value = gesture[p][1]
if (left == True):
id = id + 10;
value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id)
self._servoManager.MoveServo(id,value);
#print ("left:" + str(id));
else:
self._servoManager.MoveServo(id,value);
#print ("right:" + str(id))
def WaitTillTargetsReached(self):
while (self._servoManager.allTargetsReached == False):
time.sleep(0.1);
def SetHand(self, opened, left):
if (left==True):
if (opened==True):
self._servoManager.MoveServo(18,self._leftHandOpen)
else:
self._servoManager.MoveServo(18,self._leftHandClose)
else:
if (opened==True):
self._servoManager.MoveServo(8,self._rightHandOpen);
else:
self._servoManager.MoveServo(8,self._rightHandClose);
def Release(self):
if (self._released == False):
self._released = True;
self.SetArm(gesture=Arms._armHanging, left=False);
self.SetArm(gesture=Arms._armHanging, left=True);
self.SetHand(opened=True, left=False);
self.SetHand(opened=True, left=True);
self.WaitTillTargetsReached();
def __del__(self):
self.Release()
def exit_handler():
tester.Release()
servoManager.Release()
servos.Release()
if __name__ == "__main__":
atexit.register(exit_handler)
ended = False;
servos = LX16AServos()
servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1)
tester = Arms(servoManager)
#tester.MirrorRightArmToLeft();
#tester.PrintRightArmValues()
tester.PrintLeftArmValues();
servoManager.Start();
#time.sleep(1);
#tester.SetArm(gesture=Arms._rightCenteredValues, left=True);
#tester.WaitTillTargetsReached();
#while(True):
# print()
while(True):
tester.SetArm(gesture=Arms._armHanging, left=False);
tester.SetArm(gesture=Arms._armHanging, left=True);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._lookAtHand, left=False);
tester.WaitTillTargetsReached();
for i in range(1,4):
tester.SetArm(gesture=Arms._wink2, left=False);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._wink1, left=False);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._armHanging, left=True);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._lookAtHand, left=True);
tester.WaitTillTargetsReached();
for i in range(1,4):
tester.SetArm(gesture=Arms._wink2, left=True);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._wink1, left=True);
tester.WaitTillTargetsReached();
#plus = 100
#servoManager.Start()
#while(True):
#plus = - plus
##tester._servoManager.MoveServo(1,400+plus)
#tester._servoManager.MoveServo(3,600+plus)
#while (tester._servoManager.allTargetsReached == False):
#time.sleep(0.1)
#tester.SetHand(opened=False, left= True);
#tester.SetHand(opened=False, left= False);
#tester.WaitTillTargetsReached();
#time.sleep(1);
#tester.SetHand(opened=True, left= True);
#tester.SetHand(opened=True, left= False);
#tester.WaitTillTargetsReached();
#time.sleep(1);
##while(True):
## time.sleep(1)
## print("sleep")
#tester.SetArm(gesture=Arms._strechSide, left=True);
#tester.WaitTillTargetsReached();
##tester.SetArm(gesture=Arms._lookHand, left=False);
##tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._strechSide, left=True);
#tester.SetArm(gesture=Arms._strechSide, left=False);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink1, left=True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink2, left= True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink1, left=True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink2, left= True);
#tester.WaitTillTargetsReached();
#tester.SetHand(opened=False, left= True);
#tester.SetArm(gesture=Arms._ghettoFist1, left= True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._ghettoFist2, left= True);
#tester.WaitTillTargetsReached();
print("done");
| 1.773438 | 2 |
PyPoll/main.py | v33na/python-challenge | 0 | 12798297 | <gh_stars>0
# Modules
import os
import csv
#Set the variables
total_votes = 0
total_candidates = 0
candidates_names = []
candidate_votes = []
# Winning Candidate and Winning Count Tracker
percent = []
# Set path for file
poll_path = os.path.join("Resources", "election_data.csv")
output_path = os.path.join("Resources", "Election Analysis")
# Open and read csv
with open(poll_path, newline="") as csvfile:
poll_reader = csv.reader(csvfile, delimiter=",")
# Read the header row first (skip this part if there is no header)
poll_header = next(csvfile)
#To loop through the data to collect the answers
for row in poll_reader:
total_votes = total_votes + 1
#read in the candidate name from column 3 row 2 of csv
candidate_in = (row[2])
if candidate_in in candidates_names:
candidate_index = candidates_names.index(candidate_in)
candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1
else:
#if candidate was not found in candidates_unique list then append to list and add 1 to vote count
candidates_names.append(candidate_in)
candidate_votes.append(1)
#print(f'Total votes {total_votes}')
#print(f'Each candidate: {candidates_names}')
#print(f'Index: {candidates_names.index(candidate_in)}')
#print(f"candidates votes: {candidate_votes}")
#The percentage of votes each candidate won
for x in range(len(candidates_names)):
vote_percent = round(candidate_votes[x]/total_votes *100, 4)
percent.append(vote_percent)
max_votes = max(candidate_votes)
max_index= candidate_votes.index(max_votes)
election_winner = candidates_names[max_index]
#print results to terminal
print("------------------------------------------------------------")
print("Election Results")
print("-------------------------------------------------------------")
print(f"The total number of votes cast : {total_votes}")
print("--------------------------------------------------------------")
for x in range(len(candidates_names)):
print(f"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})")
print("------------------------------------------------------------------")
print(f"The Winner is : {election_winner}")
print("--------------------------------------------------------------")
#To export a text file with the results
# Open the file using "write" mode. Specify the variable to hold the contents
with open(output_path, 'w', newline='') as textfile:
    # Initialize csv.writer on the output text file
    csvwriter = csv.writer(textfile, delimiter=',')
#print results to Text file
textfile.write("Election Results\n")
textfile.write("-------------------------------------------------------------\n")
textfile.write(f"The total number of votes cast : {total_votes}\n")
textfile.write("--------------------------------------------------------------\n")
for x in range(len(candidates_names)):
textfile.write(f"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\n")
textfile.write("------------------------------------------------------------------\n")
textfile.write(f"The Winner is : {election_winner}\n")
textfile.write("--------------------------------------------------------------\n")
| 3.546875 | 4 |
receivers/utils.py | cosnomi/smart-mail-slacker | 0 | 12798298 | <gh_stars>0
from datetime import datetime
class MessageData:
keys = [('internal_date', datetime), ('subject', str), ('body', str),
('from', list), ('to', list)]
def __init__(self, params):
for key, expected_type in self.keys:
if not key in params:
raise KeyError(key)
if not isinstance(params[key], expected_type):
raise TypeError('expected {} to be {}, but got {}'.format(
key, expected_type, type(params[key])))
setattr(self, key, params[key])
def get_value(self, key):
if key not in [x[0] for x in self.keys]:
raise KeyError(key)
return getattr(self, key) | 2.796875 | 3 |
code/backend/billing/services.py | rollethu/noe | 16 | 12798299 | <gh_stars>10-100
from typing import List
from collections import defaultdict
import logging
from django.conf import settings
from online_payments.billing.enums import Currency
from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer
from online_payments.billing.szamlazzhu import Szamlazzhu
from payments.prices import PRODUCTS, get_product_items
logger = logging.getLogger(__name__)
def send_seat_invoice(seat):
_send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat]))
def send_appointment_invoice(appointment):
_send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all()))
def _get_items_for_seats(seats) -> List[Item]:
grouped_products = defaultdict(int)
for seat in seats:
grouped_products[seat.payment.product_type] += 1
items = []
for product_type, quantity in grouped_products.items():
items.extend(get_product_items(PRODUCTS[product_type], quantity))
return items
def _send_invoice(billing_detail, email, items):
customer = Customer(
name=billing_detail.company_name,
post_code=billing_detail.post_code,
city=billing_detail.city,
address=billing_detail.address_line1,
email=email,
tax_number=billing_detail.tax_number,
)
invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer)
szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF)
logger.info("Sending invoice to: %s", email)
szamlazzhu.send_invoice(invoice, settings.SZAMLAZZHU_INVOICE_PREFIX)
| 1.976563 | 2 |
lb_colloids/Colloids/Colloid_output.py | jdlarsen-UA/LB-colloids | 1 | 12798300 | """
The Colloid_output module contains classes to read LB Colloids simulation
outputs and perform post processing. Many classes are available to
provide plotting functionality. ModelPlot and CCModelPlot are useful for
visualizing colloid-surface forces and colloid-colloid forces respectively.
example import of the Colloid_output.py module is as follows
>>> from lb_colloids import ColloidOutput
>>> import matplotlib.pyplot as plt
>>>
>>> hdf = "mymodel.hdf5"
>>> mp = ColloidOutput.ModelPlot(hdf)
>>> # model plot accepts matplotlib args and kwargs!!!
>>> mp.plot('edl_x', cmap='viridis')
>>> plt.show()
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py as H
class Breakthrough(object):
"""
Class to prepare and plot breakthrough curve data from endpoint
files.
Parameters:
----------
:param str filename: <>.endpoint file
Attributes:
----------
:ivar df: (pandas DataFrame): dataframe of endpoint data
:ivar resolution: (float): model resolution
:ivar timestep: (float): model timestep
:ivar continuous: (int): interval of continuous release, 0 means pulse
:ivar ncol: (float): number of colloids per release in simulation
:ivar total_ncol: (int): total number of colloids in simulation
"""
def __init__(self, filename):
if not filename.endswith('.endpoint'):
raise FileTypeError('.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.df = reader.df
self.resolution = reader.resolution
self.timestep = reader.timestep
self.continuous = reader.continuous
# todo: replace this call with something from the header later!
self.ncol = reader.ncol
self.total_ncol = float(self.df.shape[0])
self.__breakthrough_curve = None
self.__reader = reader
@property
def breakthrough_curve(self):
"""
Property method that performs a dynamic
calculation of breakthrough curve data
"""
max_ts = self.df['nts'].max()
if self.__breakthrough_curve is None:
if not self.continuous:
bt_colloids = self.df.loc[self.df['flag'] == 3]
bt_colloids = bt_colloids.sort_values('end-ts')
ncols = []
nts = []
ncol = 0
for index, row in bt_colloids.iterrows():
ncol += 1
ncols.append(float(ncol))
nts.append(row['end-ts'])
ncols.append(float(ncol))
nts.append(max_ts)
df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol')
self.__breakthrough_curve = df
else:
bt_colloids = self.df.loc[self.df['flag'] == 3]
bt_colloids = bt_colloids.sort_values('end-ts')
ncols = []
nts = []
ncol = 0
ncol_per_release = []
for index, row in bt_colloids.iterrows():
lower_ts = row['end-ts'] - self.continuous
upper_ts = row['end-ts']
t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)]
ncol += 1
ncols.append(float(ncol))
ncol_per_release.append(len(t))
nts.append(row['end-ts'])
ncols.append(float(ncol))
nts.append(max_ts)
ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous)
& (bt_colloids['end-ts'] <= max_ts)]))
df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol')
self.__breakthrough_curve = df
return self.__breakthrough_curve
def pore_volume_conversion(self):
"""
Method to retrieve the pore volume calculation
conversion for plotting colloids.
"""
pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\
(self.__reader.ylen * self.resolution)
return pv_factor
def plot(self, time=True, *args, **kwargs):
"""
        Convenience method to plot data into a matplotlib
chart.
Parameters:
----------
:param bool time: if true x-axis is time, false is nts
:param *args: matplotlib args for 1d charts
:param **kwargs: matplotlib keyword arguments for 1d charts
"""
if time:
if self.continuous:
plt.plot(self.breakthrough_curve['nts'] * self.timestep,
self.breakthrough_curve['ncpr'] / float(self.ncol),
*args, **kwargs)
else:
plt.plot(self.breakthrough_curve['nts'] * self.timestep,
self.breakthrough_curve.index.values / float(self.ncol),
*args, **kwargs)
else:
if self.continuous:
                plt.plot(self.breakthrough_curve['nts'],
self.breakthrough_curve['ncpr'] / float(self.ncol),
*args, **kwargs)
else:
plt.plot(self.breakthrough_curve['nts'],
self.breakthrough_curve.index.values / float(self.ncol),
*args, **kwargs)
plt.ylim([0, 1])
def plot_pv(self, *args, **kwargs):
"""
Method to plot breakthrough data with pore
volumes (non-dimensional time)
Parameters:
----------
:param *args: matplotlib args for 1d plotting
:param **kwargs: matplotlib kwargs for 1d plotting
"""
pv_factor = self.pore_volume_conversion()
if self.continuous:
plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep,
self.breakthrough_curve['ncpr'] / float(self.ncol),
*args, **kwargs)
else:
plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep,
self.breakthrough_curve.index.values / float(self.ncol),
*args, **kwargs)
plt.ylim([0, 1])
plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)])
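# A minimal usage sketch for Breakthrough (illustrative only; "mymodel.endpoint"
# is an assumed file name, not part of this module):
#
# >>> bt = Breakthrough("mymodel.endpoint")
# >>> bt.plot_pv(color="k")
# >>> plt.show()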
class DistributionFunction(object):
"""
    Class to plot a probability distribution function of colloid breakthrough
from endpoint files.
Parameters:
----------
:param str filename: <>.endpoint file name
:param int nbin: number of bins for pdf calculation
Attributes:
----------
:ivar df: (pandas DataFrame): dataframe of endpoint data
:ivar resolution: (float): model resolution
:ivar timestep: (float): model timestep
:ivar continuous: (int): interval of continuous release, 0 means pulse
:ivar ncol: (float): number of colloids per release in simulation
:ivar total_ncol: (int): total number of colloids in simulation
:ivar pdf: (np.recarray) colloid probability distribution function
"""
def __init__(self, filename, nbin=1000):
if not filename.endswith('.endpoint'):
raise FileTypeError('.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.df = reader.df
self.resolution = reader.resolution
self.timestep = reader.timestep
self.continuous = reader.continuous
self.ncol = float(reader.ncol)
self.total_ncol = float(self.df.shape[0])
self.bin = nbin
self.pdf = None
self.reset_pdf(nbin)
self.__normalize = False
self.__reader = reader
def reset_pdf(self, nbin, normalize=False):
"""
Method to generate a probability distribution function
based upon user supplied bin size.
Parameters:
----------
:param int nbin: number of time steps to base bin on
:param bool normalize: method to calculate pdf by residence time or end time
"""
self.bin = nbin
self.__normalize = normalize
ts = []
ncols = []
lower_nts = 0
max_ts = self.df['nts'].max()
pdf_colloids = self.df.loc[self.df['flag'] == 3]
pdf_colloids = pdf_colloids.sort_values('delta-ts')
for upper_nts in range(0, int(max_ts) + 1, nbin):
ncol = 0
for index, row in pdf_colloids.iterrows():
if normalize:
if lower_nts < row['delta-ts'] <= upper_nts:
ncol += 1
else:
if lower_nts < row['end-ts'] <= upper_nts:
ncol += 1
ts.append(upper_nts)
ncols.append(ncol)
lower_nts = upper_nts
arr = np.recarray((len(ts),), dtype=[('nts', np.float),
('ncol', np.float)])
for idx, value in enumerate(ts):
arr[idx] = tuple([value, ncols[idx]])
self.pdf = arr
def pore_volume_conversion(self):
"""
Method to retrieve the pore volume calculation
conversion for plotting colloids.
"""
pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\
(self.__reader.ylen * self.resolution)
return pv_factor
def plot(self, time=True, *args, **kwargs):
"""
Method to plot data into a matplotlib chart.
Parameters:
----------
:param bool time: if true x-axis is time, false is nts
:param *args: matplotlib args for 1d charts
:param **kwargs: matplotlib keyword arguments for 1d charts
"""
if time:
if self.__normalize:
plt.plot(self.pdf['nts'] * self.timestep,
self.pdf['ncol'] / self.total_ncol,
*args, **kwargs)
else:
plt.plot(self.pdf['nts'] * self.timestep,
self.pdf['ncol'] / self.ncol,
*args, **kwargs)
else:
if self.__normalize:
plt.plot(self.pdf['nts'],
self.pdf['ncol'] / self.total_ncol,
*args, **kwargs)
else:
plt.plot(self.pdf['nts'],
self.pdf['ncol'] / self.ncol,
*args, **kwargs)
plt.ylim([0, 1])
def plot_pv(self, *args, **kwargs):
"""
Method to plot pdf data with pore volumes (non-dimensional time)
Parameters:
----------
:param *args: matplotlib args for 1d plotting
:param **kwargs: matplotlib kwargs for 1d plotting
"""
pv_factor = self.pore_volume_conversion()
plt.plot(self.pdf['nts'] * pv_factor * self.timestep,
self.pdf['ncol'] / self.ncol,
*args, **kwargs)
class ADE(object):
"""
Class to calculate macroscopic advection dispersion
equation parameters for field scale model parameterization
Class needs to be re-named and updated to CDE equation
Parameters:
----------
:param str filename: ascii output file name from colloid model
:param int nbin: number of timesteps to bin a pdf for calculation
"""
def __init__(self, filename, nbin=1000):
if not filename.endswith('.endpoint'):
raise FileTypeError('<>.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.timestep = reader.timestep
self.resolution = reader.resolution
self.ylen = reader.ylen
self.ncol = reader.ncol
self.total_ncol = float(reader.df.shape[0])
self.uy = reader.uy
self.pdf = None
self.__dist_func = DistributionFunction(filename, nbin)
self.bt = Breakthrough(filename).breakthrough_curve
self.reset_pdf(nbin)
def __reset(self):
self.pdf = self.__dist_func.pdf
def reset_pdf(self, nbin, normalize=False):
"""
User method to reset values based on changing
the pdf bin values
Parameters:
----------
:param int nbin: number of timesteps to bin a pdf for calculation
:param bool normalize: flag to calculate pdf by residence time or end time
"""
self.__dist_func.reset_pdf(nbin, normalize)
self.pdf = self.__dist_func.pdf
def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10,
max_nfev=1000, **kwargs):
"""
        Scipy optimize method to solve least squares
        for Jury 1991. Pulse flux.
Parameters:
----------
:param float D: Diffusivity initial guess. Cannot be 0
:param float R: Retardation initial guess. Cannot be 0
:param float ftol: scipy function tolerance for solution
:param int max_nfev: maximum number of function iterations
:param **kwargs: scipy least squares kwargs
Returns:
-------
:return: scipy least squares dictionary. Answer in dict['x']
"""
# todo: test this method! look up references for clearer examples!
from scipy.optimize import leastsq, minimize, least_squares
a = self.ncol
l = self.ylen * self.resolution
v = self.uy
pdf, t = self.__prep_data()
x0 = np.array([D, R])
return least_squares(self.__jury_residuals, x0,
args=(a, l, t, v, pdf),
ftol=ftol, max_nfev=max_nfev,
**kwargs)
def __jury_residuals(self, vars, A, L, t, v, pdf):
"""
Method to estimate residuals from jury 1991 equation
using data
Parameters
vars: (np.array) [dispersivity, retardation]
        A: (float) number of colloids (ncol)
        L: (float) column length (ylen * resolution)
        t: (np.array) time
        v: (float) mean fluid velocity
        pdf: (np.array) c/co of the colloid pdf
"""
return pdf - self.__jury_1991(vars, A, L, t, v)
def __jury_1991(self, vars, A, L, t, v):
"""
Equation for Jury 1991 calculation of Dispersivity
and Retardation
Parameters
vars: (np.array) [dispersivity, retardation]
        A: (float) number of colloids (ncol)
        L: (float) column length (ylen * resolution)
        t: (np.array) time
        v: (float) mean fluid velocity
"""
D = vars[0]
R = vars[1]
eq0 = (A * L * np.sqrt(R))
eq1 = 2 * np.sqrt(np.pi * D * t ** 3)
eq2 = -(R * L - v * t) ** 2
eq3 = 4 * R * D * t
x = (eq0 / eq1) * np.exp(eq2 / eq3)
x[0] = 0
return x
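    # For reference, __jury_1991 above evaluates the pulse-flux expression as coded:
    #   C(L, t) = (A * L * sqrt(R)) / (2 * sqrt(pi * D * t**3))
    #             * exp(-(R * L - v * t)**2 / (4 * R * D * t))
    # with A = number of colloids, L = column length, v = mean fluid velocity,
    # D = dispersion coefficient and R = retardation; the t = 0 entry is zeroed to
    # avoid the singularity. (This summarizes the code above, not the published form.)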
def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10,
max_nfev=1000, **kwargs):
"""
        Scipy optimize method to solve least squares
        for van Genuchten 1986. Miscible displacement.
Parameters:
----------
:param float D: Diffusivity initial guess. Cannot be 0
:param float R: Retardation initial guess. Cannot be 0
:param float ftol: scipy function tolerance for solution
:param int max_nfev: maximum number of function iterations
:param **kwargs: scipy least squares kwargs
Returns:
-------
:return: scipy least squares dictionary. Answer in dict['x']
"""
from scipy.optimize import least_squares
l = self.ylen * self.resolution
v = self.uy
t = self.bt['nts'].as_matrix() * self.timestep
bt = self.bt['ncpr'].as_matrix() / self.ncol
x0 = np.array([D, R])
return least_squares(self.__van_genuchten_residuals, x0,
args=(l, v, t, bt),
ftol=ftol, max_nfev=max_nfev,
**kwargs)
def __van_genuchten_residuals(self, vars, l, v, t, bt):
"""
        Method to estimate residuals from van Genuchten and Wierenga
        1986
Parameters:
vars: (np.array) [dispersivity, retardation]
        l: (float) column length
v: (float) mean fluid velocity
t: (float) time
bt: (np.array) breakthrough curve
"""
return bt - self.__van_genuchten_1986(vars, l, v, t)
def __van_genuchten_1986(self, vars, l, v, t):
"""
        Equation from van Genuchten and Wierenga 1986 to calculate
        Dispersivity and Retardation from breakthrough data.
Parameters:
vars: (np.array) [dispersivity, retardation]
        l: (float) column length
v: (float) mean fluid velocity
t: (float) time
"""
from scipy import special
D = vars[0]
R = vars[1]
eq0 = R * l - v * t
eq1 = np.sqrt(4 * D * R * t)
x = 0.5 * special.erfc(eq0/eq1)
if np.isnan(x[0]):
x[0] = 0
return x
def __prep_data(self):
"""
Prepares breakthrough data by stripping off trailing
zeros.
Returns:
pdf = (np.array) stripped pdf
t = (np.array) times
"""
strip_idx = None
seq = False
bt = False
for idx, rec in enumerate(self.pdf):
if not bt:
if rec['ncol'] != 0:
bt = True
else:
pass
else:
if rec['ncol'] == 0:
if not seq:
strip_idx = idx
seq = True
else:
pass
else:
seq = False
strip_idx = None
if strip_idx is not None:
pdf = self.pdf['ncol'][:strip_idx + 1]
time = self.pdf['nts'][:strip_idx + 1]
else:
pdf = self.pdf['ncol']
time = self.pdf['nts']
return pdf, time
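# Illustrative usage sketch for the ADE class above; the endpoint file name and
# initial guesses are placeholder assumptions. fit.x holds the fitted [D, R]
# returned by scipy.optimize.least_squares.
def _example_ade_fit(endpoint_file='example.endpoint'):
    """Hypothetical sketch: fit dispersion (D) and retardation (R) to breakthrough data."""
    ade = ADE(endpoint_file, nbin=1000)
    fit = ade.solve_van_genuchten_1986(D=0.01, R=1.0)
    dispersion, retardation = fit.x
    return dispersion, retardation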
class ModelPlot(object):
"""
Class to retrieve Colloid force arrays
and plot for data analysis.
Parameters:
----------
:param str hdf5: hdf5 file name
"""
def __init__(self, hdf5):
if not hdf5.endswith('hdf') and\
not hdf5.endswith('hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.__hdf = Hdf5Reader(hdf5)
@property
def keys(self):
return self.__hdf.keys
def get_data(self, key):
"""
Get data method to view and analyze colloid
force arrays
Parameters:
----------
:param str key: valid dictionary key from self.keys
Returns:
-------
:return: data <varies>
"""
return self.__hdf.get_data(key)
def get_data_by_path(self, path):
"""
Method to retrieve hdf5 data by specific path
Parameters:
----------
:param str path: hdf5 directory path to data
Returns:
-------
:return: data <varies>
"""
return self.__hdf.get_data_by_path(path)
def plot(self, key, ax=None, masked=False, *args, **kwargs):
"""
Hdf array plotting using Hdf5Reader keys
Parameters:
----------
        :param str key: valid dictionary key from self.keys
        :param object ax: matplotlib pyplot axes object (optional)
        :param bool masked: if True, mask the solid phase using the binary image
        :param *args: matplotlib plotting args
        :param **kwargs: matplotlib plotting kwargs
"""
# todo: create a function_fmt for axis options
mesh = None
if ax is None:
ax = plt.gca()
if key in ('lvdw_x', 'lvdw_y',
'lewis_x', 'lewis_y',
'edl_x', 'edl_y',
'dlvo_x', 'dlvo_y',
'attractive_x', 'attractive_y'):
x_axis = self.__hdf.get_data('distance_array')
arr = self.__hdf.get_data(key)
ax.plot(x_axis, arr, *args, **kwargs)
elif key in ('conversion_factor',
'gravity',
'bouyancy'):
raise KeyError('{}: key not valid for plotting'.format(key))
elif key in ('dlvo_fine', 'edl_fine',
'attractive_fine'):
x_axis = self.__hdf.get_data('distance_fine')
arr = self.__hdf.get_data(key)
ax.plot(x_axis, arr, *args, **kwargs)
elif key == "image":
arr = self.__hdf.get_data(key)
if masked:
arr = np.ma.masked_where(arr == 0, a=arr)
ax.imshow(arr, *args, **kwargs)
else:
arr = self.__hdf.get_data(key)
if masked:
img = self.__hdf.get_data("image")
arr = np.ma.masked_where(img == 1, a=arr)
mesh = ax.imshow(arr, *args, **kwargs)
if mesh is not None:
return mesh
else:
return ax
def plot_velocity_magnitude(self, nbin=10, dimensional=True,
masked=False, *args, **kwargs):
"""
Method to create a quiver plot to display the
magnitude and direction of velocity vectors within
the system.
Parameters:
----------
        :param int nbin: plotting stride (quiver refinement)
        :param bool dimensional: if True plot dimensional velocities, otherwise lattice units
        :param bool masked: if True, mask vectors within the solid phase
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
if dimensional:
x = self.__hdf.get_data('velocity_x')
y = self.__hdf.get_data('velocity_y')
else:
x = self.__hdf.get_data('lb_velocity_x')
y = self.__hdf.get_data('lb_velocity_y')
xx = np.arange(0, x.shape[1])
yy = np.arange(0, x.shape[0])
xx, yy = np.meshgrid(xx, yy)
if masked:
img = self.__hdf.get_data('image')
xx = np.ma.masked_where(img == 1, a=xx)
yy = np.ma.masked_where(img == 1, a=yy)
x = np.ma.masked_where(img == 1, a=x)
y = np.ma.masked_where(img == 1, a=y)
Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin],
x[::nbin, ::nbin], y[::nbin, ::nbin],
units='width', *args, **kwargs)
qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \frac{cm}{s}$',
coordinates='figure')
plt.xlim(0, x.shape[1])
plt.ylim(x.shape[0], 0)
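# Illustrative usage sketch for ModelPlot above; the hdf5 file name is a placeholder
# assumption and the keys shown are taken from Hdf5Reader.data_paths.
def _example_model_plot(hdf5_file='example.hdf5'):
    """Hypothetical sketch: plot a DLVO profile and the masked velocity field."""
    mp = ModelPlot(hdf5_file)
    mp.plot('dlvo_x', color='b')                         # 1d force profile vs. distance
    plt.figure()
    mp.plot('velocity_y', masked=True, cmap='viridis')   # 2d array rendered with imshow
    mp.plot_velocity_magnitude(nbin=10, masked=True)     # quiver overlay of the flow field
    plt.show()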
class CCModelPlot(object):
"""
Class to query colloid-colloid interactions
and plot data as 1d or as a meshgrid object
More sophisticated than standard ModelPlot
Parameters:
----------
:param str hdf5: hdf5 file name
"""
    data_paths = {'col_col_x': 'colloid_colloid/x',
                  'col_col_y': 'colloid_colloid/y',
'col_col': None,
'distance_x': 'colloid_colloid/distance/x',
'distance_y': 'colloid_colloid/distance/y',
'distance_fine_x': 'colloid_colloid/fine/distance/x',
'distance_fine_y': 'colloid_colloid/fine/distance/y',
'col_col_fine_x': 'colloid_colloid/fine/x',
'col_col_fine_y': 'colloid_colloid/fine/y',
'col_col_fine': None}
def __init__(self, hdf5):
if not hdf5.endswith('hdf') and\
not hdf5.endswith('hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.__hdf5 = Hdf5Reader(hdf5)
@property
def keys(self):
"""
Property method to return valid keys to obtain data
"""
        return list(CCModelPlot.data_paths.keys())
def get_data(self, key):
"""
Method to return data by key
Parameters:
----------
:param str key: valid model key
"""
return self.__hdf5.get_data(key)
def get_data_by_path(self, path):
"""
Method to return data by hdf5 path
Parameters:
----------
:param str path: valid HDF5 data path
"""
return self.__hdf5.get_data_by_path(path)
def plot(self, key, *args, **kwargs):
"""
Plotting method for 1d colloid-colloid dlvo profiles
Parameters:
----------
:param str key: valid data key
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
if key not in ('col_col_x', 'col_col_y',
'col_col_fine_x', 'col_col_fine_y'):
raise KeyError("{} is not a valid key".format(key))
colcol = self.__hdf5.get_data(key)
shape = colcol.shape
center = shape[0] // 2
        if key == "col_col_x":
x = self.__hdf5.get_data('distance_x')
x = x[center, center:]
y = colcol[center, center:]
elif key == "col_col_y":
x = self.__hdf5.get_data('distance_y')
x = x.T[center, center:]
y = colcol.T[center, center:]
elif key == "col_col_fine_x":
x = self.__hdf5.get_data('distance_fine_x')
x = x[center, center:] # * 1e-6
y = colcol[center, center:]
else:
x = self.__hdf5.get_data('distance_fine_y')
x = x[center, center:] # * 1e-6
y = colcol[center, center:]
plt.plot(x, y * -1, *args, **kwargs)
def plot_mesh(self, key, ax=None, *args, **kwargs):
"""
Plotting method for 2d representation of colloid-colloid
dlvo profiles.
Parameters:
----------
:param str key: valid data key
:param object ax: matplotlib axes object (optional)
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
from matplotlib.colors import LogNorm
if ax is None:
ax = plt.gca()
if key not in ('col_col', 'col_col_fine',
'col_col_x', 'col_col_y',
'col_col_fine_x', 'col_col_fine_y'):
raise KeyError("{} is not a valid key".format(key))
if key == 'col_col':
ccx = np.abs(self.__hdf5.get_data('col_col_x'))
ccy = np.abs(self.__hdf5.get_data('col_col_y'))
mesh = ccx + ccy
elif key == 'col_col_fine':
ccx = np.abs(self.__hdf5.get_data('col_col_fine_x'))
ccy = np.abs(self.__hdf5.get_data('col_col_fine_y'))
mesh = ccx + ccy
else:
mesh = self.__hdf5.get_data(key)
# find center and set to nearby value to prevent log scale crashing
shape = mesh.shape
center = shape[0] // 2
mesh[center, center] = mesh[center, center + 1]
xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1),
np.arange(0, mesh.shape[1] + 1))
if mesh.max()/mesh.min() > 10:
            vmin = mesh.min()
            vmax = mesh.max()
            if 'vmin' in kwargs:
                vmin = kwargs.pop('vmin')
            if 'vmax' in kwargs:
                vmax = kwargs.pop('vmax')
            p = ax.pcolormesh(xx, yy, mesh,
                              norm=LogNorm(vmin=vmin, vmax=vmax),
                              *args, **kwargs)
else:
p = ax.pcolormesh(xx, yy, mesh,
*args, **kwargs)
ax.set_ylim([0, mesh.shape[0]])
ax.set_xlim([0, mesh.shape[1]])
center = mesh.shape[0] / 2.
ax.plot([center], [center], 'ko')
return p
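# Illustrative usage sketch for CCModelPlot above; the hdf5 file name is a
# placeholder assumption and keys come from CCModelPlot.data_paths.
def _example_cc_model_plot(hdf5_file='example.hdf5'):
    """Hypothetical sketch: 1d and 2d views of the colloid-colloid DLVO field."""
    cc = CCModelPlot(hdf5_file)
    cc.plot('col_col_fine_x', color='k')  # 1d profile from the array centre outward
    fig, ax = plt.subplots()
    mesh = cc.plot_mesh('col_col_fine', ax=ax, cmap='magma')
    plt.colorbar(mesh, ax=ax)
    plt.show()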
class ColloidVelocity(object):
"""
    Class to return colloid velocity and statistics
relating to colloid velocity for a simulation. Class
needs to be rebuilt to work with timeseries and pathline
files for a more precise velocity measurement
Parameters:
----------
:param str filename: endpoint file name
"""
def __init__(self, filename):
if not filename.endswith(".endpoint"):
raise FileTypeError('.endpoint file must be supplied')
reader = ASCIIReader(filename)
self.timestep = reader.timestep
self.resolution = reader.resolution
self.xlen = reader.xlen
self.ylen = reader.ylen
self.df = reader.df
self.ncol = reader.df.shape[0]
self.max_time = max(reader.df['nts']) * self.timestep
self.velocity = None
self.__get_velocity_array()
def __get_velocity_array(self):
"""
Built in method to calculate the mean velocity of
each colloid in the simulation
"""
colloid = []
velocity = []
for index, row in self.df.iterrows():
if np.isnan(row['y-position']):
velocity.append((self.ylen * self.resolution) /
(row['delta-ts'] * self.timestep))
else:
velocity.append((row['y-position'] * self.resolution) /
(row['nts'] * self.timestep))
colloid.append(index)
        arr = np.recarray((len(colloid),), dtype=[('colloid', np.int),
('velocity', np.float)])
for idx, value in enumerate(colloid):
arr[idx] = tuple([value, velocity[idx]])
self.velocity = arr
@property
def max(self):
"""
:return: maximum colloid velocity
"""
return self.velocity['velocity'].max()
@property
def min(self):
"""
:return: minimum colloid velocity
"""
return self.velocity['velocity'].min()
@property
def mean(self):
"""
:return: mean colloid velocity
"""
return self.velocity['velocity'].mean()
@property
def var(self):
"""
:return: variance of colloid velocities
"""
return np.var(self.velocity['velocity'])
@property
def stdev(self):
"""
:return: standard deviation of colloid velocities
"""
return np.std(self.velocity['velocity'])
@property
def cv(self):
"""
        :return: coefficient of variation of colloid velocities
"""
return (self.stdev / self.mean) * 100
def plot(self, *args, **kwargs):
"""
Method to plot distribution of velocities by
colloid for array of velocity.
Parameters
----------
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
plt.scatter(self.velocity['colloid'],
self.velocity['velocity'],
*args, **kwargs)
def plot_histogram(self, nbin=10, width=0.01,
*args, **kwargs):
"""
User method to plot a histogram of velocities using
a bar chart.
Parameters:
----------
:param int nbin: number of specific bins for plotting
:param float width: matplotlib bar width.
:param *args: matplotlib plotting args
:param **kwargs: matplotlib plotting kwargs
"""
adjuster = 0.00001
bins = np.linspace(self.min - adjuster, self.max, nbin)
ncols = []
velocity = []
lower_v = self.min - adjuster
upper_v = 0
for upper_v in bins:
ncol = 0
for v in self.velocity['velocity']:
if lower_v < v <= upper_v:
ncol += 1
velocity.append((lower_v + upper_v)/2.)
ncols.append(ncol)
lower_v = upper_v - adjuster
velocity.append(upper_v + adjuster)
ncols.append(0)
plt.bar(velocity, ncols, width, *args, **kwargs)
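# Illustrative usage sketch for ColloidVelocity above; the endpoint file name is a
# placeholder assumption and velocities are in the model units set by the input file.
def _example_colloid_velocity(endpoint_file='example.endpoint'):
    """Hypothetical sketch: summarize and plot the per-colloid velocity distribution."""
    cv = ColloidVelocity(endpoint_file)
    print('mean velocity: {:.4e} (cv = {:.1f}%)'.format(cv.mean, cv.cv))
    cv.plot_histogram(nbin=20, width=cv.max / 40.)
    plt.show()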
# todo: think about this one. Does it belong here? Finish class. Integrate into LB
class LBOutput(object):
"""
    Class to analyze LB fluid/solid properties
Parameters:
----------
    :param str hdf5: hdf5 output filename
"""
data_paths = {'velocity_x': None,
'velocity_y': None,
'lb_velocity_x': None,
'lb_velocity_y': None,
'resolution': None,
'porosity': None,
'pore_diameter': None,
'conversion_factor': None,
'reynolds_number': None}
def __init__(self, hdf5):
if not hdf5.endswith('.hdf') and not\
hdf5.endswith('.hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.__hdf5 = Hdf5Reader(hdf5)
@property
def keys(self):
"""
:return: Lattice boltzmann data keys
"""
return LBOutput.data_paths.keys()
def get_data(self, key):
"""
Method to select data from hdf5 file based on key, instead
of data path
Parameters:
----------
:param str key: lattice boltzmann data key
Returns:
-------
:return: data
"""
if key in ("velocity_x", "velocity_y"):
factor = self.__hdf5.get_data("conversion_factor")
key = "lb_{}".format(key)
data = self.__hdf5.get_data(key) * factor
else:
data = self.__hdf5.get_data(key)
return data
class ASCIIReader(object):
"""
Class to read in text based output files <endpoint, timestep, pathline>
to a pandas dataframe
Parameters:
----------
:param str filename: output filename (ie. endpoint, timestep, or pathline)
"""
dtypes = {'colloid': np.int,
'flag': np.int,
'nts': np.int,
'x-position': np.float,
'y-position': np.float,
'x-model': np.float,
'y-model': np.float,
'start-ts': np.int,
'end-ts': np.int,
'delta-ts': np.int,
'continuous': np.int}
def __init__(self, filename):
self.timestep = 0
self.ncol = 0
self.resolution = 0
self.xlen = 0
self.ylen = 0
self.ux = 0
self.uy = 0
self.velocity_factor = 1.
self.continuous = 0
self.__data_startline = 0
self.__header = []
if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'):
raise FileTypeError("{}: not in supported filetypes".format(filename))
else:
self.read_header(filename)
self.df = self.read_ascii(filename)
def read_header(self, filename):
"""
Method to read the header from ascii output files for LB-Colloids
Parameters:
----------
:param str filename: colloid model output filename (ie. endpoint, timestep, or pathline)
"""
with open(filename) as f:
for idx, line in enumerate(f):
if line.startswith("Timestep"):
t = line.split()
self.timestep = float(t[-1].rstrip())
elif line.startswith("Ncols"):
t = line.split()
self.ncol = int(t[-1].rstrip())
elif line.startswith('Resolution'):
t = line.split()
self.resolution = float(t[-1].rstrip())
elif line.startswith('xlen'):
t = line.split()
self.xlen = float(t[-1].rstrip())
elif line.startswith('ylen'):
t = line.split()
self.ylen = float(t[-1].rstrip())
elif line.startswith('ux'):
t = line.split()
self.ux = float(t[-1].rstrip())
elif line.startswith('uy'):
t = line.split()
self.uy = float(t[-1].rstrip())
elif line.startswith('velocity_factor'):
t = line.split()
self.velocity_factor = float(t[-1].rstrip())
elif line.startswith('Continuous'):
t = line.split()
self.continuous = int(t[-1].rstrip())
elif line.startswith("#"*10):
self.__data_startline = idx + 1
break
else:
pass
def read_ascii(self, filename):
"""
        Method to read endpoint file data from ascii files for LB-Colloids
Sets data to pandas dataframe
Parameters:
----------
:param str filename: colloid model output filename (ie. endpoint, timestep, or pathline)
"""
with open(filename) as f:
t = []
for idx, line in enumerate(f):
if idx < self.__data_startline:
pass
elif idx == self.__data_startline:
self.__header = [i.rstrip() for i in line.split()
if i not in ('\t', '', ' ', '\n')]
else:
t.append([self.__try_float(i.rstrip()) for i
in line.split() if i not in ('\t', '', ' ', '\n')])
temp = np.array(t).T
temp = {self.__header[idx]: data for idx, data in enumerate(temp)}
df = pd.DataFrame(temp)
df = df.reindex_axis(self.__header, axis=1)
df = df.set_index('colloid')
return df
@staticmethod
def __try_float(val):
try:
return float(val)
except ValueError:
return float('nan')
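# For reference, ASCIIReader.read_header above only checks the leading keyword of
# each header line and takes the last whitespace-separated token as its value,
# stopping at a line that begins with ten '#' characters. A header like the
# hypothetical sketch below (values made up for illustration) would parse:
#     Timestep 1e-06
#     Ncols 100
#     Resolution 1e-06
#     xlen 200
#     ylen 400
#     ux 0.0
#     uy -0.001
#     velocity_factor 1.0
#     Continuous 0
#     ##########
#     colloid flag nts x-position y-position ...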
class Hdf5Reader(object):
"""
Reader object to read in HDF5 stored outputs
from colloid models. Contains a data_paths dictionary
which allows the user to use keys to access data
Parameters:
----------
:param str hdf5: LB-Colloid hdf5 file name
"""
data_paths = {'ac': "colloids/model_dict/ac",
'image': 'Binary_image',
'lb_velocity_x': 'results/uarray',
'lb_velocity_y': 'results/uarray',
'lb_mean_velocity_x': 'results/mean_ux',
'lb_mean_velocity_y': 'results/mean_uy',
'conversion_factor': 'results/velocity_factor',
'pore_diameter': 'results/pore_diameter',
'porosity': 'results/porosity',
'reynolds_number': 'results/reynolds_number',
'brownian_x': 'colloids/brownian/x',
'brownian_y': 'colloids/brownian/y',
'lvdw_x': 'colloids/lvdw/x',
'lvdw_y': 'colloids/lvdw/y',
'edl_x': 'colloids/edl/x',
'edl_y': 'colloids/edl/y',
'attractive_x': 'colloids/attractive/x',
'attractive_y': 'colloids/attractive/y',
'lewis_x': 'colloids/lewis_acid_base/x',
'lewis_y': 'colloids/lewis_acid_base/y',
'velocity_x': 'colloids/ux',
'velocity_y': 'colloids/uy',
'gravity': 'colloids/gravity',
'bouyancy': 'colloids/bouyancy',
'ionic': 'colloids/chemical_dict/I',
'distance_array': 'colloids/distance_arr',
'dlvo_x': None,
'dlvo_y': None,
'col_col_x': 'colloid_colloid/x',
'col_col_y': 'colloid_colloid/y',
'col_col': None,
'distance_x': 'colloid_colloid/distance/x',
'distance_y': 'colloid_colloid/distance/y',
'distance_fine_x': 'colloid_colloid/fine/distance/x',
'distance_fine_y': 'colloid_colloid/fine/distance/y',
'col_col_fine_x': 'colloid_colloid/fine/x',
'col_col_fine_y': 'colloid_colloid/fine/y',
'col_col_fine': None,
'edl_fine': 'colloids/edl_fine',
'attractive_fine': 'colloids/attractive_fine',
'dlvo_fine': None,
'distance_fine': 'colloids/distance_fine'}
def __init__(self, hdf5):
if not hdf5.endswith('hdf') and\
not hdf5.endswith('hdf5'):
raise FileTypeError('hdf or hdf5 file must be supplied')
self.file_name = hdf5
@property
def keys(self):
"""
:return: list of valid hdf5 data keys
"""
return [i for i in Hdf5Reader.data_paths]
def get_data(self, key):
"""
Method to retrieve hdf5 data by dict. key
Parameters:
----------
:param str key: valid dictionary key from self.keys
Returns:
-------
:return: data <varies>
"""
if key not in Hdf5Reader.data_paths:
raise KeyError('Dictionary key not in valid keys. Use get_data_by_path')
hdf = H.File(self.file_name, 'r')
if key == 'lb_velocity_x':
data = hdf[Hdf5Reader.data_paths[key]][()][1]
elif key == 'lb_velocity_y':
data = hdf[Hdf5Reader.data_paths[key]][()][0]
elif key == 'dlvo_x':
data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\
hdf[Hdf5Reader.data_paths['attractive_x']][()]
# hdf[Hdf5Reader.data_paths['lewis_x']][()] +\
# hdf[Hdf5Reader.data_paths['lvdw_x']][()]
data = data[0]
elif key == 'dlvo_y':
data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\
hdf[Hdf5Reader.data_paths['attractive_y']][()]
# hdf[Hdf5Reader.data_paths['lewis_y']][()] +\
# hdf[Hdf5Reader.data_paths['lvdw_y']][()]
data = data[0]
elif key == 'dlvo_fine':
data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \
hdf[Hdf5Reader.data_paths['attractive_fine']][()]
data = data[0]
elif key in ('lvdw_x', 'lvdw_y',
'lewis_x', 'lewis_y',
'edl_x', 'edl_y',
'dlvo_x', 'dlvo_y',
'attractive_x',
'attractive_y',
'distance_array',
'edl_fine',
'attractive_fine',
'distance_fine'):
data = hdf[Hdf5Reader.data_paths[key]][()][0]
else:
data = hdf[Hdf5Reader.data_paths[key]][()]
hdf.close()
return data
def get_data_by_path(self, path):
"""
Method to retrieve hdf5 data by specific hdf5 path
Parameters:
----------
:param str path: hdf5 directory path to data
Returns:
------
:return: data <varies>
"""
hdf = H.File(self.file_name, 'r')
data = hdf[path][()]
hdf.close()
return data
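# Illustrative usage sketch for Hdf5Reader above; the file name is a placeholder
# assumption and keys are taken from Hdf5Reader.data_paths.
def _example_hdf5_reader(hdf5_file='example.hdf5'):
    """Hypothetical sketch: pull the binary image and the combined DLVO profile."""
    reader = Hdf5Reader(hdf5_file)
    image = reader.get_data('image')
    dlvo_x = reader.get_data('dlvo_x')  # edl_x + attractive_x, first row
    porosity = reader.get_data_by_path('results/porosity')
    return image, dlvo_x, porosity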
class FileTypeError(Exception):
pass
| 3.375 | 3 |
tests/parse_fragment_unit_tests.py | Wynjones1/pycompile | 0 | 12798301 | import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from src import parser
from src import ast
class ParserTestCase(unittest.TestCase):
def assertParsesTo(self, func, data, type_):
self.assertIsInstance(parser.parse(data, func), type_)
class Test_parse_fragment_unit_tests(ParserTestCase):
def test_while_0(self):
self.assertParsesTo(parser.while_, "while(x < 10){x := x + 1;}", ast.While)
def test_if_0(self):
self.assertParsesTo(parser.if_, "if(a){return a;}", ast.If)
def test_decl_0(self):
self.assertParsesTo(parser.decl, "decl(int) x := 10", ast.Declare)
def test_braced_stmt_list_0(self):
self.assertParsesTo(parser.braced_stmt_list, "{}", list)
def test_function_0(self):
self.assertParsesTo(parser.function, "function a(){}", ast.Function)
def test_function_1(self):
self.assertParsesTo(parser.function, "function a() -> int {}", ast.Function)
def test_function_2(self):
self.assertParsesTo(parser.function, "function a(int x) -> int {}", ast.Function)
def test_function_3(self):
self.assertParsesTo(parser.function, "function a(int x, int y) -> int {}", ast.Function)
def test_func_call_0(self):
self.assertParsesTo(parser.func_call, "call()", ast.FuncCall)
def test_func_call_1(self):
self.assertParsesTo(parser.func_call, "call(other())", ast.FuncCall)
def test_func_call_2(self):
self.assertParsesTo(parser.func_call, "call(1,2,3)", ast.FuncCall)
def test_expression_0(self):
self.assertParsesTo(parser.expression, "1 + 2", ast.Binop)
def test_expression_1(self):
self.assertParsesTo(parser.expression, "1 + 2 - 3 * 4 / 5 % 6", ast.Binop)
    def test_if_else_0(self):
self.assertParsesTo(parser.if_, "if(x){}else{}", ast.If)
def test_if_1(self):
self.assertParsesTo(parser.if_, "if(x){}elif(x){}else{}", ast.If)
if __name__ == '__main__':
unittest.main()
| 3.015625 | 3 |
scripts/leviosam_utils.py | maxrossi91/levioSAM | 23 | 12798302 | <filename>scripts/leviosam_utils.py
'''
Utils for levioSAM
<NAME>
Johns Hopkins University
2021
'''
import pysam
'''
Read a FASTA file as a dict if a file name is given. If not, return an empty dict.
'''
def read_fasta(ref_fn: str) -> dict:
ref = {}
if ref_fn != '':
f = pysam.FastaFile(ref_fn)
for r in f.references:
ref[r] = f[r].upper()
return ref
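# Minimal usage sketch; the FASTA file name and sequence name below are placeholder
# assumptions. Sequences are returned upper-cased, keyed by reference name:
#     ref = read_fasta('grch38.fna')
#     nbases = len(ref.get('chr1', ''))
#     print('{} sequences loaded; chr1 has {} bases'.format(len(ref), nbases))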
| 2.734375 | 3 |
conf/path_config.py | atom-zh/SA_Classification | 2 | 12798303 | # -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/6/5 21:04
# @author :Mo
# @function :file of path
import os
# Project root directory
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
path_root = path_root.replace('\\', '/')
# train out
path_out = path_root + "/out/"
# path of embedding
path_embedding = path_out + 'data/embeddings'
path_embedding_user_dict = path_embedding + '/user_dict.txt'
path_embedding_random_char = path_embedding + '/term_char.txt'
path_embedding_random_word = path_embedding + '/term_word.txt'
path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec'
path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec'
path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin'
path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin'
path_dataset = path_root +'/dataset'
path_category = path_dataset + '/category2labels.json'
path_l2i_i2l = path_dataset + '/l2i_i2l.json'
# classfiy multi labels 2021
path_multi_label = path_out + 'data/multi_label'
path_multi_label_train = path_multi_label + '/train.csv'
path_multi_label_valid = path_multi_label + '/valid.csv'
path_multi_label_labels = path_multi_label + '/labels.csv'
path_multi_label_tests = path_multi_label + '/tests.csv'
path_multi_label_error = path_multi_label + '/error.csv'
# Path abstraction layer
path_label = path_multi_label_labels
path_train = path_multi_label_train
path_valid = path_multi_label_valid
path_tests = path_multi_label_tests
path_edata = path_multi_label_error
# Model directory
path_model_dir = path_out + "data/model"
# Model file path
path_model = path_model_dir + '/model_fast_text.h5'
# Hyperparameter save path
path_hyper_parameters = path_model_dir + '/hyper_parameters.json'
# Fine-tuned embedding save path
path_fineture = path_model_dir + "/embedding_trainable.h5"
| 2.3125 | 2 |
12403/save_setu.py | sc458/uHunt-solutions | 0 | 12798304 | res = 0
T = int(input())
for i in range(0,T):
inp = input()
if(inp == 'report'):
print(res)
else:
inp_arr = inp.split(' ')
res += int(inp_arr[1])
| 3.421875 | 3 |
config.py | ajith-ramanath/AzureCognitiveSearch | 0 | 12798305 | <gh_stars>0
from os import environ as env
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
__tenant_id = env.get("AZURE_TENANT_ID", "")
__client_id = env.get("AZURE_CLIENT_ID", "")
__client_secret = env.get("AZURE_CLIENT_SECRET", "")
__key_vault_name = env.get("AZURE_KEYVAULT_NAME", "")
def get_kv_client(kv_uri):
_credential = ClientSecretCredential(
tenant_id=__tenant_id,
client_id=__client_id,
client_secret=__client_secret
)
return SecretClient(vault_url=kv_uri, credential=_credential)
def retreive_secret(key):
kv_uri = f"https://{__key_vault_name}.vault.azure.net"
client = get_kv_client(kv_uri)
return client.get_secret(key).value
COG_SEARCH_KEY = retreive_secret("cog-search-admin-key")
STORAGE_CONN_STR = retreive_secret("storage-conn-string")
COG_SEARCH_END_POINT = env.get("AZURE_COG_SEARCH_END_POINT", "")
COG_SEARCH_API_VERSION = "?api-version=2020-06-30"
COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY}
COG_SEARCH_API_PARAMS = { 'api-version': '2020-06-30' }
BLOB_CONTAINER = "collateral" | 2.125 | 2 |
isimip_data/caveats/tests/test_admin.py | ISI-MIP/isimip-data | 3 | 12798306 | <filename>isimip_data/caveats/tests/test_admin.py<gh_stars>1-10
from django.conf import settings
from django.core import mail
from django.urls import reverse
from isimip_data.caveats.models import Caveat, Comment
def test_annotation_add_get(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_add')
response = client.get(url)
assert response.status_code == 200
def test_annotation_add_post(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_add')
response = client.post(url, {
'title': 'New Caveat',
'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr',
'creator': 1,
'severity': 'low',
'status': 'new',
'specifiers_model': 'model',
'Caveat_figures-TOTAL_FORMS': 0,
'Caveat_figures-INITIAL_FORMS': 0,
'Caveat_figures-MIN_NUM_FORMS': 0,
'Caveat_figures-MAX_NUM_FORMS': 1000,
'Caveat_figures-__prefix__-id': '',
'Caveat_figures-__prefix__-caveat': '',
'Caveat_figures-__prefix__-figure': '',
'Caveat_downloads-TOTAL_FORMS': 0,
'Caveat_downloads-INITIAL_FORMS': 0,
'Caveat_downloads-MIN_NUM_FORMS': 0,
'Caveat_downloads-MAX_NUM_FORMS': 1000,
'Caveat_downloads-__prefix__-id': '',
'Caveat_downloads-__prefix__-caveat': '',
'Caveat_downloads-__prefix__-download': ''
})
assert response.status_code == 302
assert Caveat.objects.get(title='New Caveat').specifiers == {
'model': ['model']
}
def test_annotation_change_get(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_change', args=[1])
response = client.get(url)
assert response.status_code == 200
def test_annotation_change_post(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_change', args=[1])
response = client.post(url, {
'title': 'Caveat',
'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr',
'creator': 1,
'severity': 'low',
'status': 'new',
'specifiers_model': 'model3',
'Caveat_figures-TOTAL_FORMS': 0,
'Caveat_figures-INITIAL_FORMS': 0,
'Caveat_figures-MIN_NUM_FORMS': 0,
'Caveat_figures-MAX_NUM_FORMS': 1000,
'Caveat_figures-__prefix__-id': '',
'Caveat_figures-__prefix__-caveat': '',
'Caveat_figures-__prefix__-figure': '',
'Caveat_downloads-TOTAL_FORMS': 0,
'Caveat_downloads-INITIAL_FORMS': 0,
'Caveat_downloads-MIN_NUM_FORMS': 0,
'Caveat_downloads-MAX_NUM_FORMS': 1000,
'Caveat_downloads-__prefix__-id': '',
'Caveat_downloads-__prefix__-caveat': '',
'Caveat_downloads-__prefix__-download': ''
})
assert response.status_code == 302
assert Caveat.objects.get(pk=1).specifiers == {
'model': ['model3']
}
def test_caveat_add_get(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_change', args=[1])
response = client.get(url)
assert response.status_code == 200
def test_caveat_send_get(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_send', args=[1])
response = client.get(url)
assert response.status_code == 200
def test_caveat_send_post(db, client):
client.login(username='admin', password='<PASSWORD>')
caveat = Caveat.objects.get(pk=1)
caveat.email = False
caveat.save()
url = reverse('admin:caveats_caveat_send', args=[1])
response = client.post(url, {
'subject': 'Subject',
'message': 'Message',
'recipients': '<EMAIL>\n<EMAIL>',
'_send': 'Send email'
})
assert response.status_code == 302
assert len(mail.outbox) == 2
assert mail.outbox[0].subject == 'Subject'
assert mail.outbox[0].body == 'Message'
assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
assert mail.outbox[0].to == ['<EMAIL>']
assert mail.outbox[1].to == ['<EMAIL>']
assert mail.outbox[0].cc == []
assert mail.outbox[0].bcc == []
assert mail.outbox[0].attachments == []
def test_caveat_send_post_error(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_send', args=[1])
response = client.post(url, {
'subject': 'Subject',
'message': 'Message',
'recipients': '<EMAIL>\n<EMAIL>',
'_send': 'Send email'
})
assert response.status_code == 200
assert b'No email can been send, since the email flag was set before.' in response.content
assert len(mail.outbox) == 0
def test_caveat_send_post_back(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_caveat_send', args=[1])
response = client.post(url, {
'subject': 'Subject',
'message': 'Message',
'recipients': '<EMAIL>\n<EMAIL>',
'_back': 'Back'
})
assert response.status_code == 302
assert len(mail.outbox) == 0
def test_comment_send_get(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_comment_send', args=[1])
response = client.get(url)
assert response.status_code == 200
def test_comment_send_post(db, client):
client.login(username='admin', password='<PASSWORD>')
comment = Comment.objects.get(pk=1)
comment.email = False
comment.save()
url = reverse('admin:caveats_comment_send', args=[1])
response = client.post(url, {
'subject': 'Subject',
'message': 'Message',
'recipients': '<EMAIL>\n<EMAIL>',
'_send': 'Send email'
})
assert response.status_code == 302
assert len(mail.outbox) == 2
assert mail.outbox[0].subject == 'Subject'
assert mail.outbox[0].body == 'Message'
assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
assert mail.outbox[0].to == ['<EMAIL>']
assert mail.outbox[1].to == ['<EMAIL>']
assert mail.outbox[0].cc == []
assert mail.outbox[0].bcc == []
assert mail.outbox[0].attachments == []
def test_comment_send_post_error(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_comment_send', args=[1])
response = client.post(url, {
'subject': 'Subject',
'message': 'Message',
        'recipients': '<EMAIL>\n<EMAIL>',
'_send': 'Send email'
})
assert response.status_code == 200
assert b'No email can been send, since the email flag was set before.' in response.content
assert len(mail.outbox) == 0
def test_comment_send_post_back(db, client):
client.login(username='admin', password='<PASSWORD>')
url = reverse('admin:caveats_comment_send', args=[1])
response = client.post(url, {
'subject': 'Subject',
'message': 'Message',
        'recipients': '<EMAIL>\n<EMAIL>',
'_back': 'Back'
})
assert response.status_code == 302
assert len(mail.outbox) == 0
| 2.15625 | 2 |
backend/common/tests.py | marcosflp/commitz | 1 | 12798307 | from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from users.models import User, GitHubProfile
class BaseTestCase(APITestCase):
USER = 'admin'
PASSWORD = '<PASSWORD>'
EMAIL = '<EMAIL>'
def setUp(self):
super(BaseTestCase, self).setUp()
user_authentication_token = Token.objects.create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}')
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(username=cls.USER, password=cls.PASSWORD, email=cls.EMAIL)
GitHubProfile.objects.create(user=cls.user)
| 2.0625 | 2 |
docs/examples/ckan.py | Defra-Data-Science-Centre-of-Excellence/sdg-build | 7 | 12798308 | """
This is an example of importing data from a CKAN instance and converting it
into the JSON output suitable for the Open SDG reporting platform.
"""
import os
import sdg
import pandas as pd
# Input data from CKAN
endpoint = 'https://inventory.data.gov/api/action/datastore_search'
indicator_id_map = {
# The resource ID for indicator 4.2.2.
'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2'
}
data_input = sdg.inputs.InputCkan(
endpoint=endpoint,
indicator_id_map=indicator_id_map
)
# The data in this example is not exactly what sdg-build expects, so we need to
# add a "data alteration" function to correct it.
def data_alteration(df):
# The data in this example is in a "wide" format, so we need to convert it
# to the "tidy" format expected by sdg-build.
df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value')
# We also rename some columns to match what sdg-build expects.
df = df.rename(columns={'year': 'Year', 'value': 'Value'})
return df
data_input.add_data_alteration(data_alteration)
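# Hypothetical illustration of the reshaping above (these column names are
# assumptions, not the real CKAN resource schema): a wide table such as
#     year  female  male
#     2016    94.0  93.0
#     2017    95.0  94.0
# becomes, after pd.melt plus the renames in data_alteration, the tidy table
#     Year  gender  Value
#     2016  female   94.0
#     2017  female   95.0
#     2016  male     93.0
#     2017  male     94.0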
# Combine the inputs into one list.
inputs = [data_input]
# Use a Prose.io file for the metadata schema.
schema_path = os.path.join('tests', '_prose.yml')
schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path)
# Use SDG Translations for translations
tag = '0.8.1'
translations = sdg.translations.TranslationInputSdgTranslations(tag=tag)
# Create an "output" from these inputs and schema, for JSON for Open SDG.
opensdg_output = sdg.outputs.OutputOpenSdg(
inputs=inputs,
schema=schema,
output_folder='_site',
translations=translations)
# Validate the indicators.
validation_successful = opensdg_output.validate()
# If everything was valid, perform the build.
if validation_successful:
# Here are several ways you can generate the build:
# 1. Translated into a single language, like English: opensdg_output.execute('en')
# (the build will appear in '_site/en')
# 2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en'])
# (three builds will appear in '_site/es', '_site/ru', and '_site/en')
# 3. Untranslated: opensdg_output.execute()
# (the build will appear in '_site')
opensdg_output.execute_per_language(['es', 'ru', 'en'])
else:
raise Exception('There were validation errors. See output above.')
| 2.984375 | 3 |
prep_pop_points.py | ITDP/two_step_access | 1 | 12798309 | import geopandas as gpd
# required for MAUP: https://github.com/geopandas/geopandas/issues/2199
gpd.options.use_pygeos = False
import pandas as pd
import numpy as np
import shapely
import shapely.geometry
from shapely.geometry import Polygon, Point
from tqdm import tqdm
import maup
import os
#INTRO - need to edit values here for new city deployment
data_source = "census" #"census" or "ghsl"
city_crs = 32712
blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs)
block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs)
veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,]
bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [
shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)],
crs = 4326)
bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs)
#define exception polygon
#this is the are within which the grid will be higher-resolution
point = Point(-71.411479,41.823544)
point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326)
point_crs = point_latlon.to_crs(city_crs)
poly_crs = point_crs.buffer(1000).unary_union
exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs)
high_res = 250 #m to a side
low_res = 1000 #m to a side
# END INTRO actual code
def summarize_veh_avail(row):
total_pop = int(row['B25044_001E'])
if total_pop < 1:
return 0,0,1 #if no population, assume all 0 households have 2 cars
pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop
pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop
pct_twopluscars = 1 - pct_carfree - pct_onecar
return pct_carfree, pct_onecar, pct_twopluscars
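# Hypothetical illustration of the input summarize_veh_avail expects: one row of the
# ACS B25044 table with string counts. The values below are made up and the field
# meanings are inferred from how the function uses them (001 = total households,
# 003/010 = zero-vehicle, 004/011 = one-vehicle):
#     row = {'B25044_001E': '100',
#            'B25044_003E': '10', 'B25044_010E': '15',
#            'B25044_004E': '20', 'B25044_011E': '25'}
#     summarize_veh_avail(row)  # -> (0.25, 0.45, 0.30)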
def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None):
xmin,ymin,xmax,ymax = bounds_poly_crs.bounds
# thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas)
rows = int(np.ceil((ymax-ymin) / low_resolution))
cols = int(np.ceil((xmax-xmin) / low_resolution))
XleftOrigin = xmin
XrightOrigin = xmin + low_resolution
YtopOrigin = ymax
YbottomOrigin = ymax - low_resolution
lowres_cells = []
exception_cells = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom = YbottomOrigin
for j in range(rows):
cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)])
cell = cell.intersection(bounds_poly_crs)
if exception_gdf_crs is not None:
if not cell.intersects(exception_gdf_crs.unary_union):
lowres_cells.append(cell)
else:
exception_cells.append(cell)
else:
lowres_cells.append(cell)
Ytop = Ytop - low_resolution
Ybottom = Ybottom - low_resolution
XleftOrigin = XleftOrigin + low_resolution
XrightOrigin = XrightOrigin + low_resolution
highres_cells = []
if exception_gdf_crs is not None:
for exception_cell in exception_cells:
highres_cells += build_grid(exception_cell, high_resolution)
return lowres_cells + highres_cells
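# Quick sanity sketch for build_grid above (numbers are illustrative only): a
# 2000 m x 2000 m box gridded at a 1000 m resolution yields a 2 x 2 set of cells.
#     cells = build_grid(shapely.geometry.box(0, 0, 2000, 2000), 1000)
#     assert len(cells) == 4
# Passing exception_gdf_crs and high_resolution re-grids any low-resolution cell
# that intersects the exception polygon at the finer resolution instead.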
def populate_grid(grid, blocks, block_groups):
#blocks first, for simple population
blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0)
blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum()
blocks_weights = maup.normalize(blocks_weights, level=0)
grid['POP10'] = maup.prorate(
blocks_pieces,
blocks['POP10'],
weights=blocks_weights,
)
#then block groups for car ownership
bg_pieces = maup.intersections(block_groups, grid)
bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum()
bg_weights = maup.normalize(bg_weights, level=0)
columns = ['pct_carfree', 'pct_onecar','pct_twopluscars']
grid[columns] = maup.prorate(
bg_pieces,
block_groups[columns],
weights=bg_weights,
aggregate_by='mean',
)
return grid
#clip blocks and block groups
blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs)
block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs)
#assign veh_avail and block_groups the same index
block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID
newidx = []
for bgidx in veh_avail.GEO_ID:
newidx.append(bgidx[9:])
veh_avail.index = newidx
for bgidx in block_groups_gdf_crs.index:
pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx])
total_pop = float(veh_avail.loc[bgidx,'B25044_001E'])
block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop
block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree
block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar
block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars
grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250)
grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs)
grid_gdf_latlon = grid_gdf_crs.to_crs(4326)
grid_pop_gdf_crs = populate_grid(
grid_gdf_crs,
blocks_gdf_crs,
block_groups_gdf_crs,
)
grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area
grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326)
points = pd.DataFrame()
for idx in grid_pop_gdf_latlon.index:
if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']):
grid_pop_gdf_latlon.loc[idx,'id'] = idx
points.loc[idx,'id'] = idx
centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid
points.loc[idx,'lat'] = centroid.y
points.loc[idx,'lon'] = centroid.x
for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']:
points.loc[idx, col] = grid_pop_gdf_latlon.loc[idx, col]
points.to_csv('pop_points.csv')
grid_pop_gdf_latlon.to_file('grid_pop.geojson',driver='GeoJSON') | 2.296875 | 2 |
examples/Lists.py | mnishitha/INF502-Fall2020 | 8 | 12798310 | <reponame>mnishitha/INF502-Fall2020
list_of_numbers = [1,2,3,4,5]
len(list_of_numbers)
list_of_numbers[0]
print(list_of_numbers)
list_of_numbers[4]
print(list_of_numbers)
list_of_numbers[-2]
print(list_of_numbers)
#extending it
list_of_numbers.extend([6,7,8])
print(list_of_numbers)
#slicing it
piece = list_of_numbers[:4] #from the beginning up to (not including) index 4
print (piece)
piece = list_of_numbers[2:6] #from index 2 up to (not including) index 6
print (piece)
#shrinking it
del list_of_numbers [2:5]
print(list_of_numbers)
#merging
list1 = [1,2,3]
list2 = [4,5,6]
list3 = list1 + list2
print(list3)
list1.extend(list2)
print(list1)
#sorting
list1 = [-1,4,0,9,2,7]
list1.sort()
print (list1) | 4.03125 | 4 |
backend/app/company/permissions.py | adithyanps/djangoAuthorization | 0 | 12798311 | from rest_framework import permissions
BASE_SAFE_METHODS = ['GET','HEAD','OPTIONS']
MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS']
TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS']
class Permission(permissions.BasePermission):
"""manage permissions based on user choice"""
def has_object_permission(self, request, view, obj):
print(request.method in permissions.SAFE_METHODS,'--')
if obj.user.user_choice == "BASE":
print('base')
if (request.method in BASE_SAFE_METHODS
):
return True
elif obj.user.user_choice == "MEDIUM":
print('medium')
if (request.method in MEDIUM_SAFE_METHODS
):
return True
elif obj.user.user_choice == "TOP":
print('Top')
if (request.method in TOP_SAFE_METHODS
):
return True
| 2.640625 | 3 |
lib/fama/pe_functional_pipeline.py | aekazakov/FamaProfiling | 0 | 12798312 | <reponame>aekazakov/FamaProfiling
"""Runs Fama functional profiling pipeline"""
import os
import gzip
from fama.utils.const import ENDS, STATUS_GOOD
from fama.se_functional_pipeline import run_fastq_pipeline
from fama.utils.utils import run_external_program
from fama.project.sample import Sample
from fama.diamond_parser.diamond_parser import DiamondParser
from fama.output.report import generate_fastq_report, generate_sample_report
from fama.output.pdf_report import generate_pdf_report
from fama.output.krona_xml_writer import make_functions_chart
from fama.output.json_util import export_annotated_reads, export_sample
from fama.third_party.microbe_census import run_pipeline, report_results
from fama.diamond_parser.hit_utils import parse_fastq_seqid
def run_ref_search(parser, command):
"""Runs pre-selection DIAMOND search
Args:
parser (:obj:DiamondParser): parser object processing an input sequence file
command (str): either 'blastx' or 'blastp' (see DIAMOND manual)
"""
print('Starting DIAMOND')
diamond_args = [parser.config.diamond_path,
command,
'--db',
parser.config.get_reference_diamond_db(
parser.options.get_collection(parser.sample.sample_id)
),
'--query',
parser.options.get_fastq_path(parser.sample.sample_id, parser.end),
'--out',
os.path.join(
parser.options.get_project_dir(parser.sample.sample_id),
parser.sample.sample_id + '_' + parser.end + '_'
+ parser.options.ref_output_name
),
'--max-target-seqs',
'50',
'--evalue',
str(parser.config.get_evalue_cutoff(
parser.options.get_collection(parser.sample.sample_id)
)),
# '--threads',
# parser.config.threads,
'--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',
'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send',
'evalue', 'bitscore']
run_external_program(diamond_args)
print('DIAMOND finished')
def run_bgr_search(parser, command):
"""Runs classification DIAMOND search
Args:
parser (:obj:DiamondParser): parser object processing an input sequence file
command (str): either 'blastx' or 'blastp' (see DIAMOND manual)
"""
print('Starting DIAMOND')
diamond_args = [parser.config.diamond_path,
command,
'--db',
parser.config.get_background_diamond_db(
parser.options.get_collection(parser.sample.sample_id)
),
'--query',
os.path.join(
parser.options.get_project_dir(parser.sample.sample_id),
parser.sample.sample_id + '_' + parser.end + '_'
+ parser.options.ref_hits_fastq_name
),
'--out',
os.path.join(
parser.options.get_project_dir(parser.sample.sample_id),
parser.sample.sample_id + '_' + parser.end + '_'
+ parser.options.background_output_name
),
'--max-target-seqs',
'100',
'--evalue',
str(
parser.config.get_background_db_size(
parser.options.get_collection(parser.sample.sample_id)
) * parser.config.get_evalue_cutoff(
parser.options.get_collection(parser.sample.sample_id)
) / parser.config.get_reference_db_size(
parser.options.get_collection(parser.sample.sample_id)
)),
# '--threads',
# parser.config.threads,
'--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch',
'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
run_external_program(diamond_args)
print('DIAMOND finished')
def run_microbecensus(sample, config):
"""Runs MicrobeCensus
Args:
sample (:obj:Sample): sample analyzed
config (:obj:ProgramConfig): program configuration object
"""
args = {}
if sample.is_paired_end:
args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path]
else:
args['seqfiles'] = [sample.fastq_fwd_path]
args['verbose'] = True
args['diamond'] = config.diamond_path
args['data_dir'] = config.microbecensus_datadir
args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt')
args['threads'] = int(config.threads)
args['no_equivs'] = True
if sample.fastq_fwd_readcount < 1500000:
# MicrobeCensus subsamples 2M reads by default, but sequence library
# must have more reads as some reads are always discarded by filtering
args['nreads'] = sample.fastq_fwd_readcount // 2
elif sample.fastq_fwd_readcount < 3000000:
args['nreads'] = sample.fastq_fwd_readcount - 1000000
else:
args['nreads'] = 2000000
print(args)
est_ags, args = run_pipeline(args)
report_results(args, est_ags, None)
def import_fastq_pe(parser1, parser2):
"""Reads uncompressed or gzipped FASTQ file, finds sequences of
selected reads and stores them
Returns:
read_count (int): number of reads in the file
base_count (int): total number of bases in all reads
"""
fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end)
line_counter = 0
read_count1 = 0
base_count1 = 0
current_read = None
infile_handle = None
if fastq_file1.endswith('.gz'):
infile_handle = gzip.open(fastq_file1, 'rb')
else:
infile_handle = open(fastq_file1, 'rb')
for line in infile_handle:
# count lines as each FASTQ entry has exactly four lines
line_counter += 1
if line_counter == 5:
line_counter = 1
line = line.decode('utf8').rstrip('\n\r')
if line_counter == 1:
read_count1 += 1
(read_id, _) = parse_fastq_seqid(line)
current_read = read_id
if current_read in parser1.reads:
parser1.reads[current_read].read_id_line = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_id = line
elif line_counter == 2:
base_count1 += len(line)
if current_read in parser1.reads:
parser1.reads[current_read].sequence = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_sequence = line
elif line_counter == 3:
if current_read in parser1.reads:
parser1.reads[current_read].line3 = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_line3 = line
elif line_counter == 4:
if current_read in parser1.reads:
parser1.reads[current_read].quality = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_quality = line
infile_handle.close()
fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end)
line_counter = 0
read_count2 = 0
base_count2 = 0
current_read = None
if fastq_file2.endswith('.gz'):
infile_handle = gzip.open(fastq_file2, 'rb')
else:
infile_handle = open(fastq_file2, 'rb')
for line in infile_handle:
# count lines as each FASTQ entry has exactly four lines
line_counter += 1
if line_counter == 5:
line_counter = 1
line = line.decode('utf8').rstrip('\n\r')
if line_counter == 1:
read_count2 += 1
(read_id, _) = parse_fastq_seqid(line)
current_read = read_id
if current_read in parser1.reads:
parser1.reads[current_read].pe_id = line
if current_read in parser2.reads:
parser2.reads[current_read].read_id_line = line
elif line_counter == 2:
base_count2 += len(line)
if current_read in parser1.reads:
parser1.reads[current_read].pe_sequence = line
if current_read in parser2.reads:
parser2.reads[current_read].sequence = line
elif line_counter == 3:
if current_read in parser1.reads:
parser1.reads[current_read].pe_line3 = line
if current_read in parser2.reads:
parser2.reads[current_read].line3 = line
elif line_counter == 4:
if current_read in parser1.reads:
parser1.reads[current_read].pe_quality = line
if current_read in parser2.reads:
parser2.reads[current_read].quality = line
infile_handle.close()
return (parser1, parser2, read_count1, read_count2, base_count1, base_count2)
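# For reference, the line_counter bookkeeping in import_fastq_pe above relies on the
# standard four-line FASTQ record layout (this record is illustrative only):
#     @read_id/1 optional description   <- line 1: identifier, parsed by parse_fastq_seqid
#     ACGTACGT...                       <- line 2: sequence, counted into base_count
#     +                                 <- line 3: separator
#     FFFFFFFF...                       <- line 4: per-base quality string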
def export_paired_end_reads_fastq(parser):
""" For paired-end sequence reads, write paired-end reads for pre-selected
reads into a separate FASTQ file
"""
outdir = parser.sample.work_directory
read_ids = {}
for read_id in sorted(parser.reads.keys()):
read_ids[read_id] = read_id
fastq_outfile = os.path.join(outdir,
parser.sample.sample_id + '_'
+ parser.end + '_'
+ parser.options.pe_reads_fastq_name + '.gz')
with gzip.open(fastq_outfile, 'wt') as outfile:
for read_id in sorted(parser.reads.keys()):
outfile.write(parser.reads[read_id].pe_id + '\n')
outfile.write(parser.reads[read_id].pe_sequence + '\n')
outfile.write(parser.reads[read_id].pe_line3 + '\n')
outfile.write(parser.reads[read_id].pe_quality + '\n')
def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None):
"""Functional profiling pipeline for entire project
Args:
project (:obj:Project): current project
sample_identifier (str, optional): sample identifier
end_identifier (str, optional): end identifier
"""
for sample_id in project.list_samples():
if sample_identifier and sample_identifier != sample_id:
continue
sample = Sample(sample_id)
sample.load_sample(project.options)
project.samples[sample_id] = sample
if end_identifier:
project.samples[sample_id].reads[end_identifier] = \
run_fastq_pipeline(project,
sample=project.samples[sample_id],
end_id=end_identifier)
else:
project.samples[sample_id].reads = \
run_pe_fastq_pipeline(project,
sample=project.samples[sample_id])
export_sample(project.samples[sample_id])
# Generate output for the sample or delete sample from memory
project.options.set_sample_data(project.samples[sample_id])
metric = None
for sample_id in project.list_samples():
if project.is_paired_end():
metric = 'efpkg'
for sample_id in project.list_samples():
if project.samples[sample_id].rpkg_scaling_factor == 0.0:
metric = 'fragmentcount'
else:
metric = 'erpkg'
for sample_id in project.list_samples():
if project.samples[sample_id].rpkg_scaling_factor == 0.0:
metric = 'readcount'
# Generate output for all samples
for sample_id in project.list_samples():
generate_sample_report(project, sample_id, metric=metric)
# Generate output for the project
if sample_identifier is None:
# Skip project report if the pipeline is running for only one sample
project.generate_report()
# Rename existing project file and save current version
project.save_project_options()
return project
def run_pe_fastq_pipeline(project, sample):
"""Functional profiling pipeline for single FASTQ file processing
Args:
project (:obj:Project): current project
sample (:obj:Sample): current sample
"""
result = {}
parser1 = DiamondParser(config=project.config,
options=project.options,
taxonomy_data=project.taxonomy_data,
ref_data=project.ref_data,
sample=sample,
end=ENDS[0])
parser2 = DiamondParser(config=project.config,
options=project.options,
taxonomy_data=project.taxonomy_data,
ref_data=project.ref_data,
sample=sample,
end=ENDS[1])
if not os.path.isdir(project.options.get_project_dir(sample.sample_id)):
os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True)
if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id),
project.options.get_output_subdir(sample.sample_id))):
os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id),
project.options.get_output_subdir(sample.sample_id)))
# Search in reference database
if not os.path.exists(
os.path.join(
parser1.options.get_project_dir(parser1.sample.sample_id),
parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.ref_output_name
)
):
run_ref_search(parser1, 'blastx')
if not os.path.exists(
os.path.join(
parser2.options.get_project_dir(parser2.sample.sample_id),
parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.ref_output_name
)
):
run_ref_search(parser2, 'blastx')
# Process output of reference DB search
parser1.parse_reference_output()
parser2.parse_reference_output()
# Import sequence data for selected sequence reads
print('Reading FASTQ file')
(parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe(
parser1, parser2
)
if sample.fastq_fwd_readcount == 0:
sample.fastq_fwd_readcount = read_count1
if sample.fastq_fwd_basecount == 0:
sample.fastq_fwd_basecount = base_count1
if sample.fastq_rev_readcount == 0:
sample.fastq_rev_readcount = read_count2
if sample.fastq_rev_basecount == 0:
sample.fastq_rev_basecount = base_count2
if sample.rpkg_scaling_factor == 0.0:
sample.import_rpkg_scaling_factor()
if sample.rpkg_scaling_factor == 0.0:
run_microbecensus(sample=sample, config=project.config)
sample.import_rpkg_scaling_factor()
project.options.set_sample_data(sample)
if parser1.reads:
parser1.export_hit_fastq()
print('Hits for forward end reads exported in FASTQ format')
parser1.export_hit_list()
print('List of hits fo forward end reads exported')
if not os.path.exists(
os.path.join(
parser1.options.get_project_dir(parser1.sample.sample_id),
parser1.sample.sample_id + '_' + parser1.end + '_'
+ parser1.options.background_output_name
)
):
run_bgr_search(parser1, 'blastx')
print('Classification DB search finished')
parser1.parse_background_output()
print('Classification DB search results imported')
parser1.export_read_fastq()
print('Classified forward end reads exported in FASTQ format')
export_paired_end_reads_fastq(parser1)
print('Paired reads for classified forward end reads exported')
export_annotated_reads(parser1)
print('Classified forward end reads exported in JSON format')
generate_fastq_report(parser1)
print('Text report for forward end reads created')
generate_pdf_report(parser1)
print('PDF report for forward end reads created')
make_functions_chart(parser1)
print('Krona chart for forward end reads created')
result[ENDS[0]] = {read_id: read for (read_id, read) in
parser1.reads.items() if read.status == STATUS_GOOD}
else:
# No hits found
print('Pre-selection search did not find any hits for forward end reads')
result[ENDS[0]] = {}
if parser2.reads:
parser2.export_hit_fastq()
print('Hits for reverse end reads exported in FASTQ format')
parser2.export_hit_list()
print('List of hits for reverse end reads exported')
if not os.path.exists(
os.path.join(
parser2.options.get_project_dir(parser2.sample.sample_id),
parser2.sample.sample_id + '_' + parser2.end + '_'
+ parser2.options.background_output_name
)
):
run_bgr_search(parser2, 'blastx')
print('Classification DB search for reverse end reads finished')
parser2.parse_background_output()
print('Classification DB search results for reverse end reads imported')
parser2.export_read_fastq()
print('Classified reverse end reads exported in FASTQ format')
export_paired_end_reads_fastq(parser2)
print('Paired reads for classified reverse end reads exported')
export_annotated_reads(parser2)
print('Classified reverse end reads exported in JSON format')
generate_fastq_report(parser2)
print('Text report for reverse end reads created')
generate_pdf_report(parser2)
print('PDF report for reverse end reads created')
make_functions_chart(parser2)
print('Krona chart for reverse end reads created')
result[ENDS[1]] = {read_id: read for (read_id, read) in
parser2.reads.items() if read.status == STATUS_GOOD}
else:
# No hits found
print('Pre-selection search did not find any hits for reverse end reads')
result[ENDS[1]] = {}
return result
def main():
"""Main function"""
print('This program is not intended to run directly.')
if __name__ == '__main__':
main()
| 2.140625 | 2 |
Python3/1287.py | rakhi2001/ecom7 | 854 | 12798313 | <reponame>rakhi2001/ecom7
__________________________________________________________________________________________________
sample 76 ms submission
class Solution:
def findSpecialInteger(self, arr: List[int]) -> int:
for idx, num in enumerate(arr):
if arr[idx] == arr[idx+len(arr)//4]: return num
__________________________________________________________________________________________________
sample 80 ms submission
class Solution:
def findSpecialInteger(self, arr: List[int]) -> int:
size = int((len(arr)) / 4)
loose = max(1, size)
for index in range(0, len(arr), loose):
candidate = arr[index]
left = bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr), index + loose))
right = bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index + loose))
if right - left > size:
return arr[index]
assert(False)
__________________________________________________________________________________________________
| 2.703125 | 3 |
IntroProblems/Permutations/source.py | 0x5eba/CSES-solutions | 27 | 12798314 | N=int(input())
# N=1 and N=4 are special cases; N=2 and N=3 have no valid arrangement
if N==4:print("2 4 1 3");exit()
if N==1:print("1");exit()
if N<4:print("NO SOLUTION");exit()
# print all odd numbers, then all even numbers, so neighbours always differ by at least 2
[print(i) for i in range(1,N+1,2)]
[print(i) for i in range(2,N+1,2)]
| 3.46875 | 3 |
8 kyu/Remove First and Last Character.py | mwk0408/codewars_solutions | 6 | 12798315 | <filename>8 kyu/Remove First and Last Character.py
def remove_char(s):
    s2 = s[1:len(s)-1]
    return s2 | 3.40625 | 3 |
laas/actions/actions/parse_network_data.py | opnfv/laas-reflab | 1 | 12798316 | ##############################################################################
# Copyright 2018 <NAME> and Others #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
##############################################################################
import json
from st2actions.runners.pythonrunner import Action
class ParseNetworkAction(Action):
def run(self, task_data):
task_data.pop("lab_token", None) # We dont care, just remove if there
if len(task_data) > 1:
print("There should only be one host here!")
return None
ret = {
'host': list(task_data.keys())[0], # hostname
'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by host task
'default': self.get_default_interface(task_data), # interface that should be def route
'empty': self.detect_empty(task_data)
}
return ret
def detect_empty(self, task_data):
for hostname, iface_dict in task_data.items():
for mac, vlan_list in iface_dict.items():
if vlan_list:
return False
return True
def get_mappings(self, task_data):
mappings = []
for hostname, iface_dict in task_data.items():
for mac, vlan_list in iface_dict.items():
for vlan in vlan_list:
if vlan['tagged']:
mapping = mac + "-" + str(vlan['vlan_id'])
mappings.append(mapping)
return mappings
def get_default_vlans(self):
vlan_list = json.loads(
self.action_service.get_value("default_vlans", local=False)
)
return vlan_list
def get_default_interface(self, task_data):
default = set(self.get_default_vlans())
for hostname, iface_dict in task_data.items():
for mac, vlan_list in iface_dict.items():
for vlan in vlan_list:
if int(vlan['vlan_id']) in default:
default_interface = mac
if vlan['tagged']:
default_interface += "." + str(vlan['vlan_id'])
return default_interface
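# Illustrative sketch (not part of the original action): the task_data layout
# this action expects and what get_mappings() produces for it. The hostname,
# MAC address and VLAN IDs below are made up.
def _example_task_data():
    task_data = {
        "node1": {
            "00:11:22:33:44:55": [
                {"vlan_id": 100, "tagged": True},
                {"vlan_id": 200, "tagged": False},
            ]
        }
    }
    # get_mappings() ignores `self`, so under Python 3 it can be exercised
    # without constructing a full StackStorm Action instance.
    mappings = ParseNetworkAction.get_mappings(None, task_data)
    return task_data, mappings  # mappings == ["00:11:22:33:44:55-100"]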
| 2.140625 | 2 |
serving_agent/model_agent.py | HughWen/ServingAgent | 19 | 12798317 | import time
import pickle
import redis
class ModelAgent:
def __init__(
self,
redis_broker='localhost:6379',
redis_queue='broker',
model_class=None,
model_config={},
batch_size=64,
model_sleep=0.1,
collection=False,
collection_limit=6000,
):
parse = lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])}
self.db = redis.StrictRedis(**parse(redis_broker))
self.redis_queue = redis_queue
assert 'predict' in dir(model_class), 'No predict function in model class'
self.model_class = model_class
self.model_config = model_config
self.batch_size = batch_size
self.model_sleep = model_sleep
self.collection = collection
self.collection_limit = collection_limit
def run(self, pre_process=lambda x: x, post_process=lambda x: x):
model = self.model_class(**self.model_config)
mq_miss = 0
print('model init')
while True:
with self.db.pipeline() as pipe:
pipe.lrange(self.redis_queue, 0, self.batch_size - 1)
pipe.ltrim(self.redis_queue, self.batch_size, -1)
queue, _ = pipe.execute()
if queue:
mq_miss = 0
if not model:
model = self.model_class(**self.model_config)
messages = [pickle.loads(x) for x in queue]
keys = [message.get('key') for message in messages]
model_inputs = [pre_process(message.get('model_input')) for message in messages]
results = [post_process(x) for x in model.predict(model_inputs)]
self.db.mset({key: pickle.dumps(result) for key, result in zip(keys, results)})
else:
mq_miss += 1
if mq_miss and mq_miss % self.collection_limit == 0:
mq_miss = 0
if self.collection and model:
model = None
print('model is collected')
time.sleep(self.model_sleep)
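# Illustrative sketch (not part of the original module): wiring a minimal model
# class into ModelAgent. _EchoModel and the broker address are hypothetical;
# agent.run() blocks forever and needs a reachable Redis instance.
class _EchoModel:
    def predict(self, model_inputs):
        # A real model would run batched inference here.
        return model_inputs


def _example_agent():
    agent = ModelAgent(
        redis_broker="localhost:6379",
        redis_queue="broker",
        model_class=_EchoModel,
        batch_size=8,
    )
    return agent  # call agent.run() to start consuming the queue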
| 2.1875 | 2 |
nautobot_golden_config/utilities/graphql.py | chadell/nautobot-plugin-golden-config | 0 | 12798318 | <filename>nautobot_golden_config/utilities/graphql.py<gh_stars>0
"""Example code to execute GraphQL query from the ORM."""
import logging
from django.utils.module_loading import import_string
from graphene_django.settings import graphene_settings
from graphql import get_default_backend
from graphql.error import GraphQLSyntaxError
from nautobot_golden_config.models import GoldenConfigSettings
from .constant import PLUGIN_CFG
LOGGER = logging.getLogger(__name__)
def graph_ql_query(request, device, query):
"""Function to run graphql and transposer command."""
LOGGER.debug("GraphQL - request for `%s`", str(device))
backend = get_default_backend()
schema = graphene_settings.SCHEMA
LOGGER.debug("GraphQL - set query variable to device.")
variables = {"device": device}
try:
LOGGER.debug("GraphQL - test query: `%s`", str(query))
document = backend.document_from_string(schema, query)
except GraphQLSyntaxError as error:
LOGGER.warning("GraphQL - test query Failed: `%s`", str(query))
return (400, {"error": str(error)})
LOGGER.debug("GraphQL - execute query with variables")
result = document.execute(context_value=request, variable_values=variables)
if result.invalid:
LOGGER.warning("GraphQL - query executed unsuccessfully")
return (400, result.to_dict())
data = result.data
global_settings = GoldenConfigSettings.objects.get(id="aaaaaaaa-0000-0000-0000-000000000001")
if global_settings.shorten_sot_query is True:
data = data["devices"][0]
if PLUGIN_CFG.get("sot_agg_transposer"):
LOGGER.debug("GraphQL - tansform data with function: `%s`", str(PLUGIN_CFG.get("sot_agg_transposer")))
try:
data = import_string(PLUGIN_CFG.get("sot_agg_transposer"))(data)
except Exception as error: # pylint: disable=broad-except
return (400, {"error": str(error)})
LOGGER.debug("GraphQL - request successful")
return (200, data)
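# Illustrative sketch (not part of the original module): the kind of query this
# helper expects. The query text, filter arguments and device name are
# hypothetical and depend on the Nautobot GraphQL schema; `request` must be the
# web request used as the GraphQL execution context, and a running Nautobot
# instance with the plugin settings object is required.
_EXAMPLE_QUERY = """
query ($device: String!) {
  devices(name: $device) {
    name
  }
}
"""


def _example_call(request):
    # Returns a (status_code, data) tuple; 400 is returned on syntax or
    # execution errors.
    return graph_ql_query(request, "device1", _EXAMPLE_QUERY)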
| 2.53125 | 3 |
etc/tf_tutorial/Tensorflow-101-master/cnn_mnist_simple.py | zhangbo2008/facenet | 0 | 12798319 | #!/usr/bin/env python
# coding: utf-8
# ## SIMPLE CONVOLUTIONAL NEURAL NETWORK
# In[1]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
get_ipython().run_line_magic('matplotlib', 'inline')
print ("PACKAGES LOADED")
# # LOAD MNIST
# In[2]:
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print ("MNIST ready")
# # SELECT DEVICE TO BE USED
# In[3]:
device_type = "/gpu:1"
# # DEFINE CNN
# In[4]:
with tf.device(device_type): # <= This is optional
n_input = 784
n_output = 10
weights = {
'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),
'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1))
}
biases = {
'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),
'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
def conv_simple(_input, _w, _b):
# Reshape input
_input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])
# Convolution
_conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
# Add-bias
_conv2 = tf.nn.bias_add(_conv1, _b['bc1'])
# Pass ReLu
_conv3 = tf.nn.relu(_conv2)
# Max-pooling
_pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Vectorize
_dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]])
# Fully-connected layer
_out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1'])
# Return everything
out = {
'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3
, 'pool': _pool, 'dense': _dense, 'out': _out
}
return out
print ("CNN ready")
# # DEFINE COMPUTATIONAL GRAPH
# In[5]:
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
# Parameters
learning_rate = 0.001
training_epochs = 10
batch_size = 100
display_step = 1
# Functions!
with tf.device(device_type): # <= This is optional
_pred = conv_simple(x, weights, biases)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y))
optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects
accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy
init = tf.initialize_all_variables()
# Saver
save_step = 1;
savedir = "nets/"
saver = tf.train.Saver(max_to_keep=3)
print ("Network Ready to Go!")
# # OPTIMIZE
# ## DO TRAIN OR NOT
# In[6]:
do_train = 1
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(init)
# In[7]:
if do_train == 1:
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})
print (" Training accuracy: %.3f" % (train_acc))
test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel})
print (" Test accuracy: %.3f" % (test_acc))
# Save Net
if epoch % save_step == 0:
saver.save(sess, "nets/cnn_mnist_simple.ckpt-" + str(epoch))
print ("Optimization Finished.")
# # RESTORE
# In[8]:
if do_train == 0:
epoch = training_epochs-1
saver.restore(sess, "nets/cnn_mnist_simple.ckpt-" + str(epoch))
print ("NETWORK RESTORED")
# # LET'S SEE HOW CNN WORKS
# In[9]:
with tf.device(device_type):
conv_out = conv_simple(x, weights, biases)
input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]})
conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]})
conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]})
conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]})
pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]})
dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]})
out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]})
# # Input
# In[10]:
# Let's see 'input_r'
print ("Size of 'input_r' is %s" % (input_r.shape,))
label = np.argmax(trainlabel[0, :])
print ("Label is %d" % (label))
# Plot !
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray'))
plt.title("Label of this image is " + str(label) + "")
plt.colorbar()
plt.show()
# # Conv1 (convolution)
# In[11]:
# Let's see 'conv1'
print ("Size of 'conv1' is %s" % (conv1.shape,))
# Plot !
for i in range(3):
plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv1")
plt.colorbar()
plt.show()
# # Conv2 (+bias)
# In[12]:
# Let's see 'conv2'
print ("Size of 'conv2' is %s" % (conv2.shape,))
# Plot !
for i in range(3):
plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv2")
plt.colorbar()
plt.show()
# # Conv3 (ReLU)
# In[13]:
# Let's see 'conv3'
print ("Size of 'conv3' is %s" % (conv3.shape,))
# Plot !
for i in range(3):
plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv3")
plt.colorbar()
plt.show()
# # Pool (max_pool)
# In[14]:
# Let's see 'pool'
print ("Size of 'pool' is %s" % (pool.shape,))
# Plot !
for i in range(3):
plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th pool")
plt.colorbar()
plt.show()
# # Dense
# In[15]:
# Let's see 'dense'
print ("Size of 'dense' is %s" % (dense.shape,))
# Let's see 'out'
print ("Size of 'out' is %s" % (out.shape,))
# # Convolution filters
# In[16]:
# Let's see weight!
wc1 = sess.run(weights['wc1'])
print ("Size of 'wc1' is %s" % (wc1.shape,))
# Plot !
for i in range(3):
plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv filter")
plt.colorbar()
plt.show()
| 3.234375 | 3 |
src/view/view_analisys.py | jcemelanda/PyGPA2.0 | 0 | 12798320 | <filename>src/view/view_analisys.py
from PyQt4 import QtGui, QtCore
from widgets.window_analisys import Analysis_Window
class Analise_View(QtGui.QMainWindow):
def __init__(self, controle):
QtGui.QMainWindow.__init__(self)
self.controle = controle
self.ui = Analysis_Window()
self.ui.setup(self)
self.count = 3
self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence("l"), self, self.controle.incrementa_view)
self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence("j"), self, self.controle.decrementa_view)
self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence("end"), self, self.controle.last_view)
self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence("home"), self, self.controle.first_view)
QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL(
'triggered()'), self.controle.abrir_arquivo)
QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL(
'valueChanged(int)'), self.controle.set_view)
QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL(
'currentChanged(int)'), self.controle.set_current_tab)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
av = Analise_View()
av.add_widgets()
av.showMaximized()
sys.exit(app.exec_())
| 2.359375 | 2 |
M1/Aula1.py | DouglasCarvalhoPereira/Interact-OS-PYTHON | 0 | 12798321 | <reponame>DouglasCarvalhoPereira/Interact-OS-PYTHON<filename>M1/Aula1.py
name = input('Enter your name: ')
cont = 0
while cont < 10:
    print(f'Hello world {name}')
    cont += 1 | 3.15625 | 3 |
Sudoku.py | lavieduynguyen/Sudoku | 0 | 12798322 | import kivy
kivy.require('1.10.0')
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.checkbox import CheckBox
from kivy.uix.dropdown import DropDown
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import Screen, ScreenManager
from sudokuboard import SudokuBoard
class GameScreen(Screen):
pass
class SolverScreen(Screen):
pass
class HelpScreen(Screen):
pass
class CreditsScreen(Screen):
pass
class MenuScreen(Screen):
pass
class Numpad(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 3
self.rows = 3
for i in range(self.cols * self.rows):
button = Button(text=str(i + 1))
button.font_size = button.height * 0.25
self.add_widget(button)
class ColorFilter(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.spacing = 8
self.padding = 10
self.cols = 8
self.rows = 5
for i in range(20):
self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20))
self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20))
class SudokuApp(App):
def build(self):
sm = ScreenManager()
sm.add_widget(MenuScreen(name='menu'))
sm.add_widget(GameScreen(name='game'))
sm.add_widget(HelpScreen(name='help'))
sm.add_widget(CreditsScreen(name='credits'))
sm.current = 'menu'
return sm
if __name__ == '__main__':
SudokuApp().run() | 2.984375 | 3 |
src/bfg/module.py | rvrsh3ll/bl-bfg | 6 | 12798323 | import argparse
import inspect
import re
def bindSignatureArgs(func, src:dict) -> dict:
'''
Args:
func: Function/method from which the signature will be sourced.
src: Source dictionary that will provide values for dest.
Returns:
A new dictionary with argument values from src set
in dest.
'''
dest = {}
# Iterate over paramaters and values in the function's
# signature
for k,v in inspect.signature(func).parameters.items():
# Skip "self" references
if k == 'self': continue
# Extract the user supplied value when provided
if k in src: dest[k]=src[k]
# Use the default value other wise
else: dest[k]=v
return dest
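# Illustrative sketch (not part of the original module): how bindSignatureArgs
# pairs a callable's parameters with user-supplied values. _demo_login and the
# sample dict are hypothetical.
def _bind_signature_example():
    def _demo_login(username, password, timeout=5):
        return username, password, timeout

    bound = bindSignatureArgs(_demo_login, {'username': 'a', 'password': 'b'})
    # bound['username'] == 'a'; 'timeout' is filled with its inspect.Parameter
    # entry because it is absent from the source dict.
    return bound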
class Module:
'''# Base Module Class
This class serves as a template for brute force modules. It builds
the interface subcommands by inspecting the __init__ method while
also enforcing restrictions on the __call__ method to ensure
BruteLoops can make authentication callbacks.
# The __init__ Method
This method can be used to set static values supporting a brute
force module. It's useful in situations when an upstream server
needs to be targeted.
# The __call__ Method
This method is called for each authentication attempt by BruteLoops
and should check the validity of a username and password. The method
signature must look like:
```
def __call__(self, username, password, *args, **kwargs):
success = False
# Do authentication and update success to True if successful
if success: return dict(outcome=1,username=username,password=password)
else: return dict(outcome=0,username=username,password=password)
```
Note the dict returned in the example above. The `outcome` value indicates
whether authentication succeeded: 1 means success (valid credentials),
0 means failure.
'''
# Name for the module that'll be shown in logging
name = None
# Brief description to display in the help menu
brief_description = None
# Description of the module that'll be shown in the interface
description = None
@classmethod
def initialize(cls, args):
'''Initialize and return the underlying brute force module.
'''
print('Initializing module!')
# Translate the argparse arguments to a dictionary
args = vars(args)
# Initialize a dictionary to hold all of the necessary argument
# to initialize the brute force module.
dct = bindSignatureArgs(func=cls.__init__, src=args)
# Initialize and return the module
instance = cls(**dct)
if hasattr(instance, '__post_init__'):
instance.__post_init__(
**bindSignatureArgs(
func=instance.__post_init__,
src=args))
return instance
@classmethod
def validate(cls):
# ==============================
# VALIDATING THE __call__ METHOD
# ==============================
# Ensure that it's declared
assert getattr(cls,'__call__'),('Modules must be callable. '
'Declare a __call__ method on the module: ' \
f'{cls.get_handle}')
# Get a list of parameter names
call_params = list(inspect.signature(cls.__call__).parameters \
.keys())
if call_params and call_params[0] == 'self':
call_params = call_params[1:3]
# Ensure exactly two parameters are received after self (username, password)
assert len(call_params) == 2,('__call__ must receive at ' \
'least two arguments: username, password')
# Ensure that the first two are 'username' and 'password'
assert ['username','password'] == call_params,('__call__ ' \
'must receive the first two arguments as username, ' \
f'password -- not: {call_params}')
@classmethod
def get_handle(cls):
'''Return a simple string to use as a module identifier.
'''
return '.'.join(cls.__module__.split('.')[-3:][:2])
@classmethod
def build_interface(cls,
subparsers: 'Argparse subparsers that will receive the subcommand') \
-> argparse.ArgumentParser:
'''Use the inspect module to iterate over each parameter
declared in __init__ and build an interface via argparse.
'''
epilog = None
if hasattr(cls, 'contributors'):
# ==========================
# FORMAT MODULE CONTRIBUTORS
# ==========================
epilog = 'Contributors:\n\n'
if not isinstance(cls.contributors, list):
raise ValueError(
'Module contributors must be a list of dictionary '
f'values, not {type(cls.contributors)}')
for cont in cls.contributors:
if not isinstance(cont, dict):
raise ValueError(
'contributor records must be dictionaries, '
f'not {type(cont)}')
name = cont.get('name')
additional = cont.get('additional')
if not name:
raise ValueError(
'contributor records must have a "name" field')
epilog += f'\n- {name}'
if additional:
if not isinstance(additional, dict):
raise ValueError(
'additional field of contributor records '
f'must be a dict, not {type(additional)}')
for k,v in additional.items():
epilog += f'\n {k}: {v}'
epilog += '\n'
if hasattr(cls, 'references'):
# ========================
# FORMAT MODULE REFERENCES
# ========================
epilog += f'\nReferences:\n'
references = cls.references
if not isinstance(references, list):
raise ValueError(
f'References must be a list, got {type(references)}')
for ref in references:
epilog += f'\n- {ref}'
# ======================
# BUILD MODULE ARGUMENTS
# ======================
'''Here we create a new argparse argument parser for the command
associated with the newly created module. This is how we bind
the name that the user will reference at the command line, along
with providing a mechanism to assign values to module parameters.
'''
parser = subparsers.add_parser(cls.get_handle(),
description=cls.description,
help=cls.brief_description,
parents=cls.args,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
parser.set_defaults(module=cls)
parser.add_argument('--database', '-db',
required=True,
help='Database to target.')
return parser
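# Illustrative sketch (not part of the original module): the minimal shape of a
# brute force module built on this base class. The module name, endpoint and
# outcome logic are hypothetical.
class _ExampleModule(Module):
    name = 'example.http'
    brief_description = 'Hypothetical demo module'
    description = 'Checks credentials against a made-up endpoint.'
    # Parent argument parsers consumed by build_interface (none here).
    args = []

    def __init__(self, url='https://example.com/login'):
        self.url = url

    def __call__(self, username, password, *args, **kwargs):
        # A real module would attempt authentication against self.url here.
        success = False
        return dict(outcome=1 if success else 0,
                    username=username, password=password)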
| 3.125 | 3 |
oraexec.py | bakink/oracle-imagecopy-backup | 20 | 12798324 | import os, sys
from subprocess import Popen, PIPE
from backupcommon import BackupLogger, info, debug, error, exception
from datetime import datetime, timedelta
from tempfile import mkstemp, TemporaryFile
class OracleExec(object):
oraclehome = None
tnspath = None
oraclesid = None
def __init__(self, oraclehome, tnspath, sid=None):
self.oraclehome = oraclehome
self.tnspath = tnspath
if sid is not None:
self.oraclesid = sid
debug("Oracle home: %s" % self.oraclehome)
def _setenv(self):
if self.oraclesid is None and os.environ.get('ORACLE_SID'):
del os.environ['ORACLE_SID']
if self.oraclesid is not None:
os.environ['ORACLE_SID'] = self.oraclesid
os.environ['ORACLE_HOME'] = self.oraclehome
os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss'
os.environ['TNS_ADMIN'] = self.tnspath
def rman(self, finalscript):
self._setenv()
debug("RMAN execution starts")
BackupLogger.close()
starttime = datetime.now()
with TemporaryFile() as f:
p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), "log", BackupLogger.logfile, "append"], stdout=f, stderr=f, stdin=PIPE)
# Send the script to RMAN
p.communicate(input=finalscript)
endtime = datetime.now()
BackupLogger.init()
debug("RMAN execution time %s" % (endtime-starttime))
# If RMAN exits with any code except 0, then there was some error
if p.returncode != 0:
error("RMAN execution failed with code %d" % p.returncode)
raise Exception('rman', "RMAN exited with code %d" % p.returncode)
else:
debug("RMAN execution successful")
def sqlplus(self, finalscript, silent=False):
self._setenv()
with TemporaryFile() as f:
args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')]
if silent:
args.append('-S')
args.append('/nolog')
debug("SQL*Plus execution starts")
BackupLogger.close()
p = Popen(args, stdout=f, stderr=f, stdin=PIPE)
p.communicate(input=finalscript)
BackupLogger.init()
if p.returncode != 0:
error("SQL*Plus exited with code %d" % p.returncode)
raise Exception('sqlplus', "sqlplus exited with code %d" % p.returncode)
else:
debug("SQL*Plus execution successful")
if silent:
f.seek(0,0)
return f.read()
def sqlldr(self, login, finalscript):
self._setenv()
debug("SQLLDR execution starts")
f1 = mkstemp(suffix=".ctl")
ftmp = os.fdopen(f1[0], "w")
ftmp.write(finalscript)
ftmp.close()
f2 = mkstemp(suffix=".log")
os.close(f2[0])
with TemporaryFile() as f:
p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, "control=%s" % f1[1], "log=%s" % f2[1], "errors=0", "silent=all"], stdout=f, stderr=None, stdin=None)
p.communicate()
if p.returncode != 0:
error("SQLLDR exited with code %d" % p.returncode)
raise Exception('sqlldr', "sqlldr exited with code %d" % p.returncode)
else:
debug("SQLLDR execution successful")
os.unlink(f1[1])
os.unlink(f2[1])
def adrci(self, inputscriptfilename, outputfilehandle):
self._setenv()
p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), "script=%s" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None)
p.wait()
if p.returncode != 0:
raise Exception('adrci','Exit code was not 0.')
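# Illustrative sketch (not part of the original module): constructing the
# executor and running a trivial SQL*Plus script. The paths, SID and connect
# string are hypothetical; sqlplus must exist under the given ORACLE_HOME and
# BackupLogger must be initialized.
def _example_sqlplus():
    oexec = OracleExec(oraclehome='/u01/app/oracle/product/19.0.0/dbhome_1',
                       tnspath='/etc/oracle/tns', sid='ORCL')
    script = "connect / as sysdba\nselect sysdate from dual;\nexit\n"
    return oexec.sqlplus(script, silent=True)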
| 2.328125 | 2 |
example/XGB.py | abhineet123/paramparse | 0 | 12798325 | class XGB:
class Params:
def __init__(self):
self.max_depth = 2
self.eta = 1
self.objective = 'binary:logistic'
self.nthread = 4
self.eval_metric = 'auc'
self.num_round = 10
self.verbose = 1
self.help = {
}
| 2.078125 | 2 |
final_project/main.py | DroogieDroog/python | 0 | 12798326 | <filename>final_project/main.py<gh_stars>0
"""
pirple/python/final_project/main.py
Final Project
Create a Go Fish game
"""
from random import (shuffle, randint)
from time import sleep
from os import system, name
class Player:
def __init__(self, name, computer = False):
self.Name = name
self.Computer = computer
self.new_hand()
def new_hand(self):
self.Hand = {}
self.HandCounts = {}
self.Sets = []
self.Wishes = []
def hand_counts(self):
for denom in self.Hand.keys():
self.HandCounts.update({denom: len(self.Hand[denom])})
def check_wishes(self):
if set(self.Wishes) == set(self.Hand.keys()):
self.Wishes = []
elif len(self.Wishes) == 5:
self.Wishes.pop(0)
def print_hand(self):
if self.Name.upper().endswith('S'):
print('{}\' hand: '.format(self.Name), end=' ')
else:
print('{}\'s hand: '.format(self.Name), end=' ')
for denom in sorted(self.Hand):
for suit in self.Hand[denom]:
if self.Computer:
print('\u2733', end=' ')
else:
print(card_map[denom] + suit, end=' ')
print()
if self.Sets != []:
self.print_sets()
def print_sets(self):
if self.Name.upper().endswith('S'):
print('{}\' sets: '.format(self.Name), end=' ')
else:
print('{}\'s sets: '.format(self.Name), end=' ')
for denom in sorted(self.Sets):
print(card_map[denom] + 's', end=' ')
print()
def cast(self, opp_hand, card_deck, wish):
if wish in opp_hand.keys():
print('Fish, fish, you got your wish!')
opp_cards = opp_hand.pop(wish)
if wish in self.Hand.keys():
for card in opp_cards:
self.Hand[wish].append(card)
else:
self.Hand.update({wish: opp_cards})
return True
else:
print('Nope! Go fish.')
sleep(2)
got_wish = self.fish(card_deck, wish)
return got_wish
def fish(self, card_deck, wish):
draw_card = card_deck.pop()
if draw_card[0] in self.Hand.keys():
self.Hand[draw_card[0]].append(draw_card[1])
else:
self.Hand.update({draw_card[0]: [draw_card[1]]})
if draw_card[0] == wish:
print('Fish, fish, you got your wish!')
return True
else:
print('Booooo, you didn\'t get your wish.')
sleep(3)
return False
def lay_set(self):
new_set = -1
for card, suits in self.Hand.items():
if len(suits) == 4:
new_set = card
if new_set != -1:
self.Hand.pop(new_set)
self.Sets.append(new_set)
if len(self.Hand) == 0:
return True
else:
return False
def clear_screen():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def create_deck():
deck = []
map = {}
card_denominations = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
#suits = [hearts, diamonds, spades, clubs]
suits = ['\u2665', '\u2666', '\u2660', '\u2663']
for card in range(13):
map.update({card: card_denominations[card]})
for suit in suits:
deck.append([card, suit])
shuffle(deck)
return deck, map
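# Illustrative note (not part of the original game): create_deck() returns a
# shuffled list of [denomination_index, suit] pairs plus a map from index to
# face value, so [12, '\u2665'] is the king of hearts. The helper below is
# hypothetical.
def _format_card(card, mapping):
    # e.g. _format_card([12, '\u2665'], card_map) -> 'K♥'
    return mapping[card[0]] + card[1]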
def choose_dealer(new_game, winner):
if new_game or winner == 2:
r = randint(1, 100)
if r <= 50:
dealer = 0
else:
dealer = 1
else:
if winner == 0:
dealer = 1
else:
dealer = 0
return dealer
def deal_hand(deck, players, new_game=True, winner=None):
hands = []
hands.append({})
hands.append({})
for card in range(10):
for hand in range(2):
top_card = deck.pop()
if top_card[0] in hands[hand].keys():
hands[hand][top_card[0]].append(top_card[1])
else:
hands[hand].update({top_card[0]: [top_card[1]]})
dealer = choose_dealer(new_game, winner)
if dealer == 0:
print('{} is the dealer. {} goes first. Dealing the hands . . .'.format(players[0].Name, players[1].Name))
sleep(2)
return dealer, hands[1], hands[0]
else:
print('{} is the dealer. {} goes first. Dealing the hands . . .'.format(players[1].Name, players[0].Name))
sleep(2)
return dealer, hands[0], hands[1]
def start_new_game():
while(True):
player_name = '--help'
while player_name.upper() == '--HELP':
player_name = input('Please enter your name (or --help for the rules): ')
if player_name.upper() == '--HELP':
print_rules()
yn = input('You\'ve chosen {} for your name. Is that correct (y/n)? '.format(player_name))
if yn.upper() == 'Y':
print('OK, then let\'s play!')
break
else:
print('Oops! Try again.')
players = [Player(player_name), Player('Computer', True)]
card_deck, card_map = create_deck()
dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players)
return players, dealer, card_deck, card_map
def play_again(players, winner):
players[0].new_hand()
players[1].new_hand()
card_deck, card_map = create_deck()
dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner)
return dealer, card_deck, card_map
def display_current_status(players, card_deck):
break_line = '\u274C' * 25
cards_left = len(card_deck)
clear_screen()
print(break_line + '\n')
players[1].print_hand()
print()
print('Draw pile: {} cards'.format(cards_left))
print()
players[0].print_hand()
print('\n' + break_line)
def determine_winner(players, card_deck):
display_current_status(players, card_deck)
sets0 = len(players[0].Sets)
sets1 = len(players[1].Sets)
if sets0 > sets1:
print('{} wins, {} sets to {}'.format(players[0].Name, sets0, sets1))
winner = 0
elif sets1 > sets0:
print('{} wins, {} sets to {}'.format(players[1].Name, sets1, sets0))
winner = 1
else:
print('It\'s a tie!')
winner = 2
print('Good game!')
return winner
def play_game(players, dealer, card_deck):
game_over = False
if dealer == 0:
current_player = 1
else:
current_player = 0
while not game_over:
game_over = play_hand(players, current_player, card_deck, card_map)
if players[current_player].Computer:
while True:
wait = input('Enter a C to continue . . .')
if wait.upper() == 'C':
break
if current_player == 0:
current_player = 1
else:
current_player = 0
return determine_winner(players, card_deck)
def play_hand(players, current_player, card_deck, card_map):
got_wish = True
game_over = False
card_map_keys = list(card_map.keys())
card_map_values = list(card_map.values())
if current_player == 0:
opp_hand = players[1].Hand
opp_sets = players[1].Sets
else:
opp_hand = players[0].Hand
opp_sets = players[0].Sets
display_current_status(players, card_deck)
player = players[current_player]
print('Your turn, {}.'.format(player.Name))
if player.Computer:
player.HandCounts = {}
while got_wish and not game_over:
if player.Computer:
player.hand_counts()
wish = generate_wish(player, opp_sets, card_map)
else:
wish = request_wish(player, card_map, card_map_values)
got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish])
if card_deck == []:
game_over = True
else:
game_over = player.lay_set()
if got_wish and not game_over and not player.Computer:
player.print_hand()
return game_over
def request_wish(player, card_map, card_map_values):
while True:
wish = input('What is your wish? ')
if wish.upper() not in card_map.values():
print('You must wish for a valid card value (2-10, J, Q, K, A)')
else:
value_position = card_map_values.index(wish.upper())
if value_position not in player.Hand.keys():
print('You must wish for a card value in your hand.')
else:
break
return value_position
def generate_wish(player, opp_sets, card_map):
player.check_wishes()
highest_count = 0
most_cards = -1
while most_cards == -1:
for denom, count in player.HandCounts.items():
if (count > highest_count) and (denom not in player.Wishes)\
and (denom not in opp_sets) and (denom not in player.Sets):
most_cards = denom
highest_count = count
if most_cards == -1:
player.Wishes = []
player.HandCounts = {}
player.hand_counts()
player.Wishes.append(most_cards)
print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards]))
return most_cards
def print_rules():
clear_screen()
with open('data/rules.txt', 'r') as rules:
rules_text = rules.read()
print(rules_text)
while True:
wait = input('Enter an R to return to the game . . .')
if wait.upper() == 'R':
clear_screen()
break
def main():
global card_deck, card_map
clear_screen()
print('Welcome to JFL Go Fish! This is a one-player game against the Computer.')
players, dealer, card_deck, card_map = start_new_game()
play = True
while play:
winner = play_game(players, dealer, card_deck)
yn = input('Would you like to play again (y/n)? ')
if yn.upper() != 'Y':
play = False
else:
dealer, card_deck, card_map = play_again(players, winner)
print('Goodbye!')
if __name__ == '__main__':
main()
| 3.875 | 4 |
micadoparser/parser.py | micado-scale/micado-parser | 0 | 12798327 | <filename>micadoparser/parser.py
import logging
from toscaparser.tosca_template import ToscaTemplate
from toscaparser.common.exception import ValidationError as TOSCAParserError
from yaml.error import YAMLError
from micadoparser import validator
from micadoparser.exceptions import ValidationError
from micadoparser.utils.csar import handle_csar
from micadoparser.utils.yaml import handle_yaml
from micadoparser.utils.utils import resolve_get_functions
logger = logging.getLogger("micadoparser." + __name__)
def set_template(path, parsed_params=None):
"""Parses any ADT and returns a ToscaTemplate
:params: path, parsed_params
:type: string, dictionary
:return: template
| parsed_params: dictionary containing the input to change
| path: local or remote path to the file to parse
"""
errors = None
if path.endswith(".csar"):
template = handle_csar(path, parsed_params)
else:
template = handle_yaml(path, parsed_params)
validator.validation(template)
_find_other_inputs(template)
_normalise_node_names(template)
return template
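# Illustrative sketch (not part of the original module): parsing a local ADT
# with optional TOSCA inputs. The path and input name are hypothetical.
def _example_parse():
    template = set_template(
        "tests/templates/tosca.yaml",
        parsed_params={"worker_count": 2},
    )
    return [node.name for node in template.nodetemplates]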
def get_template(path, parsed_params):
"""Return a ToscaTemplate object
Args:
path (string): path to the saved ADT
parsed_params (dict): tosca inputs
Raises:
ValueError: If the tosca-parser has trouble parsing
Returns:
ToscaTemplate: Parsed template object
"""
error = ""
try:
template = ToscaTemplate(
path=path, parsed_params=parsed_params, a_file=True
)
except TOSCAParserError as e:
error = [
line
for line in e.message.splitlines()
if all([line, not line.startswith("\t\t")])
]
error = "\n".join(error)
except AttributeError as e:
error = f"{e}\n HINT: This might be due to a wrong type - check your imports."
except YAMLError as e:
error = f"YAML Error\n {e}"
except Exception as e:
error = (
f"Unknown Error:\n {e}\n\n"
"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues."
)
if error:
raise ValidationError(error, "TOSCA Parser could not parse the ADT...")
return template
def _find_other_inputs(template):
"""Find `get_input` tags in the template, then resolve and update"""
resolve_get_functions(
template.tpl,
"get_input",
lambda x: x is not None,
_get_input_value,
template,
)
# Update nodetemplate properties
for node in template.nodetemplates:
node._properties = node._create_properties()
def _get_input_value(key, template):
"""Custom get_input resolution using parsed_params"""
try:
return template.parsed_params[key]
except (KeyError, TypeError):
logger.debug(f"Input '{key}' not given, using default")
try:
return [
param.default for param in template.inputs if param.name == key
][0]
except IndexError:
logger.error(f"Input '{key}' has no default")
def _normalise_node_names(template):
"""Remove underscores and periods from node names and refs"""
# tpl and entity_tpl are not ever (I think) used to pull node names
# so update the name property of the nodetemplate object
for node in template.nodetemplates:
node.name = node.name.replace("_", "-").replace(".", "-")
_normalise_requirement_node_refs(node._requirements)
# the targets property just looks at entity_tpl, so update that
# references to renamed nodes exist in targets_list, so use that
for policy in template.policies:
policy.entity_tpl["targets"] = [
node.name for node in policy.targets_list
]
def _normalise_requirement_node_refs(requirements):
"""Remove underscores and periods from node references"""
for requirement in requirements:
key = list(requirement)[0]
# for shorthand requirement notation, just replace the string
try:
requirement[key] = (
requirement[key].replace("_", "-").replace(".", "-")
)
# otherwise get the key and update 'node' in the inner dictionary
except AttributeError:
requirement[key]["node"] = (
requirement[key]["node"].replace("_", "-").replace(".", "-")
)
| 2.5 | 2 |
database.py | Anve94/DiscordBot-public | 0 | 12798328 | <reponame>Anve94/DiscordBot-public
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
Base = declarative_base()
# Abuser class to insert people abusing the bugreporting feature
class Abuser(Base):
__tablename__ = 'abusers'
id = Column(Integer, primary_key = True)
discord_id = Column(String, nullable = False)
# Entry class for voice-related giveaway entries
class Entry(Base):
__tablename__ = 'entries'
# Table column definitions
id = Column(Integer, primary_key = True)
discord_id = Column(String, nullable = False)
score = Column(Integer, nullable = False)
# EventMessage class for stuff_happening messages
class EventMessage(Base):
__tablename__ = 'message'
id = Column(Integer, primary_key = True)
token = Column(String, nullable = False)
content = Column(String, nullable = False)
image_url = Column(String, nullable = True)
position = Column(Integer, nullable = False)
# Giveaway class to keep track of community giveaways
class Giveaway(Base):
__tablename__ = 'giveaway'
id = Column(Integer, primary_key = True)
discord_id = Column(String, nullable = False)
# Create the engine to the sqlite database
engine = create_engine('sqlite:///database/database.sqlite')
# Handles the creation of tables (if none exist etc.)
Base.metadata.create_all(engine)
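# Illustrative sketch (not part of the original module): opening a session and
# inserting a row. The Discord ID below is made up.
from sqlalchemy.orm import sessionmaker


def _example_insert():
    Session = sessionmaker(bind=engine)
    session = Session()
    session.add(Giveaway(discord_id="123456789012345678"))
    session.commit()
    session.close()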
| 2.4375 | 2 |
speech_recognition/cmusphinx-code/sphinxtrain/python/cmusphinx/fstutils.py | Ohara124c41/TUB-MSc_Thesis | 1 | 12798329 | <reponame>Ohara124c41/TUB-MSc_Thesis<filename>speech_recognition/cmusphinx-code/sphinxtrain/python/cmusphinx/fstutils.py
#!/usr/bin/env python
# Copyright (c) 2010 Carnegie Mellon University
#
# You may copy and modify this freely under the same terms as
# Sphinx-III
"""
FST utility functions
"""
__author__ = "<NAME> <<EMAIL>>"
__version__ = "$Revision $"
import sys
import os
import tempfile
import openfst
import sphinxbase
import subprocess
class AutoFst(openfst.StdVectorFst):
"""
FST class which automatically adds states, input and output symbols as required.
This is meant to behave somewhat like the Dot language.
"""
def __init__(self, isyms=None, osyms=None, ssyms=None):
openfst.StdVectorFst.__init__(self)
if isyms == None:
isyms = openfst.SymbolTable("inputs")
isyms.AddSymbol("ε")
if osyms == None:
osyms = openfst.SymbolTable("outputs")
osyms.AddSymbol("ε")
if ssyms == None:
ssyms = openfst.SymbolTable("states")
ssyms.AddSymbol("__START__")
self.ssyms = ssyms
self.SetInputSymbols(isyms)
self.SetOutputSymbols(osyms)
self.SetStart(self.AddState())
def AddArc(self, src, isym, osym, weight, dest):
if not isinstance(isym, int):
isym = self.isyms.AddSymbol(isym)
if not isinstance(osym, int):
osym = self.osyms.AddSymbol(osym)
if not isinstance(src, int):
src = self.ssyms.AddSymbol(src)
if not isinstance(dest, int):
dest = self.ssyms.AddSymbol(dest)
while src >= self.NumStates():
self.AddState()
while dest >= self.NumStates():
self.AddState()
openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest)
def Write(self, *args):
openfst.StdVectorFst.SetInputSymbols(self, self.isyms)
openfst.StdVectorFst.SetOutputSymbols(self, self.osyms)
openfst.StdVectorFst.Write(self, *args)
def SetFinal(self, state, weight=0):
if not isinstance(state, int):
state = self.ssyms.AddSymbol(state)
openfst.StdVectorFst.SetFinal(self, state, weight)
def SetInputSymbols(self, isyms):
self.isyms = isyms
openfst.StdVectorFst.SetInputSymbols(self, self.isyms)
def SetOutputSymbols(self, osyms):
self.osyms = osyms
openfst.StdVectorFst.SetOutputSymbols(self, self.osyms)
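# Illustrative sketch (not part of the original module): AutoFst lets arcs be
# declared with plain strings, creating states and symbols on demand, much like
# writing a Dot graph. The symbols and state names below are made up.
def _example_autofst():
    fst = AutoFst()
    fst.AddArc("__START__", "hello", "hello", 0, "q1")
    fst.AddArc("q1", "world", "world", 0, "q2")
    fst.SetFinal("q2")
    return fst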
def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0):
"""
Add states and arcs for all M-grams in the language model, where M<N.
"""
for mg in lm.mgrams(m):
wsym = symtab.Find(mg.words[m])
if wsym == -1:
continue # skip mgrams ending in OOV
if m > 0 and mg.words[0] == '</s>':
continue # skip >1-grams starting with </s>
if m == 0:
src = 0 # 1-grams start in backoff state
elif tuple(mg.words[0:m]) not in sidtab:
continue # this means it has an OOV
else:
src = sidtab[tuple(mg.words[0:m])]
if mg.words[m] == '</s>':
# only one final state is allowed
final = True
newstate = False
if ('</s>',) in sidtab:
dest = sidtab[('</s>',)]
else:
dest = fst.AddState()
fst.SetFinal(dest, 0)
sidtab[('</s>',)] = dest
#print "Final state", dest
#print "Entered state ID mapping (</s>,) =>", dest
else:
final = False
newstate = True
dest = fst.AddState()
if mg.words[m] == '<s>':
# <s> is a non-event
if m == 0:
# The destination state will be the initial state
fst.SetStart(dest)
#print "Initial state", dest
else:
fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest))
#print "Added %d-gram arc %d => %d %s/%.4f" % (m+1, src, dest,
#mg.words[m], -mg.log_prob)
if newstate:
# Add a new state to the mapping if needed
sidtab[tuple(mg.words)] = dest
#print "Entered state ID mapping", tuple(mg.words), "=>", dest
if not final:
# Create a backoff arc to the suffix M-1-gram
# Note taht if mg.log_bowt == 0 it's particularly important to do this!
if m == 0:
bo_state = 0 # backoff state
elif tuple(mg.words[1:]) in sidtab:
bo_state = sidtab[tuple(mg.words[1:])]
else:
continue # Not a 1-gram, no suffix M-gram
fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state))
#print "Adding backoff arc %d => %d %.4f" % (dest, bo_state, -mg.log_bowt)
def add_ngram_arcs(fst, symtab, lm, n, sidtab):
"""
Add states and arcs for all N-grams in the language model, where
N=N (the order of the model, that is).
"""
for ng in lm.mgrams(n-1):
wsym = symtab.Find(ng.words[n-1])
if wsym == -1: # OOV
continue
if ng.words[n-1] == '<s>': # non-event
continue
if '</s>' in ng.words[0:n-1]:
continue
# skip N-grams whose context words are OOV
if any(symtab.Find(w) == -1 for w in ng.words[:n-1]):
continue
src = sidtab[tuple(ng.words[:n-1])]
# Find longest suffix N-gram that exists
spos = 1
while tuple(ng.words[spos:]) not in sidtab:
spos += 1
if spos == n:
raise RuntimeError("Unable to find suffix N-gram for %s" % str(ng.words))
dest = sidtab[tuple(ng.words[spos:])]
fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest))
#print "Adding %d-gram arc %d => %d %s/%.4f" % (n, src, dest, ng.words[n-1], -ng.log_prob)
def build_lmfst(lm, use_phi=False):
"""
Build an FST recognizer from an N-gram backoff language model.
"""
fst = openfst.StdVectorFst()
symtab = openfst.SymbolTable("words")
epsilon = symtab.AddSymbol("ε")
if use_phi:
phi = symtab.AddSymbol("φ")
bo_label = phi
else:
bo_label = epsilon
for ug in lm.mgrams(0):
wsym = symtab.AddSymbol(ug.words[0])
fst.SetInputSymbols(symtab)
fst.SetOutputSymbols(symtab)
# The algorithm goes like this:
#
# Create a backoff state
# For M in 1 to N-1:
# For each M-gram w(1,M):
# Create a state q(1,M)
# Create an arc from state q(1,M-1) to q(1,M) with weight P(w(1,M))
# Create an arc from state q(1,M) to q(2,M) with weight bowt(w(1,M-1))
# For each N-gram w(1,N):
# Create an arc from state q(1,N-1) to q(2,N) with weight P(w(1,N))
# Table holding M-gram to state mappings
sidtab = {}
fst.AddState() # guaranteed to be zero (we hope)
for m in range(lm.get_size() - 1):
add_mgram_states(fst, symtab, lm, m, sidtab, bo_label)
add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab)
# Connect and arc-sort the resulting FST
openfst.Connect(fst)
openfst.ArcSortInput(fst)
return fst
class SphinxProbdef(object):
"""
Probability definition file used for Sphinx class language models.
"""
def __init__(self, infile=None):
self.classes = {}
if infile != None:
self.read(infile)
def read(self, infile):
"""
Read probability definition from a file.
"""
if not isinstance(infile, file):
infile = file(infile)
inclass = None
for spam in infile:
spam = spam.strip()
if spam.startswith('#') or spam.startswith(';'):
continue
if spam == "":
continue
if inclass:
parts = spam.split()
if len(parts) == 2 \
and parts[0] == "END" and parts[1] == classname:
inclass = None
else:
prob = 1.0
if len(parts) > 1:
prob = float(parts[1])
self.add_class_word(inclass, parts[0], prob)
else:
if spam.startswith('LMCLASS'):
foo, classname = spam.split()
self.add_class(classname)
inclass = classname
def add_class(self, name):
"""
Add a class to this probability definition.
"""
self.classes[name] = {}
def add_class_word(self, name, word, prob):
"""
Add a word to a class in this probability definition.
"""
self.classes[name][word] = prob
def write(self, outfile):
"""
Write out probability definition to a file.
"""
if not isinstance(outfile, file):
outfile = file(outfile, 'w')
for c in self.classes:
outfile.write("LMCLASS %s\n" % c)
for word, prob in self.classes[c].iteritems():
outfile.write("%s %g\n" % (word, prob))
outfile.write("END %s\n" % c)
outfile.write("\n")
def normalize(self):
"""
Normalize probabilities.
"""
for c in self.classes:
t = sum(self.classes[c].itervalues())
if t != 0:
for w in self.classes[c]:
self.classes[c][w] /= t
def build_classfst(probdef, isyms=None):
"""
Build an FST from the classes in a Sphinx probability definition
file. This transducer maps words to classes, and can either be
composed with the input, or pre-composed with the language model.
In the latter case you can project the resulting transducer to its
input to obtain an equivalent non-class-based model.
"""
if not isinstance(probdef, SphinxProbdef):
probdef = SphinxProbdef(probdef)
fst = openfst.StdVectorFst()
if isyms:
symtab = isyms
else:
symtab = openfst.SymbolTable("words")
symtab.AddSymbol("ε")
st = fst.AddState()
fst.SetStart(st)
fst.SetFinal(st, 0)
for word, label in symtab:
if label == openfst.epsilon:
continue
fst.AddArc(st, label, label, 0, st)
for c in probdef.classes:
clabel = symtab.AddSymbol(c)
for word, prob in probdef.classes[c].iteritems():
wlabel = symtab.AddSymbol(word)
fst.AddArc(st, wlabel, clabel, -math.log(prob), st)
fst.SetOutputSymbols(symtab)
fst.SetInputSymbols(symtab)
return fst
def build_class_lmfst(lm, probdef, use_phi=False):
"""
Build an FST from a class-based language model. By default this
returns the lazy composition of the class definition transducer
and the language model. To obtain the full language model, create
a VectorFst from it and project it to its input.
"""
lmfst = build_lmfst(lm, use_phi)
classfst = build_classfst(probdef, lmfst.InputSymbols())
openfst.ArcSortInput(lmfst)
openfst.ArcSortInput(classfst)
return openfst.StdComposeFst(classfst, lmfst)
def build_dictfst(lmfst):
"""
Build a character-to-word FST based on the symbol table of lmfst.
"""
insym = openfst.SymbolTable("letters")
insym.AddSymbol("ε")
outsym = lmfst.InputSymbols()
fst = openfst.StdVectorFst()
start = fst.AddState()
fst.SetStart(start)
final = fst.AddState()
fst.SetFinal(final, 0)
for w, wsym in outsym:
if wsym == 0: continue
# Use a single symbol for end-of-sentence
if w == '</s>':
w = [w,]
for c in w:
csym = insym.AddSymbol(c)
for w, wsym in outsym:
if wsym == 0: continue
wsym = outsym.Find(w)
# Add an epsilon:word arc to the first state of this word
prev = fst.AddState()
fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev))
# Use a single symbol for end-of-sentence
if w == '</s>':
w = [w,]
for c in w:
csym = insym.Find(c)
next = fst.AddState()
fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next))
prev = next
# And an epsilon arc to the final state
fst.AddArc(prev, openfst.StdArc(0, 0, 0, final))
fst.SetInputSymbols(insym)
fst.SetOutputSymbols(outsym)
return fst
def fst2pdf(fst, outfile, acceptor=False):
"""
Draw an FST as a PDF using fstdraw and dot.
"""
tempdir = tempfile.mkdtemp()
fstfile = os.path.join(tempdir, "output.fst")
fst.Write(fstfile)
if acceptor:
acceptor = "--acceptor"
else:
acceptor = ""
rv = os.system("fstdraw %s '%s' | dot -Tpdf > '%s'"
% (acceptor, fstfile, outfile))
os.unlink(fstfile)
os.rmdir(tempdir)
return rv
def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True):
"""
Convert a list of words, or a string of whitespace-separated
tokens, to a sentence FST.
"""
fst = fstclass()
start = fst.AddState()
fst.SetStart(start)
if isyms:
symtab = isyms
else:
symtab = openfst.SymbolTable("words")
symtab.AddSymbol("ε")
prev = start
if isinstance(txt, str):
txt = txt.split()
for c in txt:
if omitstart and c == '<s>':
continue
nxt = fst.AddState()
if isyms:
sym = isyms.Find(c)
if sym == -1:
#print "Warning, unknown word", c
continue
else:
sym = symtab.AddSymbol(c)
#print prev, sym, nxt
fst.AddArc(prev, sym, sym, 0, nxt)
prev = nxt
fst.SetFinal(nxt, 0)
fst.SetInputSymbols(symtab)
fst.SetOutputSymbols(symtab)
return fst
def str2fst(txt, fstclass=openfst.StdVectorFst):
"""
Convert a text string to an FST.
"""
fst = fstclass()
start = fst.AddState()
fst.SetStart(start)
symtab = openfst.SymbolTable("chars")
symtab.AddSymbol("ε")
prev = start
for c in txt:
nxt = fst.AddState()
sym = symtab.AddSymbol(c)
fst.AddArc(prev, sym, sym, 0, nxt)
prev = nxt
fst.SetFinal(nxt, 0)
fst.SetInputSymbols(symtab)
fst.SetOutputSymbols(symtab)
return fst
def strset2fst(strs, fstclass=openfst.StdVectorFst):
"""
Build a dictionary lookup FST for a set of strings.
"""
fst = fstclass()
isyms = openfst.SymbolTable("chars")
osyms = openfst.SymbolTable("words")
isyms.AddSymbol("ε")
osyms.AddSymbol("ε")
start = fst.AddState()
fst.SetStart(start)
for s in strs:
prev = start
for c in s:
nxt = fst.AddState()
isym = isyms.AddSymbol(c)
fst.AddArc(prev, isym, 0, 0, nxt)
prev = nxt
nxt = fst.AddState()
osym = osyms.AddSymbol(s)
fst.AddArc(prev, 0, osym, 0, nxt)
fst.SetFinal(nxt, 0)
dfst = fstclass()
openfst.Determinize(fst, dfst)
openfst.RmEpsilon(dfst)
dfst.SetInputSymbols(isyms)
dfst.SetOutputSymbols(osyms)
return dfst
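# Illustrative sketch (not part of the original module): building a small
# lookup transducer from a word list and writing it out. The words and output
# path are hypothetical.
def _example_strset():
    dfst = strset2fst(["cat", "car", "cart"])
    dfst.Write("/tmp/lookup.fst")
    return dfst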
import math
def lmfst_eval(lmfst, sent):
sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols())
phi = lmfst.InputSymbols().Find("φ")
if phi != -1:
opts = openfst.StdPhiComposeOptions()
opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE)
opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi)
c = openfst.StdComposeFst(sentfst, lmfst, opts)
else:
c = openfst.StdComposeFst(sentfst, lmfst)
o = openfst.StdVectorFst()
openfst.ShortestPath(c, o, 1)
st = o.Start()
ll = 0
while st != -1 and o.NumArcs(st):
a = o.GetArc(st, 0)
# print o.InputSymbols().Find(a.ilabel), \
# o.OutputSymbols().Find(a.olabel), \
# -a.weight.Value() / math.log(10)
ll -= a.weight.Value()
st = a.nextstate
return ll
def lm_eval(lm, sent):
sent = [x for x in sent.split() if not x.startswith('++')]
ll = 0
for i in xrange(len(sent)):
if sent[i] == '<s>':
continue
prob = lm.prob(sent[i::-1])
#print sent[i::-1], prob / math.log(10), bo
ll += prob
return ll
if __name__ == '__main__':
lmf, fstf = sys.argv[1:]
lm = sphinxbase.NGramModel(lmf)
fst = build_lmfst(lm)
fst.Write(fstf)
| 2.125 | 2 |
python/lab/ref.py | tao12345666333/Talk-Is-Cheap | 4 | 12798330 | #!/usr/bin/env python
# coding=utf-8
def add(a, b):
return a + b
c = add(288, 500)
| 2.390625 | 2 |
tests/testconfig.py | neurosis69/chia-blockchain | 0 | 12798331 | <gh_stars>0
from __future__ import annotations
from typing import List, Union
from typing_extensions import Literal
Oses = Literal["macos", "ubuntu", "windows"]
# Github actions template config.
oses: List[Oses] = ["macos", "ubuntu", "windows"]
# Defaults are conservative.
parallel: Union[bool, int, Literal["auto"]] = False
checkout_blocks_and_plots = False
install_timelord = False
check_resource_usage = False
job_timeout = 30
custom_vars: List[str] = []
os_skip: List[Oses] = []
| 1.8125 | 2 |
example/rcnn/rcnn/metric.py | Liuxg16/BrainMatrix | 4 | 12798332 | import mxnet as mx
import numpy as np
from rcnn.config import config
class LogLossMetric(mx.metric.EvalMetric):
def __init__(self):
super(LogLossMetric, self).__init__('LogLoss')
def update(self, labels, preds):
pred_cls = preds[0].asnumpy()
label = labels[0].asnumpy().astype('int32')
cls = pred_cls[np.arange(label.shape[0]), label]
cls += config.EPS
cls_loss = -1 * np.log(cls)
cls_loss = np.sum(cls_loss)
self.sum_metric += cls_loss
self.num_inst += label.shape[0]
class SmoothL1LossMetric(mx.metric.EvalMetric):
def __init__(self):
super(SmoothL1LossMetric, self).__init__('SmoothL1Loss')
def update(self, labels, preds):
bbox_loss = preds[0].asnumpy()
label = labels[0].asnumpy()
bbox_loss = np.sum(bbox_loss)
self.sum_metric += bbox_loss
self.num_inst += label.shape[0]
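# Illustrative sketch (not part of the original module): updating the log-loss
# metric with toy arrays shaped like the RCNN classifier outputs. The batch
# size and class count are made up.
def _example_log_loss():
    metric = LogLossMetric()
    pred_cls = mx.nd.array(np.array([[0.7, 0.3], [0.2, 0.8]]))
    labels = mx.nd.array(np.array([0, 1]))
    metric.update([labels], [pred_cls])
    return metric.get()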
| 2.328125 | 2 |
mblogger/soa_app_ml/soa_app_ml.py | mlatcl/fbp-vs-oop | 6 | 12798333 | <reponame>mlatcl/fbp-vs-oop
import requests
from mblogger.record_types import *
base_url = 'http://127.0.0.1:5000/'
class App():
def evaluate(self):
followers = self._get_followers()
followings = self._get_followings()
timelines = self._get_timelines()
generated_posts = self._get_generated_posts()
return self.get_outputs(followers, followings, timelines, generated_posts)
# Client to get list of followers
def _get_followers(self):
url = base_url + 'author-request/list_followers'
response = requests.post(url, json={})
followers = response.json()
return followers
# Client to get list of followings
def _get_followings(self):
url = base_url + 'author-request/list_followings'
response = requests.post(url, json={})
followings = response.json()
return followings
# Client to get timelines
def _get_timelines(self):
url = base_url + 'post-request/get_timelines'
response = requests.post(url, json={})
timelines = response.json()
return timelines
# Client to get list of generated posts
def _get_generated_posts(self):
url = base_url + 'post-request/get_generated_posts'
response = requests.post(url, json={})
generated_posts = response.json()
return generated_posts
def add_data(self, followings, followers, follow_requests, posts, input_record):
self._add_follow_requests(follow_requests)
self._add_posts(posts)
self._add_generated_post(input_record)
# Client to add follows data
def _add_follow_requests(self, follow_requests):
if len(follow_requests) > 0:
follows = []
for follow in follow_requests:
f = follow.to_dict()
follows.append(f)
url = base_url + 'author-request/follows'
response = requests.post(url, json=follows)
# print(response.json())
    # Client to add posts data
def _add_posts(self, posts):
if len(posts) > 0:
ps = []
for post in posts:
p = post.to_dict()
p['timestamp'] = str(post.timestamp)
ps.append(p)
url = base_url + 'post-request/create_posts'
response = requests.post(url, json=ps)
# print(response.json())
# Client to add a generated post
def _add_generated_post(self, input_record):
for ir in input_record:
req = {}
req['user_id'] = ir.user_id
req['length'] = ir.length
url = base_url + 'post-request/generate_post'
response = requests.post(url, json=req)
# Parsing data for main program
def get_outputs(self, followers, followings, timelines, generated_posts):
followers = self._parse_followers(followers)
followings = self._parse_followings(followings)
timelines = self._parse_timelines(timelines)
generated_posts = self._parse_generated_posts(generated_posts)
return followers, followings, timelines, generated_posts
# Parses followers
def _parse_followers(self, followers):
fs = []
for follower in followers:
fls = follower['followers']
f = FollowersRecord.from_dict(follower)
f.followers = fls
fs.append(f)
return fs
# Parses followings
def _parse_followings(self, followings):
fs = []
for following in followings:
fls = following['followings']
f = FollowingsRecord.from_dict(following)
f.followings = fls
fs.append(f)
return fs
# Parses timelines
def _parse_timelines(self, timelines):
ts = []
for timeline in timelines:
posts = timeline['posts']
for post in posts:
post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
timeline['posts'] = posts
t = Timeline.from_dict(timeline)
ts.append(t)
return ts
# Parses generated posts
def _parse_generated_posts(self, generated_posts):
ps = []
for post in generated_posts:
post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
p = Post.from_dict(post)
ps.append(p)
return ps
if __name__ == "__main__":
app = App()
| 2.71875 | 3 |
conv/inc/data_class.py | Kdy0115/simulation | 0 | 12798334 | import pandas as pd
import os
class DataFile:
    # Dictionary holding one day's worth of data
    data_files = {}
    def __init__(self,df,filepath,outpath,kind):
        # Contents of a single file
        self.df = df
        # Input file path
        self.filename = filepath
        # Output destination path
        self.output_dir = outpath
        # Kind of data
        self.data_kind = kind
def edit_columns(self,column,start):
rem_list = []
for i in range(column-2):
rem_list.append(i)
for j in range(i+2,start-2):
rem_list.append(j)
edit_df = self.df.drop(self.df.index[rem_list])
edit_df.columns = edit_df.iloc[0].values
self.df = edit_df.drop(edit_df.index[0])
def conversion_time_column(self,time_name):
        df = self.df.rename(columns={time_name: '時間'}) # the signal-name column holds timestamps, so rename it to '時間'
        df['時間'] = pd.to_datetime(df['時間'])     # parse as datetime
        df = df.set_index('時間')                      # use the time column as the index
        start_time = df.index[0]                        # start time
        end_time = df.index[-1]                         # end time
        day_gap = (end_time - start_time).days          # number of days from start to end
        df = pd.concat(                                 # build the final formatted data
[
df.loc[str(start_time.year)+"-"+str(start_time.month)+"-"+str(start_time.day):str(end_time.year)+"-"+str(end_time.month)+"-"+str(end_time.day)].between_time('0:00','23:59',include_end=True)
]
)
print(df.index,start_time,end_time)
self.df = df
def select_input_data(self,floor):
print(self.df.columns)
        df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # extract only the 5F columns
        df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # keep only the relevant 5F columns
        df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # narrow the selection down further
self.df = df_bems
self.conversion_time_column('信号名称')
def control_mode_edit(self):
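        # Operation-mode codes written below: 0 = power off, 1 = cooling, 2 = heating, 3 = fan-only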
        air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # prefixes shared by every column group
        for one in air_con_area:
            self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # operation state 0 -> power off (mode 0)
            self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # running with energy-saving level 2/3 or mode 3 -> fan-only (mode 3)
            if self.df.index[0].month == 8: # for August
                self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 # running with energy-saving level 1 -> cooling (mode 1)
            else: # for months other than August
                self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # running with energy-saving level 1 and mode 2 -> heating (mode 2)
            if (one == 'C5F 事務室中 PACG_') or (one == 'C5F 事務室南 PACG_'): # interior-perimeter units in winter
                self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # when running and heating on the interior side, apply the +4°C boost control to the intake temperature
def convesion_airconditioning_data(self):
for column,data in self.df.iteritems():
if 'C5F 事務室南 PACG_' in column:
self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data)
self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data)
elif 'C5F 事務室東南 PAC_' not in column and 'B館 RF 外気温度' not in column:
self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data)
self.df.rename(columns={
"B館 RF 外気温度":"外気温",
"C5F 事務室中ペリ PACG_吸込温度":"吸込温度0",
"C5F 事務室中ペリ PACG_設定温度":"設定温度0",
"C5F 事務室中ペリ PACG_運転モード":"運転モード0",
"C5F 事務室中ペリ PACG_風速":"風速0",
"C5F 事務室中ペリ PACG_吸込温度_2":"吸込温度1",
"C5F 事務室中ペリ PACG_設定温度_2":"設定温度1",
"C5F 事務室中ペリ PACG_運転モード_2":"運転モード1",
"C5F 事務室中ペリ PACG_風速_2":"風速1",
"C5F 事務室中 PACG_吸込温度":"吸込温度2",
"C5F 事務室中 PACG_設定温度":"設定温度2",
"C5F 事務室中 PACG_運転モード":"運転モード2",
"C5F 事務室中 PACG_風速":"風速2",
"C5F 事務室中 PACG_吸込温度_2":"吸込温度3",
"C5F 事務室中 PACG_設定温度_2":"設定温度3",
"C5F 事務室中 PACG_運転モード_2":"運転モード3",
"C5F 事務室中 PACG_風速_2":"風速3",
"C5F 事務室南ペリ PACG_吸込温度":"吸込温度4",
"C5F 事務室南ペリ PACG_設定温度":"設定温度4",
"C5F 事務室南ペリ PACG_運転モード":"運転モード4",
"C5F 事務室南ペリ PACG_風速":"風速4",
"C5F 事務室南ペリ PACG_吸込温度_2":"吸込温度5",
"C5F 事務室南ペリ PACG_設定温度_2":"設定温度5",
"C5F 事務室南ペリ PACG_運転モード_2":"運転モード5",
"C5F 事務室南ペリ PACG_風速_2":"風速5",
"C5F 事務室南 PACG_吸込温度":"吸込温度6",
"C5F 事務室南 PACG_設定温度":"設定温度6",
"C5F 事務室南 PACG_運転モード":"運転モード6",
"C5F 事務室南 PACG_風速":"風速6",
"C5F 事務室南 PACG_吸込温度_2":"吸込温度7",
"C5F 事務室南 PACG_設定温度_2":"設定温度7",
"C5F 事務室南 PACG_運転モード_2":"運転モード7",
"C5F 事務室南 PACG_風速_2":"風速7",
"C5F 事務室南 PACG_吸込温度_3":"吸込温度8",
"C5F 事務室南 PACG_設定温度_3":"設定温度8",
"C5F 事務室南 PACG_運転モード_3":"運転モード8",
"C5F 事務室南 PACG_風速_3":"風速8",
"C5F 事務室東南 PAC_吸込温度":"吸込温度9",
"C5F 事務室東南 PAC_設定温度":"設定温度9",
"C5F 事務室東南 PAC_運転モード":"運転モード9",
"C5F 事務室東南 PAC_風速":"風速9"
}, inplace=True)
self.df = self.df.reindex(columns=[
"吸込温度0",
"設定温度0",
"運転モード0",
"風速0",
"吸込温度1",
"設定温度1",
"運転モード1",
"風速1",
"吸込温度2",
"設定温度2",
"運転モード2",
"風速2",
"吸込温度3",
"設定温度3",
"運転モード3",
"風速3",
"吸込温度4",
"設定温度4",
"運転モード4",
"風速4",
"吸込温度5",
"設定温度5",
"運転モード5",
"風速5",
"吸込温度6",
"設定温度6",
"運転モード6",
"風速6",
"吸込温度7",
"設定温度7",
"運転モード7",
"風速7",
"吸込温度8",
"設定温度8",
"運転モード8",
"風速8",
"吸込温度9",
"設定温度9",
"運転モード9",
"風速9",
"外気温",
])
def create_result_folder(self,month,day):
print("Creating a data output destination directory.........")
print("-------------------------------------------------------")
if month < 10:
month = "0" + str(month)
if day < 10:
day = "0" + str(day)
folder_name = "{0}-{1}".format(month,day)
output = "{0}\\{1}".format(self.output_dir,folder_name)
try:
os.makedirs(output)
            print('Created folder ' + output)
except FileExistsError:
pass
print("-------------------------------------------------------")
return output
def create_conversion_file(self,output,data,index):
def conversion_index(df):
df.index = list(map(lambda x:x[11:16],df.index.astype(str)))
df.index.name = '時間'
return df
print(output)
for key,value in data.items():
if key == "init_bems" and index == False:
result = value
result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str)))
result.iloc[-1] = "EOF"
result.to_csv(output+"\\{}.csv".format(key),encoding='shift-jis',mode='w',index=index)
else:
result = conversion_index(value)
result.to_csv(output+"\\{}.csv".format(key),encoding='shift-jis',mode='w')
print("作成フォルダ:{}\nBEMSデータ整形完了しました".format(output))
print("Outputing formatted input data...")
print("-------------------------------------------------------")
def create_no_operation_conversion_data(self):
def formatted_no_operation_init_bems(df):
init_bems_list_time = []
for i in range(1,len(df)):
if i == 1:
pre_time = df.index[0]
curr_time = df.index[1]
init_bems_list_time.append(0)
else:
curr_time = df.index[i]
date_gap = (curr_time - pre_time).days
time_gap = (curr_time - pre_time).seconds
if date_gap != 0 or time_gap != 60:
init_bems_list_time.append(i)
pre_time = curr_time
df_re_index = df.reset_index()
df_re_index = df_re_index.loc[init_bems_list_time]
df_re_index.loc[-1] = "EOF"
# time_array = df_re_index['時間']
# time_array.append("EOF")
# df_re_index = df_re_index.drop('時間',axis=1)
# df_re_index.index = time_array
print(df_re_index)
return df_re_index
df_bems_control_list = []
self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')]
self.convesion_airconditioning_data()
self.df = self.df[
(self.df['運転モード0'] == 0) &
(self.df['運転モード1'] == 0) &
(self.df['運転モード2'] == 0) &
(self.df['運転モード3'] == 0) &
(self.df['運転モード4'] == 0) &
(self.df['運転モード5'] == 0) &
(self.df['運転モード6'] == 0) &
(self.df['運転モード7'] == 0) &
(self.df['運転モード8'] == 0) &
(self.df['運転モード9'] == 0)
]
for month in range(1,13):
for day in range(1,32):
one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day == day)]
                # process only when this day contains any data
if len(one_day_df) > 1:
output_path_folder = self.create_result_folder(month,day)
                    df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # control file
                    df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # initial-condition file
                    df_bems_init = formatted_no_operation_init_bems(df_bems_init) # put initial values at the head of each non-contiguous time range
                    df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # evaluation file
result_data = {
'no_operation':df_bems_control,
'init_bems':df_bems_init,
'evaluation':df_bems_eval
}
self.create_conversion_file(output_path_folder,result_data,False)
def create_conversion_data(self):
df_bems_control_list = []
self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')]
self.convesion_airconditioning_data()
for month in range(1,13):
for day in range(1,32):
one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day == day)]
                # process only when this day contains any data
if len(one_day_df) > 1:
output_path_folder = self.create_result_folder(month,day)
                    df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # control file
                    df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # initial-condition file
                    df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')]
                    df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # evaluation file
result_data = {
'control':df_bems_control,
'init_bems':df_bems_init,
'evaluation':df_bems_eval
}
self.create_conversion_file(output_path_folder,result_data,True)
class MeasureDataFile(DataFile):
def remove_null_data(self):
self.df = self.df.drop('@date()',axis=1).dropna(how='all')
def conversion_columns_name(self):
columns_list = []
for column in self.df.columns:
columns_list.append('温度取り_' + column)
self.df.columns = columns_list
def create_conversion_data(self):
print("Outputing formatted input data...")
print("-------------------------------------------------------")
df_bems_control_list = []
for month in range(1,13):
for day in range(1,32):
one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day == day)]
                # process only when this day contains any data
if len(one_day_df) > 1:
output_path_folder = self.create_result_folder(month,day)
result_data = {
'measure':one_day_df
}
                    self.create_conversion_file(output_path_folder,result_data,True)
molecules/ml/unsupervised/vae/vae.py | hengma1001/molecules | 0 | 12798335 | <reponame>hengma1001/molecules<filename>molecules/ml/unsupervised/vae/vae.py
import torch
from torch import nn
from torch.nn import functional as F
from .resnet import ResnetVAEHyperparams
from .symmetric import SymmetricVAEHyperparams
from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer
__all__ = ['VAE']
class VAEModel(nn.Module):
def __init__(self, input_shape, hparams):
super(VAEModel, self).__init__()
# Select encoder/decoder models by the type of the hparams
if isinstance(hparams, SymmetricVAEHyperparams):
from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d
self.encoder = SymmetricEncoderConv2d(input_shape, hparams)
self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim)
elif isinstance(hparams, ResnetVAEHyperparams):
from .resnet import ResnetEncoder, ResnetDecoder
self.encoder = ResnetEncoder(input_shape, hparams)
self.decoder = ResnetDecoder(input_shape, hparams)
else:
raise TypeError(f'Invalid hparams type: {type(hparams)}.')
def reparameterize(self, mu, logvar):
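        # Reparameterization trick: sample z = mu + sigma * eps with eps ~ N(0, I) so gradients flow through mu and logvar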
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
def forward(self, x):
mu, logvar = self.encoder(x)
x = self.reparameterize(mu, logvar)
x = self.decoder(x)
# TODO: see if we can remove this to speed things up
# or find an inplace way. Only necessary for bad
# hyperparam config such as optimizer learning rate
# being large.
#x = torch.where(torch.isnan(x), torch.zeros_like(x), x)
return x, mu, logvar
def encode(self, x):
# mu layer
return self.encoder.encode(x)
def decode(self, embedding):
return self.decoder.decode(embedding)
def save_weights(self, enc_path, dec_path):
self.encoder.save_weights(enc_path)
self.decoder.save_weights(dec_path)
def load_weights(self, enc_path, dec_path):
self.encoder.load_weights(enc_path)
self.decoder.load_weights(dec_path)
def vae_loss(recon_x, x, mu, logvar):
"""
Effects
-------
Reconstruction + KL divergence losses summed over all elements and batch
See Appendix B from VAE paper:
Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
https://arxiv.org/abs/1312.6114
"""
BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
# TODO: set weight initialization hparams
class VAE:
"""
Provides high level interface for training, testing and saving VAE
models. Takes arbitrary encoder/decoder models specified by the choice
of hyperparameters. Assumes the shape of the data is square.
Attributes
----------
input_shape : tuple
        shape of incoming data.
model : torch.nn.Module (VAEModel)
Underlying Pytorch model with encoder/decoder attributes.
optimizer : torch.optim.Optimizer
Pytorch optimizer used to train model.
loss_func : function
Loss function used to train model.
Methods
-------
train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[])
Train model
encode(x)
Embed data into the latent space.
decode(embedding)
Generate matrices from embeddings.
save_weights(enc_path, dec_path)
Save encoder/decoder weights.
load_weights(enc_path, dec_path)
Load saved encoder/decoder weights.
"""
def __init__(self, input_shape,
hparams=SymmetricVAEHyperparams(),
optimizer_hparams=OptimizerHyperparams(),
loss=None,
cuda=True,
verbose=True):
"""
Parameters
----------
input_shape : tuple
            shape of incoming data.
Note: For use with SymmetricVAE use (1, num_residues, num_residues)
For use with ResnetVAE use (num_residues, num_residues)
hparams : molecules.ml.hyperparams.Hyperparams
Defines the model architecture hyperparameters. Currently implemented
are SymmetricVAEHyperparams and ResnetVAEHyperparams.
optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams
Defines the optimizer type and corresponding hyperparameters.
        loss : function, optional
            Defines an optional loss function with inputs (recon_x, x, mu, logvar)
            that outputs a torch loss.
cuda : bool
True specifies to use cuda if it is available. False uses cpu.
verbose : bool
True prints training and validation loss to stdout.
"""
hparams.validate()
optimizer_hparams.validate()
self.input_shape = input_shape
self.verbose = verbose
# TODO: consider passing in device (this will allow the ability to set the train/test
# data to cuda as well, since device will be a variable in the user space)
self.device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu')
self.model = VAEModel(input_shape, hparams).to(self.device)
# TODO: consider making optimizer_hparams a member variable
# RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0
self.optimizer = get_optimizer(self.model, optimizer_hparams)
self.loss_fnc = vae_loss if loss is None else loss
def __repr__(self):
return str(self.model)
def train(self, train_loader, valid_loader, epochs=1, checkpoint='',
callbacks=[]):
"""
Train model
Parameters
----------
train_loader : torch.utils.data.dataloader.DataLoader
Contains training data
valid_loader : torch.utils.data.dataloader.DataLoader
Contains validation data
epochs : int
Number of epochs to train for
checkpoint : str
Path to checkpoint file to load and resume training
from the epoch when the checkpoint was saved.
callbacks : list
Contains molecules.utils.callback.Callback objects
which are called during training.
"""
if callbacks:
logs = {'model': self.model, 'optimizer': self.optimizer}
else:
logs = {}
start_epoch = 1
if checkpoint:
start_epoch += self._load_checkpoint(checkpoint)
for callback in callbacks:
callback.on_train_begin(logs)
for epoch in range(start_epoch, epochs + 1):
for callback in callbacks:
callback.on_epoch_begin(epoch, logs)
self._train(train_loader, epoch, callbacks, logs)
self._validate(valid_loader, callbacks, logs)
for callback in callbacks:
callback.on_epoch_end(epoch, logs)
for callback in callbacks:
callback.on_train_end(logs)
def _train(self, train_loader, epoch, callbacks, logs):
"""
Train for 1 epoch
Parameters
----------
train_loader : torch.utils.data.dataloader.DataLoader
Contains training data
epoch : int
Current epoch of training
callbacks : list
Contains molecules.utils.callback.Callback objects
which are called during training.
logs : dict
Filled with data for callbacks
"""
self.model.train()
train_loss = 0.
for batch_idx, data in enumerate(train_loader):
if callbacks:
pass # TODO: add more to logs
for callback in callbacks:
callback.on_batch_begin(batch_idx, epoch, logs)
data = data.to(self.device)
self.optimizer.zero_grad()
recon_batch, mu, logvar = self.model(data)
loss = self.loss_fnc(recon_batch, data, mu, logvar)
loss.backward()
train_loss += loss.item()
self.optimizer.step()
if callbacks:
logs['train_loss'] = loss.item() / len(data)
logs['global_step'] = (epoch - 1) * len(train_loader) + batch_idx
for callback in callbacks:
callback.on_batch_end(batch_idx, epoch, logs)
if self.verbose:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item() / len(data)))
train_loss /= len(train_loader.dataset)
if callbacks:
logs['train_loss'] = train_loss
logs['global_step'] = epoch
if self.verbose:
print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss))
def _validate(self, valid_loader, callbacks, logs):
"""
Test model on validation set.
Parameters
----------
valid_loader : torch.utils.data.dataloader.DataLoader
Contains validation data
callbacks : list
Contains molecules.utils.callback.Callback objects
which are called during training.
logs : dict
Filled with data for callbacks
"""
self.model.eval()
valid_loss = 0
with torch.no_grad():
for data in valid_loader:
data = data.to(self.device)
recon_batch, mu, logvar = self.model(data)
valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item()
valid_loss /= len(valid_loader.dataset)
if callbacks:
logs['valid_loss'] = valid_loss
if self.verbose:
print('====> Validation loss: {:.4f}'.format(valid_loss))
def _load_checkpoint(self, path):
"""
Loads checkpoint file containing optimizer state and
encoder/decoder weights.
Parameters
----------
path : str
Path to checkpoint file
Returns
-------
Epoch of training corresponding to the saved checkpoint.
"""
cp = torch.load(path)
self.model.encoder.load_state_dict(cp['encoder_state_dict'])
self.model.decoder.load_state_dict(cp['decoder_state_dict'])
self.optimizer.load_state_dict(cp['optimizer_state_dict'])
return cp['epoch']
def encode(self, x):
"""
Embed data into the latent space.
Parameters
----------
x : torch.Tensor
Data to encode, could be a batch of data with dimension
(batch-size, input_shape)
Returns
-------
torch.Tensor of embeddings of shape (batch-size, latent_dim)
"""
return self.model.encode(x)
def decode(self, embedding):
"""
Generate matrices from embeddings.
Parameters
----------
embedding : torch.Tensor
Embedding data, could be a batch of data with dimension
(batch-size, latent_dim)
Returns
-------
torch.Tensor of generated matrices of shape (batch-size, input_shape)
"""
return self.model.decode(embedding)
def save_weights(self, enc_path, dec_path):
"""
Save encoder/decoder weights.
Parameters
----------
enc_path : str
Path to save the encoder weights.
dec_path : str
Path to save the decoder weights.
"""
self.model.save_weights(enc_path, dec_path)
def load_weights(self, enc_path, dec_path):
"""
Load saved encoder/decoder weights.
Parameters
----------
        enc_path : str
            Path to the saved encoder weights to load.
        dec_path : str
            Path to the saved decoder weights to load.
"""
self.model.load_weights(enc_path, dec_path)
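
# ---------------------------------------------------------------------------
# Illustrative usage sketch (comments only, not part of the original module).
# `train_loader` and `valid_loader` stand in for torch DataLoaders that yield
# square matrices shaped like `input_shape`.
#
#   vae = VAE(input_shape=(1, num_residues, num_residues))
#   vae.train(train_loader, valid_loader, epochs=10)
#   embedding = vae.encode(batch)     # (batch_size, latent_dim)
#   recon = vae.decode(embedding)     # (batch_size, *input_shape)
#   vae.save_weights('encoder.pt', 'decoder.pt')
# ---------------------------------------------------------------------------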
| 2.109375 | 2 |
tests/test_all.py | avidale/compress-fasttext | 111 | 12798336 | import os
import gensim
import pytest
import compress_fasttext
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from compress_fasttext.feature_extraction import FastTextTransformer
BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin')
BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/'
def cosine_sim(x, y):
return sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5
@pytest.mark.parametrize('method, params', [
(compress_fasttext.quantize_ft, dict(qdim=32)),
(compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)),
(compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)),
(compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)),
(compress_fasttext.svd_ft, dict(n_components=32)),
])
def test_prune_save_load(method, params):
word1 = 'синий'
word2 = 'белый'
big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE)
vec0 = big_ft[word1]
small_model = method(big_ft, **params)
assert cosine_sim(vec0, small_model[word1]) > 0.75
out1 = small_model.most_similar(word1)
assert word2 in {w for w, sim in out1}
small_model.save('tmp_small.bin')
small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin')
assert cosine_sim(vec0, small_model2[word1]) > 0.75
out2 = small_model2.most_similar(word1)
assert word2 in {w for w, sim in out2}
assert out1[0][1] == pytest.approx(out2[0][1])
@pytest.mark.parametrize('word1, word2, model_name', [
('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'),
('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'),
('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'),
])
def test_loading_existing_models(word1, word2, model_name):
ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name)
out = ft.most_similar(word1)
assert word2 in {w for w, sim in out}
def test_sklearn_wrapper():
small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load(
'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin'
)
classifier = make_pipeline(
FastTextTransformer(model=small_model),
LogisticRegression()
).fit(
['banana', 'soup', 'burger', 'car', 'tree', 'city'],
[1, 1, 1, 0, 0, 0]
)
assert (classifier.predict(['jet', 'train', 'cake', 'apple']) == [0, 0, 1, 1]).all()
| 2.375 | 2 |
URI/1-Beginner/1074.py | vicenteneto/online-judge-solutions | 0 | 12798337 | # -*- coding: utf-8 -*-
for i in range(int(raw_input())):
x = int(raw_input())
if x == 0:
print 'NULL'
elif x < 0:
if x % 2 == 0:
print 'EVEN NEGATIVE'
else:
print 'ODD NEGATIVE'
else:
if x % 2 == 0:
print 'EVEN POSITIVE'
else:
print 'ODD POSITIVE'
| 4.125 | 4 |
tests/parser_test.py | malanb5/py_requirements_installer | 0 | 12798338 | import unittest, yaml
from pyreqgen.ReqParser import *
class TestParser(unittest.TestCase):
def test_files(self):
with open("../config.yaml", 'r+') as config_f:
configs = yaml.load(config_f, Loader=yaml.FullLoader)
print(configs)
py_files = ReqParser.__get_py_files(configs)
reqs = ReqParser.__get_reqs(py_files)
with open("requirements.txt", 'w+') as req_f:
req_f.write('\n'.join(reqs))
with open("requirements.txt", "r") as req_f:
lines = req_f.readlines()
print(lines.sort())
exp_lines = ['IPython\n',
'altair\n',
'bayes_opt\n',
'catboost\n',
'category_encoders\n',
'collections\n',
'datetime\n',
'eli5\n',
'gc\n',
'itertools\n',
'joblib\n',
'json\n',
'lightgbm\n',
'matplotlib\n',
'networkx\n',
'numba\n',
'numpy\n',
'os\n',
'pandas\n',
're\n',
'seaborn\n',
'shap\n',
'sklearn\n',
'time\n',
'tqdm\n',
'typing\n',
'warnings\n',
'xgboost\n']
lines = set(map(lambda x: x.strip(), lines))
exp_lines = set(map(lambda x: x.strip(), exp_lines))
self.assertEqual(lines, exp_lines)
if __name__ == "__main__":
unittest.main() | 2.640625 | 3 |
xcparse/Xcode/xcscheme.py | samdmarshall/xcparser | 59 | 12798339 | import os
import sys
import xml.etree.ElementTree as xml
from ..Helpers import path_helper
from ..Helpers import xcrun_helper
from ..Helpers import logging_helper
from .XCSchemeActions.BuildAction import BuildAction
from .XCSchemeActions.TestAction import TestAction
from .XCSchemeActions.LaunchAction import LaunchAction
from .XCSchemeActions.ProfileAction import ProfileAction
from .XCSchemeActions.AnalyzeAction import AnalyzeAction
from .XCSchemeActions.ArchiveAction import ArchiveAction
def XCSchemeHasSharedSchemes(path):
return os.path.exists(os.path.join(path, 'xcshareddata'));
def XCSchemeHasUserSchemes(path):
return os.path.exists(os.path.join(path, 'xcuserdata'));
def XCSchemeGetSharedPath(path):
return os.path.join(path, 'xcshareddata/xcschemes');
def XCSchemeGetUserPath(path):
return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/');
def XCSchemeParseDirectory(dir_path):
schemes = [];
if os.path.exists(dir_path) == True:
for scheme_file in os.listdir(dir_path):
scheme_file_path = os.path.join(dir_path, scheme_file);
if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path):
scheme_xml = xcscheme(scheme_file_path);
if scheme_xml.isValid() == True:
schemes.append(scheme_xml);
else:
logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path "%s"' % scheme_file_path);
else:
# skipping the known management file
if scheme_file != 'xcschememanagement.plist':
logging_helper.getLogger().warn('[xcscheme]: "%s" is not an xcscheme file!' % scheme_file_path);
else:
logging_helper.getLogger().warn('[xcscheme]: "%s" path does not exist!' % dir_path);
return schemes;
class xcscheme(object):
def __init__(self, path):
self.shared = False;
self.container = '';
self.path = path_helper(path, '');
self.name = os.path.basename(path).split('.xcscheme')[0];
self.contents = None;
try:
self.contents = xml.parse(self.path.obj_path);
except:
logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme file!');
def __repr__(self):
if self.isValid():
return '(%s : %s : %s)' % (type(self), self.name, self.path);
else:
return '(%s : INVALID OBJECT)' % (type(self));
def __attrs(self):
return (self.name, self.path);
def __eq__(self, other):
return isinstance(other, xcscheme) and self.name == other.name and self.path.root_path == other.path.root_path;
def __hash__(self):
return hash(self.__attrs());
def isValid(self):
return self.contents != None;
def actionLookup(self, action_name):
"""
This method returns the method for the passed action type, None otherwise.
"""
action_name = action_name.lower();
lookup = {
'build': self.buildAction,
'test': self.testAction,
'launch': self.launchAction,
'profile': self.profileAction,
'analyze': self.analyzeAction,
'archive': self.archiveAction
};
action = None;
if action_name in lookup.keys():
action = lookup[action_name];
return action;
def getAction(self, action_type):
"""
        This method returns the action object for the passed action type, otherwise None.
"""
action = None;
if self.isValid():
action = filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0];
return action;
def buildAction(self, container):
"""
Returns the 'build' action for this scheme.
"""
action = None;
if self.isValid():
action = BuildAction(self.getAction('BuildAction'));
return action;
def testAction(self, container):
"""
Returns the 'test' action for this scheme.
"""
action = None;
if self.isValid():
action = TestAction(self.getAction('TestAction'));
action.root = BuildAction(self.getAction('BuildAction'))
return action;
def launchAction(self, container):
"""
Returns the 'launch' action for this scheme.
"""
action = None;
if self.isValid():
action = LaunchAction(self.getAction('LaunchAction'));
return action;
def profileAction(self, container):
"""
Returns the 'profile' action for this scheme.
"""
action = None;
if self.isValid():
action = ProfileAction(self.getAction('ProfileAction'));
return action;
def analyzeAction(self, container):
"""
Returns the 'analyze' action for this scheme.
"""
action = None;
if self.isValid():
action = AnalyzeAction(self.getAction('AnalyzeAction'));
action.root = BuildAction(self.getAction('BuildAction'))
return action;
def archiveAction(self, container):
"""
Returns the 'archive' action for this scheme.
"""
action = None;
if self.isValid():
action = ArchiveAction(self.getAction('ArchiveAction'));
action.root = BuildAction(self.getAction('BuildAction'))
return action;
| 2.046875 | 2 |
itk_invitations/migrations/0001_initial.py | eric-scott-owens/loopla | 0 | 12798340 | <filename>itk_invitations/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 18:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import localflavor.us.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Invitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=64, unique=True)),
('is_coordinator', models.BooleanField(default=False)),
('sent_timestamp', models.DateTimeField(blank=True)),
('response_timestamp', models.DateTimeField(blank=True)),
('is_accepted', models.BooleanField(default=False)),
('is_declined', models.BooleanField(default=False)),
('response_message', models.TextField(blank=True)),
('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)),
('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')),
],
),
migrations.CreateModel(
name='Invitation_Visit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_action_timestamp', models.DateTimeField(blank=True)),
('last_action_timestamp', models.DateTimeField(blank=True)),
('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')),
],
),
migrations.CreateModel(
name='InvitationMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
],
),
migrations.CreateModel(
name='UnregisteredUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30)),
('last_name', models.CharField(blank=True, max_length=30)),
('email', models.EmailField(max_length=254)),
('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)),
],
),
migrations.AddField(
model_name='invitation',
name='invitation_message',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'),
),
migrations.AddField(
model_name='invitation',
name='invitee',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'),
),
migrations.AddField(
model_name='invitation',
name='inviter',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inviter', to=settings.AUTH_USER_MODEL),
),
]
| 1.640625 | 2 |
tests/__init__.py | Nazze/ha_best_bottrop_garbage_collection | 0 | 12798341 | """Tests for the BEST bottrop custom component."""
| 1.125 | 1 |
src/basics.py | dandjo/tensorflow-playground | 0 | 12798342 | <reponame>dandjo/tensorflow-playground
import tensorflow as tf
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0)
print(node1) # Tensor("Const:0", shape=(), dtype=float32)
print(node2) # Tensor("Const_1:0", shape=(), dtype=float32)
node3 = node1 * node2
print(node3) # Tensor("mul:0", shape=(), dtype=float32)
with tf.Session() as sess:
output = sess.run([node1, node2])
print(output) # [3.0, 4.0]
with tf.Session() as sess:
output = sess.run(node3)
print(output) # 12
file_writer = tf.summary.FileWriter('graph', sess.graph)
# run `tensorboard --logdir="graph"` in command line to show the result
| 3.0625 | 3 |
dedupsqlfs/app/actions/recompress.py | tabulon-ext/dedupsqlfs | 22 | 12798343 | # -*- coding: utf8 -*-
"""
Special action to recompress all data
"""
__author__ = 'sergey'
import sys
from multiprocessing import cpu_count
def do_recompress(options, _fuse):
"""
@param options: Commandline options
@type options: object
@param _fuse: FUSE wrapper
@type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
"""
isVerbose = _fuse.getOption("verbosity") > 0
tableHash = _fuse.operations.getTable("hash")
tableHashCT = _fuse.operations.getTable("hash_compression_type")
tableBlock = _fuse.operations.getTable("block")
tableSubvol = _fuse.operations.getTable("subvolume")
hashCount = tableHash.get_count()
if isVerbose:
print("Ready to recompress %s blocks." % hashCount)
cur = tableHash.getCursor(True)
_fuse.operations.getManager().setAutocommit(False)
tableBlock.begin()
tableHashCT.begin()
_fuse.operations.getManager().setAutocommit(True)
    # Progress-display step: update the printed percentage roughly every 0.01% of all blocks
cntNth = int(hashCount/10000.0)
if cntNth < 1:
cntNth = 1
    # Process blocks in batches of maxBatch rows, then commit
maxBatch = 1000
offBatch = 0
cnt = cntNext = upd = 0
cpu_n = cpu_count() * 4
try:
toCompress = {}
toCompressM = {}
while cnt < hashCount:
cur.execute("SELECT `id` FROM `%s` LIMIT %s OFFSET %s" % (tableHash.getName(), maxBatch, offBatch,))
offBatch += maxBatch
for hashItem in iter(cur.fetchone, None):
cnt += 1
hashId = hashItem["id"]
blockItem = tableBlock.get(hashId)
hashCT = tableHashCT.get(hashId)
curMethod = _fuse.operations.getCompressionTypeName(hashCT["type_id"])
blockData = _fuse.decompressData(curMethod, blockItem["data"])
toCompress[ hashId ] = blockData
toCompressM[ hashId ] = curMethod
if cnt % cpu_n == 0:
for hashId, item in _fuse.compressData(toCompress):
cData, cMethod = item
curMethod = toCompressM[ hashId ]
if cMethod != curMethod:
cMethodId = _fuse.operations.getCompressionTypeId(cMethod)
res = tableBlock.update(hashId, cData)
res2 = tableHashCT.update(hashId, cMethodId)
if res and res2:
upd += 1
toCompress = {}
toCompressM = {}
if isVerbose:
if cnt >= cntNext:
cntNext += cntNth
prc = "%6.2f%%" % (cnt*100.0/hashCount)
sys.stdout.write("\r%s " % prc)
sys.stdout.flush()
# For ends - blocks commit
_fuse.operations.getManager().setAutocommit(False)
tableBlock.commit()
tableHashCT.commit()
tableBlock.shrinkMemory()
tableHash.shrinkMemory()
tableHashCT.shrinkMemory()
tableBlock.begin()
tableHashCT.begin()
_fuse.operations.getManager().setAutocommit(True)
if len(toCompress.keys()):
for hashId, item in _fuse.compressData(toCompress):
cData, cMethod = item
curMethod = toCompressM[hashId]
if cMethod != curMethod:
cMethodId = _fuse.operations.getCompressionTypeId(cMethod)
res = tableBlock.update(hashId, cData)
res2 = tableHashCT.update(hashId, cMethodId)
if res and res2:
upd += 1
except:
pass
if isVerbose:
sys.stdout.write("\n")
sys.stdout.flush()
if isVerbose:
print("Processed %s blocks, recompressed %s blocks." % (cnt, upd,))
if hashCount != cnt:
_fuse.operations.getManager().setAutocommit(False)
tableBlock.rollback()
tableHashCT.rollback()
_fuse.operations.getManager().setAutocommit(True)
print("Something went wrong? Changes are rolled back!")
return 1
_fuse.operations.getManager().setAutocommit(False)
tableBlock.commit()
tableHashCT.commit()
_fuse.operations.getManager().setAutocommit(True)
tableBlock.shrinkMemory()
tableHash.shrinkMemory()
tableHashCT.shrinkMemory()
subvCount = tableSubvol.get_count()
if isVerbose:
print("Recalculate filesystem and %s subvolumes statistics." % subvCount)
cur = tableSubvol.getCursor(True)
cur.execute("SELECT * FROM `%s`" % tableSubvol.getName())
_fuse.operations.getManager().setAutocommit(False)
tableSubvol.begin()
_fuse.operations.getManager().setAutocommit(True)
from dedupsqlfs.fuse.subvolume import Subvolume
sv = Subvolume(_fuse.operations)
cnt = cntNext = 0
cntNth = subvCount / 10000.0 / 3
if cntNth < 1:
cntNth = 1
for subvItem in iter(cur.fetchone, None):
sv.clean_stats(subvItem["name"])
cnt += 1
if isVerbose:
if cnt >= cntNext:
cntNext += cntNth
prc = "%6.2f%%" % (cnt * 100.0 / subvCount / 3)
sys.stdout.write("\r%s " % prc)
sys.stdout.flush()
sv.get_usage(subvItem["name"], True)
cnt += 1
if isVerbose:
if cnt >= cntNext:
cntNext += cntNth
prc = "%6.2f%%" % (cnt * 100.0 / subvCount / 3)
sys.stdout.write("\r%s " % prc)
sys.stdout.flush()
sv.get_root_diff(subvItem["name"])
cnt += 1
if isVerbose:
if cnt >= cntNext:
cntNext += cntNth
prc = "%6.2f%%" % (cnt * 100.0 / subvCount / 3)
sys.stdout.write("\r%s " % prc)
sys.stdout.flush()
if isVerbose:
sys.stdout.write("\n")
sys.stdout.flush()
_fuse.operations.getManager().setAutocommit(False)
tableSubvol.commit()
_fuse.operations.getManager().setAutocommit(True)
return 0
| 2.3125 | 2 |
get_positions.py | gregorytadams/Model_UN | 0 | 12798344 | # get_positions.py
import pandas as pd
from math import ceil
from sys import argv
'''
Current known problems:
- do schools at different times (ew)
- Bias towards double delegate committees
'''
class Team:
def __init__(self, name, num_delegates, preferences):
'''
num_delegats is an int of the total number of delegates
preferences is the ranked preferences as a list, in order (all committees must be present)
picks is the picks we assign to make the draft fair
assigned committees will be the committees assigned to be outputted
'''
self.name = name
self.num_delegates = num_delegates
self.preferences = preferences
self.picks = self._get_picks(list(range(len(preferences))), num_delegates)
self.assigned_committees = []
self.num_dels_assigned = 0
def _get_picks(self, sequence, num):
'''
Intersperses picks for small delegations.
        Takes a list of possible rounds and the number of picks, and returns the rounds in which this team picks.
Thanks stack overflow!
http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n
'''
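        # Example: _get_picks(list(range(6)), 3) evenly spreads the picks over the rounds -> [0, 2, 4]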
picks = []
length = float(len(sequence))
for i in range(num):
picks.append(sequence[int(ceil(i * length / num))])
return picks
class Committee:
def __init__(self, name, num_spots, delegation_size):
'''
name: name of committee
num_spots: maximum number of delegates that can be assigned to that committee
delegation size: 1 for single, 2 for double, and so on
assigned schools: the schools who have a spot on the committee
'''
self.name = name
self.num_spots = num_spots
self.delegation_size = delegation_size
self.assigned_schools = []
def read_info(school_info_filename, committee_info_filename):
'''
Takes the filepaths and returns the dataframes
'''
schools = pd.read_csv(school_info_filename)
comms = pd.read_csv(committee_info_filename)
return schools, comms
def format_for_main(schools, comms):
'''
Creates all the objects and fills in the information from the dataframes
inputs:
schools, comms: pandas dataframes from read_info
outputs:
teams, a list of Team objects
committees, a dict mapping committee names to Committee objects
'''
teams = []
committees = {}
max_at_conf = 0
comms.columns = ['Committee', 'Number of Spots', 'Delegation Size']
schools.columns = ['School', 'Number of Delegates'] + \
["Preference {}".format(str(i)) for i in range(len(comms))]
for index, row in comms.iterrows():
comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size'])
committees[row['Committee']] = comm
max_at_conf += row['Delegation Size']
for index, row in schools.iterrows():
prefs = [j for j in row[2:]]
for i in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more delegates requested
# than there are committees.
num_dels = row['Number of Delegates'] - i * max_at_conf
if num_dels > max_at_conf:
team = Team(row['School']+str(i+2), max_at_conf, prefs)
teams.append(team)
else:
team = Team(row['School'], row['Number of Delegates'], prefs)
teams.append(team)
return teams, committees
def assign(teams, committees):
'''
My algorithm! Draft-based assignment. Takes the teams' constraints/preferences and committees and
simulates a draft. Each team got picks assigned at initialization (first round, fourth round, etc.),
and it iterates through each round of the draft until either all delegates are assigned or all
committees are filled.
Inputs:
teams, a list of Team objects from format_for_main
committees, a dict of committees (name : Committee object) from format_for_main
Outputs:
teams, a list of Team objects with assignments
committees, a dict of committees (formatted the same) with assignments
'''
for r in range(len(committees)):
print("round {}".format(r))
for team in teams:
if r in team.picks and len(team.assigned_committees) < team.num_delegates:
# print(team.name, team.preferences)
for pref in team.preferences:
p = team.preferences.pop(team.preferences.index(pref))
c = committees[p]
if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \
- 1 + c.delegation_size:
c.assigned_schools.append(team.name)
team.assigned_committees.append(c.name)
team.num_dels_assigned += c.delegation_size
if team.num_dels_assigned > team.num_delegates:
for i, val in enumerate(team.assigned_committees):
if committees[val].delegation_size == 1:
index_to_drop = i #no break so I can grab the last value
c_to_drop = val
committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\
.assigned_schools.index(team.name))
team.assigned_committees.pop(index_to_drop)
print("assigned {} to {}".format(team.name, c.name))
break
else:
continue
else:
continue
return teams, committees
def output(teams, committees):
'''
Outputs the master documents.
Inputs from assign
'''
all_school_assignments = []
all_comm_assignments = []
for team in teams:
all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees)
for comm in committees:
all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \
+ committees[comm].assigned_schools)
schools_df = pd.DataFrame(all_school_assignments)
schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace = True)
comm_df = pd.DataFrame(all_comm_assignments)
schools_df.to_csv('all_school_assignments.csv')
comm_df.to_csv("all_committees_assignments.csv")
for index, row in schools_df.iterrows():
row.to_csv("school_assignments/{}'s_assignments.csv".format(row['School']))
def go(school_filename, committee_filename):
'''
Runs the whole darn thing.
'''
schools, comms = read_info(school_filename, committee_filename)
teams, committees = format_for_main(schools, comms)
teams, committees = assign(teams, committees)
output(teams, committees)
s = 0
for i in teams: s += i.num_delegates
s2 = 0
for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size
if s == s2:
print("It worked! :)")
else:
print("There's a bug. Bad computer. :(")
if __name__ == "__main__":
try:
go(argv[1], argv[2])
except:
print("Something went wrong. Please make sure your usage is correct and files are formatted correctly.")
print("Usage: python3 get_positions.py [school_info_filepath] [committee info filepath]")
| 3.125 | 3 |
settingsmanager/base.py | rgossington/settings-manager | 0 | 12798345 | import re
class BaseClass:
def _get_keys(self):
attrs = self.get_attributes()
return attrs.keys()
def get_attributes(self):
attrs = self.__dict__
attrs_filtered = {k: v for k, v in attrs.items() if not k.startswith("_")}
return attrs_filtered
@staticmethod
def convert_string_to_list(string):
items = []
if string is not None and len(string) > 0:
items = re.split(r", |,", string)
return items
@staticmethod
def _is_key_or_section_name_valid(name, suppress_exceptions=False):
if name is None:
if not suppress_exceptions:
raise ValueError(f"Key or section name must be a string, not None")
else:
return False
if not isinstance(name, str):
if not suppress_exceptions:
raise ValueError(f"Key or section name must be a string. {name} is type {type(name)}")
else:
return False
if len(name) == 0:
if not suppress_exceptions:
raise ValueError(f"Key or section name must not be blank.")
else:
return False
if name[0] == "_":
if not suppress_exceptions:
raise ValueError(f"Key or section name must not begin with '_'")
else:
return False
if name[0].isnumeric():
if not suppress_exceptions:
raise ValueError(f"Key or section name must not begin with a number")
else:
return False
if re.search(r"[^a-zA-Z_0-9]", name) is not None:
if not suppress_exceptions:
raise ValueError(f"Key or section name must only contain letters, numbers and underscores")
else:
return False
return True
@staticmethod
def _is_line_a_heading(line):
if len(line) <= 2:
return False
return line[0] == "[" and line[-1] == "]"
@staticmethod
def _get_heading_from_line(line):
return line[1:-1]
@staticmethod
def _clean_line(line_raw):
line_cleaned = line_raw.rstrip()
line_cleaned = line_cleaned.replace("= ", "=")
line_cleaned = line_cleaned.replace(" =", "=")
return line_cleaned
@classmethod
def _is_line_an_entry(cls, line):
line = cls._clean_line(line)
try:
equal_index = line.index("=")
except ValueError:
return False
# check if line to left of equal sign is a valid key
return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True)
@classmethod
def _get_key_from_line(cls, line):
if not cls._is_line_an_entry(line):
return None
line = cls._clean_line(line)
equal_index = line.index("=")
return line[:equal_index]
@classmethod
def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True):
if not cls._is_line_an_entry(line):
return None
line = cls._clean_line(line)
equal_index = line.index("=")
value = line[equal_index + 1:]
if parse_bool:
value = cls._attempt_parse_bool(value)
if parse_float:
value = cls._attempt_parse_float(value)
if parse_int and not isinstance(value, float):
value = cls._attempt_parse_int(value)
return value
@staticmethod
def _attempt_parse_bool(value):
if isinstance(value, str):
line_lower = value.lower()
if line_lower == "true":
return True
if line_lower == "false":
return False
return value
@staticmethod
def _attempt_parse_int(value):
if isinstance(value, str):
if value.count(".") == 0:
try:
return int(value)
except ValueError:
pass
return value
@staticmethod
def _attempt_parse_float(value):
if isinstance(value, str):
if value.count(".") > 0:
try:
return float(value)
except ValueError:
pass
return value
@staticmethod
def _generate_file_line(key, value):
return f"{key} = {value}\n"
| 2.9375 | 3 |
src/database/querys/clients.py | g4-mobile/g4-mobile-api | 0 | 12798346 | <filename>src/database/querys/clients.py
from typing import List
from src.database import DBConnectionHendler
from src.database.db_connection import db_connector
from src.database.models import Client
class ClientQuerys:
"""Criando um novo cliente"""
@classmethod
def new(cls, nome):
"""someting"""
with DBConnectionHendler() as db_connection:
try:
client = Client(name=nome.upper())
db_connection.session.add(client)
db_connection.session.commit()
except:
db_connection.session.rollback()
raise
finally:
db_connection.session.close()
@classmethod
def get_all(cls) -> List:
"""Retorna uma lista de todos os clients"""
with DBConnectionHendler() as db_connection:
try:
return db_connection.session.query(Client).all()
except:
db_connection.session.rollback()
raise
finally:
db_connection.session.close()
@classmethod
def get_id(cls, client_id):
"""someting"""
with DBConnectionHendler() as db_connection:
try:
return (
db_connection.session.query(Client)
.filter_by(id=client_id)
.first()
)
except:
db_connection.session.rollback()
raise
finally:
db_connection.session.close()
""" Create a new user """
@classmethod
@db_connector
def delete(cls, connection, client_id: int) -> None:
client = (
connection.session.query(Client)
.filter_by(id=client_id)
.first()
)
connection.session.delete(client)
connection.session.commit()
| 2.84375 | 3 |
tests/unit/test_bucket.py | Kapuca/ccxt_microservice | 0 | 12798347 | from ccxt_microservice.bucket import Bucket
import pytest
@pytest.fixture
def the_bucket():
return Bucket(10, 1, 5)
def test_state(the_bucket):
assert 4.99 < the_bucket.state() < 5
def test_push(the_bucket):
the_bucket.push(5)
assert 9.99 < the_bucket.state() < 10
def test_timeToWait(the_bucket):
assert 4.99 < the_bucket.timeToWait(10) < 5
def test_add():
pass
def test_wait():
pass
| 2.125 | 2 |
logL.py | mkherman/HRS-logL | 1 | 12798348 | """
Author: <NAME>
Created: 2020-10-28
Last Modified: 2021-05-11
Description: Calculates the 6-D log likelihood map for a series of atmospheric
models cross-correlated with planetary emission spectra. Parameters are log VMR,
day-night contrast, peak phase offset, scaled line contrast, orbital velocity,
and systemic velocity.
NOTE: Because this computes the full likelihood map, not MCMC chains, this file
is very computationally expensive to run when the full parameter grid is used,
and the output can be multiple Gigabytes. Either run the file on a server that
can handle this or reduce the ranges and/or stepsizes for the parameter arrays.
"""
from astropy.io import fits
import numpy as np
from scipy.interpolate import interp1d
from astropy.convolution import convolve, Gaussian1DKernel
import argparse
from scipy.optimize import curve_fit
from scipy.signal import butter, sosfiltfilt
def planck(wavelength,temp):
"""
Calculates the Planck function for a given temperature over a
given wavelength range.
"""
c1 = 1.1911e-12
c2 = 1.439
y = 1e4/wavelength
a = c1*(y**5.)
tmp = c2*y/temp
b = np.exp(tmp) - 1.
bbsor = a/b
return bbsor
def remove_env(wave, spec, px):
"""
Subtracts the lower envelope from a model spectrum by finding
the minimum value in the given stepsize, then interpolating.
"""
low_wave, low_spec = [], []
for i in range(len(spec)/px - 1):
idx = np.nanargmin(spec[i*px:(i+1)*px])
low_spec.append(spec[idx+i*px])
low_wave.append(wave[idx+i*px])
interp = interp1d(low_wave, low_spec, fill_value='extrapolate')
envelope = interp(wave)
corrected = spec - envelope
return corrected
def butterworth(x, order, freq, filt_type='highpass'):
"""
Applies a high-pass Butterworth filter, with a given order and
cut-off frequency, to the given model.
"""
butterfilt = butter(order, freq, btype=filt_type, output='sos')
x_filtered = sosfiltfilt(butterfilt, x)
return x_filtered
def wavegrid(wavemin,wavemax,res):
"""
Creates a wavelength array evenly spaced in resolution.
"""
c=299792458.
dx=np.log(1.+1./res)
x=np.arange(np.log(wavemin),np.log(wavemax),dx)
wavelength=np.exp(x)
#waveno=1e4/wavelength
return wavelength #,waveno
def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp):
"""
Calculates the cross-correlation map for a given spectral order,
along with the other two terms of the log likelihood equation:
the spectra squared, and the base model squared.
"""
cmap = np.empty((len(spec),len(vgrid)))
lnL_term1 = np.empty(len(spec))
lnL_term2 = np.empty((len(spec),len(vgrid)))
# Isolate wavelength range and scale data
w_idx = (wave[0,:] >= minwave) & (wave[0,:] <= maxwave)
for frame in range(len(spec)):
fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx])
fixspec /= stdev[frame,w_idx]
# Calculate data term for log likelihood
lnL_term1[frame] = np.nansum(fixspec**2)
for i, vel in enumerate(vgrid):
# Shift model to desired velocity and scale
redshift = 1. - vel / 3e5
shift_wave = wave[0,w_idx] * redshift
mspec_shifted = model_interp(shift_wave)
mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted)
mspec_weighted /= stdev[frame,w_idx]
# Perform cross-correlation
corr_top = np.nansum(mspec_weighted * fixspec)
#corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2))
cmap[frame,i] = corr_top #/ corr_bot
# Calculate model term for log likelihood
lnL_term2[frame,i] = np.nansum(mspec_weighted**2)
return cmap, lnL_term1, lnL_term2
def submed(cmap):
"""
Subtracts the median along the velocity axis from the
cross-correlation map.
"""
mdn = np.nanmedian(cmap,axis=1)
sub = cmap - mdn[:,np.newaxis]
return sub
def phasefold(Kps, vgrid, vsys, cmap, phase):
"""
Shifts the cross-correlation map to planet's rest frame and
creates the Kp-Vsys map.
"""
fmap = np.empty((len(Kps), len(vsys)))
KTVmap = np.zeros((len(Kps), len(cmap), len(vsys)))
for i, Kp in enumerate(Kps):
fullmap = np.empty((len(cmap),len(vsys)))
for frame in range(len(phase)):
# Shift to planet's orbital velocity
vp = Kp * np.sin(2.*np.pi*phase[frame])
vshift = vgrid - vp
shift = interp1d(vshift, cmap[frame,:], bounds_error=False)
shifted_map = shift(vsys)
fullmap[frame,:] = shifted_map
KTVmap[i] = fullmap
fmap[i,:] = np.nansum(fullmap, axis=0)
return fmap, KTVmap
def chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys, phase):
"""
Calculates the chi squared portion of the lnL from the
previously computed cross-correlation map and other base
terms, for a given set of scaled line contrast values.
"""
X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys)
# Shift merr and cmap to the planet's velocity, so their axes are (Kp, time, Vsys)
_, term2_shift = phasefold(Kps, vgrid, vsys, merr, phase)
_, term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase)
# Calculate the log likelihood for each value of alpha
for i,a in enumerate(alpha):
X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2 * a * term3_shift
# Sum the log likelihood in time
X2[i] = np.nansum(X2_KTV, axis=1)
return X2
def brightvar(phase, offset_deg, contrast):
"""
Computes the brightness variation for a given set of day-night
contrast and peak phase offset values over a given phase range.
"""
offset = offset_deg / 360.
# Equation: Ap = 1 - C * cos^2 (pi * (phi - theta))
A_p = 1. - contrast[:,np.newaxis,np.newaxis] * \
np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \
offset[np.newaxis,:,np.newaxis]))**2
return A_p
###############################################################################
parser = argparse.ArgumentParser(description="Likelihood Mapping of High-resolution Spectra")
parser.add_argument("-nights", nargs="*", help="MJD nights", type=str)
parser.add_argument("-d", '--datapath', default="./", help="path to data")
parser.add_argument("-m", '--modelpath', default="./", help="path to models")
parser.add_argument("-o", '--outpath', default="./", help="path for output")
parser.add_argument("-ext", '--extension', default=".fits", help="output file name extension")
args = parser.parse_args()
nights = args.nights
data_path = args.datapath
model_path = args.modelpath
out_path = args.outpath
ext = args.extension
# Define parameter arrays
vmrs = np.arange(-5., -2.1, 0.1)
alpha = np.arange(0.5, 5., 0.1)
vgrid = np.arange(-600.,601.5, 1.5)
Vsys = np.arange(-150., 150., 0.5)
Kps = np.arange(175.,275., 0.5)
offset = np.arange(-30.,60., 1.)
contrast = np.arange(0.,1.1, 0.1)
lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys)))
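# lnL axes: (log VMR, day-night contrast, peak phase offset, scaled line contrast, Kp, Vsys)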
# Specify number of SYSREM iterations used on spectra for each MJD night
iters = {'56550': 5, '56561': 4, '56904': 4, '56915': 6, '56966': 6, '57321': 6}
# Specify Butterworth filter cut-off frequency for each night
bfreq = {'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025}
for night in nights:
# Read in data
spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames, pixels)
wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels)
phase = np.load(data_path+night+'_phase.npy') # (frames)
    # Only include orbital phases with |phase| < 0.41 (i.e. below 0.41 and above 0.59 in the 0-1 convention), to avoid the stellar Fe signal
p_ind = np.where((phase < 0.41) & (phase > -0.41))[0]
phase = phase[p_ind]
spec = spec[:,p_ind,:]
wave = wave[:,p_ind,:]
# Determine size of arrays
n_orders = spec.shape[0]
n_frames = spec.shape[1]
n_pix = spec.shape[2]
for v,vmr in enumerate(vmrs):
# Get dayside model
hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum)
model = hdu[0].data
# Interpolate model to wavelength grid with consistent resolution
m_wave = wavegrid(model[0,0], model[0,-1], 3e5)
wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False)
m_spec = wv_interp(m_wave)
# Convolve model with 1D Gaussian kernel, then filter
FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8}
mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35))
#mspec_day = remove_env(m_wave,mspec_conv, 250)
mspec_bf = butterworth(mspec_conv, 1, bfreq[night])
# Create interpolator to put model onto data's wavelength grid
filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False)
# Create variables/arrays for lnL components
N = 0.
cmap_osum = np.zeros((n_frames, len(vgrid)))
merr_osum = np.zeros((n_frames, len(vgrid)))
serr_osum = np.zeros((n_frames))
# Perform cross-correlation for orders redward of 600 nm, and sum together
for i,o in enumerate(np.arange(24,37)):
# Calculate time- and wavelength-dependent uncertainties
tsigma = np.nanstd(spec[o], axis=0)
wsigma = np.nanstd(spec[o], axis=1)
sigma = np.outer(wsigma, tsigma)
sigma /= np.nanstd(spec[o,:,:])
sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20
# Calculate number of data points in spectra
minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:])
minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:])
N += len(wave[o,0,minwidx:maxwidx]) * len(phase)
# Perform cross-correlation
cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp)
cmap = submed(cmap0)
cmap_osum += cmap
merr_osum += merr
serr_osum += serr
# Compute brightness variation for given contrasts and offsets
variation = brightvar(phase, offset, contrast)
# Apply brightness variation to lnL terms
lnL_term1 = serr_osum
lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2
lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]
# Calculate lnL for given VMR
for i in range(len(contrast)):
for j in range(len(offset)):
X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase)
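                # Map the summed chi-squared-like statistic onto a log likelihood, lnL = -(N/2) * ln(X2 / N)
                # (cf. the CCF-to-lnL framework of Brogi & Line 2019), accumulated over nights and orders.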
lnL[v,i,j] += -N/2. * np.log(X2 / N)
# Find highest likelihood values
maximum = np.nanmax(lnL)
maxes = np.where(lnL == maximum)
fidx = maxes[0][0]
cidx = maxes[1][0]
oidx = maxes[2][0]
aidx = maxes[3][0]
kidx = maxes[4][0]
vidx = maxes[5][0]
# Print highest likelihood values
print('Location of highest likelihood:')
print('logVMR = %.1f' % (vmrs[fidx]))
print('C = %.1f' % (contrast[cidx]))
print('off = %.1f' % (offset[oidx]))
print('a = %.1f' % (alpha[aidx]))
print('Kp = %.1f' % (Kps[kidx]))
print('Vsys = %.1f' % (Vsys[vidx]))
# Write lnL to fits file
hdu2 = fits.PrimaryHDU(lnL)
hdu2.writeto(out_path+'lnL_wasp33b_FeI%s' % (ext), overwrite=True)
| 2.625 | 3 |
entrypoint.py | vliz-be-opsci/rocrate-to-pages | 0 | 12798349 | #!/usr/bin/env python3
# NOTE: If you are using an alpine docker image
# such as pyaction-lite, the -S option above won't
# work. The above line works fine on other linux distributions
# such as debian, etc, so the above line will work fine
# if you use pyaction:4.0.0 or higher as your base docker image.
# Steps in this action:
# - check if correct files exist.
# - create symlink for index.html and crate.json
# - check validation?
import sys
import os
from pathlib import Path
import subprocess
import logging
from bs4 import BeautifulSoup
log = logging.getLogger('entrypoint')
class CrateObj():
def __init__(self, crate_dir):
self.crate_dir = crate_dir
self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json'))
self.metadata_exists = False
self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html'))
self.preview_exists = False
self.crate_valid = None
self.metadata_valid = None
self.preview_valid = None
def check_rocrate_valid(self):
# Checks if there are rocrate objects in directory
log.debug('Checking that rocrate files exist...')
if os.path.exists(self.metadata_path):
log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path))
self.metadata_exists = True
elif os.path.exists(self.metadata_path.with_suffix('.jsonld')):
self.metadata_path = self.metadata_path.with_suffix('.jsonld')
log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path))
self.metadata_exists = True
else:
log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path))
self.metadata_exists = False
self.crate_valid = False
            sys.exit(1)
if os.path.exists(self.preview_path):
log.debug('ROCrate preview file exists: {0}'.format(self.preview_path))
self.preview_exists = True
else:
log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path))
self.preview_exists = False
self.check_metadata()
self.check_preview()
if self.metadata_valid and self.preview_valid:
log.info('Crate passes validity checks.')
self.crate_valid = True
return
def check_metadata(self):
log.debug('Checking if metadata is valid...')
        # TODO: implement a real metadata validation check
self.metadata_valid = True
return
def check_preview(self):
log.debug('Checking if preview is valid...')
        # TODO: implement a real preview validation check
self.preview_valid = True
return
def create_symlink(dst, src):
# This creates a symbolic link on python in tmp directory
log.debug(f'Creating symlink between {src} and {dst}')
try:
os.symlink(src, dst)
except Exception as err:
log.warning('Problem while creating symlink:')
log.warning(err)
return
def create_preview_html(crate_obj):
    '''
    Uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview HTML page
    from an RO-Crate metadata JSON file, e.g.:
        rochtml rocrate_datacrate_test/ro-crate-metadata.json
    '''
log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path))
metadata_file = crate_obj.metadata_path
subprocess.check_call(f'rochtml {metadata_file}', shell=True)
# log.debug('Adding Header/Footer template to preview file...')
# #TODO: Find a better way of getting the header/footer templates.
# NOTE: Header/footer functionality moved to jekyll
# with open(crate_obj.preview_path, 'r') as preview_file:
# soup = BeautifulSoup(preview_file, 'html.parser')
# #Add Header
# header_path = './header.html'
# with open(header_path) as header_file:
# head_soup = BeautifulSoup(header_file, 'html.parser')
# soup.html.body.insert_before(head_soup)
# #Add Footer
# footer_path = './footer.html'
# with open(footer_path, 'r') as footer_file:
# foot_soup = BeautifulSoup(footer_file, 'html.parser')
# soup.html.body.append(foot_soup)
# # Write updated page to html file
# with open('./test_out.html','wb') as outfile:
# outfile.write(soup.prettify("utf-8"))
return
def publish_rocrate(crate_dir):
# steps to follow to create the correct files to publish to GH-Pages
log.info('Preparing to publish ROCrate.')
this_crate = CrateObj(crate_dir)
this_crate.check_rocrate_valid()
create_preview_html(this_crate)
# create_symlink('index.html', this_crate.preview_path)
# if this_crate.preview_exists:
# create_preview_html(this_crate)
# create_symlink('index.html', this_crate.preview_path)
# else:
# #Create index.html page
# create_preview_html(this_crate)
# create_symlink('index.html', this_crate.preview_path)
if this_crate.metadata_exists:
## Create symlink between the .json >> .jsonld file extensions depending on which exists
        # os.path.splitext returns a (root, ext) tuple, so compare the extension element
        if os.path.splitext(this_crate.metadata_path)[1] == '.json':
            create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path)
        elif os.path.splitext(this_crate.metadata_path)[1] == '.jsonld':
            create_symlink('ro-crate-metadata.json', this_crate.metadata_path)
log.info('ROCrate ready to publish')
if __name__ == "__main__":
    # Read the crate directory and log level from the command-line arguments
    crate_path = sys.argv[1]
    loglevel = sys.argv[2]
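    # Example invocation (the crate path is hypothetical): python entrypoint.py ./my-crate INFO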
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=getattr(logging, loglevel))
log.setLevel(getattr(logging, loglevel))
# The work:
publish_rocrate(crate_path) | 1.984375 | 2 |
automatization_of_data_mining_project/data_set_statistic_reporter/test/test_data_set_statistic_reporter.py | Sale1996/automatization_of_data_mining_project | 0 | 12798350 | <filename>automatization_of_data_mining_project/data_set_statistic_reporter/test/test_data_set_statistic_reporter.py
import unittest
from data_set_statistic_reporter.classes.statistic_generator.implementations.column_names_statistic_generator import \
ColumnNamesStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.missing_data_statistic_generator import \
MissingDataStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \
RangeStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \
UniqueImpressionStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \
VarianceStatisticGenerator
from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass
from data_set_statistic_reporter.depedency_injector.container import Container
import pandas as pd
class DataSetStatisticReporterTestBase(unittest.TestCase):
pass
class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase):
def setUp(self):
self.data_set_statistic_reporter = Container.statistic_reporter_data_set()
self.columns_statistic_generator = ColumnNamesStatisticGenerator([])
self.range_statistic_generator = RangeStatisticGenerator(['Year'])
self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code'])
self.variance_statistic_generator = VarianceStatisticGenerator(["Year"])
self.missing_data_statistic_generator = MissingDataStatisticGenerator(["Test"])
def test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays(
self):
empty_statistic_object = StatisticReporterDataClass("Test", pd.DataFrame([]), [])
expected_return_value = [["Test"], [], []]
self.assertEqual(expected_return_value,
self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object]))
def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics(
self):
data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()
data_set_with_range_report = StatisticReporterDataClass(
"Test", pd.DataFrame(data_frame_values, columns=data_frame_columns),
[self.columns_statistic_generator, self.range_statistic_generator]
)
expected_return_value = [["Test"], ["Columns", "Year - Range"],
["Year, Country Code, Test", "1900-2020"]]
self.assertEqual(expected_return_value,
self.data_set_statistic_reporter.get_statistics_as_data_set([data_set_with_range_report]))
def test_given_statistic_object_with_unique_impression_report_array_when_reports_statistic_then_return_data_set_name_with_unique_impression_report(
self):
data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()
data_set_with_unique_impression_report = StatisticReporterDataClass(
"Test", pd.DataFrame(data_frame_values, columns=data_frame_columns),
[self.columns_statistic_generator, self.unique_impression_statistic_generator]
)
expected_return_value = [["Test"], ["Columns", "Country Code - Unique impressions"],
["Year, Country Code, Test", 4]]
self.assertEqual(expected_return_value,
self.data_set_statistic_reporter.get_statistics_as_data_set(
[data_set_with_unique_impression_report]))
def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report(
self):
data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()
data_set_with_missing_data_report = StatisticReporterDataClass(
"Test", pd.DataFrame(data_frame_values, columns=data_frame_columns),
[self.columns_statistic_generator, self.missing_data_statistic_generator]
)
expected_return_value = [["Test"], ["Columns",
"Test - Total number of data",
"Test - Total number of missing data",
"Test - Total percent of missing data"],
["Year, Country Code, Test", 6, 1, 16.67]]
self.assertEqual(expected_return_value,
self.data_set_statistic_reporter.get_statistics_as_data_set(
[data_set_with_missing_data_report]))
def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report(
self):
data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()
data_set_with_variance_report = StatisticReporterDataClass(
"Test", pd.DataFrame(data_frame_values, columns=data_frame_columns),
[self.columns_statistic_generator, self.variance_statistic_generator]
)
expected_return_value = [["Test"], ["Columns",
"Year - Mean value",
"Year - Standard deviation",
"Year - Variance"],
["Year, Country Code, Test", 1978, 41, 1687.22]]
self.assertEqual(expected_return_value,
self.data_set_statistic_reporter.get_statistics_as_data_set(
[data_set_with_variance_report]))
def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics(
self):
data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()
data_set_with_multiple_reports = StatisticReporterDataClass(
"Test", pd.DataFrame(data_frame_values, columns=data_frame_columns),
[self.columns_statistic_generator, self.range_statistic_generator,
self.unique_impression_statistic_generator,
self.variance_statistic_generator, self.missing_data_statistic_generator]
)
expected_return_value = [["Test"], ["Columns", "Year - Range", "Country Code - Unique impressions",
"Year - Mean value", "Year - Standard deviation",
"Year - Variance", "Test - Total number of data",
"Test - Total number of missing data",
"Test - Total percent of missing data"],
["Year, Country Code, Test", "1900-2020", 4, 1978, 41, 1687.22, 6, 1, 16.67]]
self.assertEqual(expected_return_value,
self.data_set_statistic_reporter.get_statistics_as_data_set(
[data_set_with_multiple_reports]))
def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics(
self):
data_frame_1_columns, data_frame_1_values = self.get_test_data_frame_values_and_columns()
data_frame_2_values = [
["SRB", 1], ["JPN", 2], ["RUS", ], ["SRB", 4], ["ESP", 5],
["JPN", 6]]
data_frame_2_columns = ['Country Code', 'Test']
data_set_1_with_multiple_reports = StatisticReporterDataClass(
"Test", pd.DataFrame(data_frame_1_values, columns=data_frame_1_columns),
[self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator,
self.variance_statistic_generator, self.missing_data_statistic_generator]
)
data_set_2_with_multiple_reports = StatisticReporterDataClass(
"Test", pd.DataFrame(data_frame_2_values, columns=data_frame_2_columns),
[self.columns_statistic_generator, self.unique_impression_statistic_generator,
self.missing_data_statistic_generator]
)
expected_return_value = [["Test"], ["Columns", "Year - Range", "Country Code - Unique impressions",
"Year - Mean value", "Year - Standard deviation",
"Year - Variance", "Test - Total number of data",
"Test - Total number of missing data",
"Test - Total percent of missing data"],
["Year, Country Code, Test", "1900-2020", 4, 1978, 41, 1687.22, 6, 1, 16.67],
["Test"], ["Columns", "Country Code - Unique impressions",
"Test - Total number of data",
"Test - Total number of missing data",
"Test - Total percent of missing data"],
["Country Code, Test", 4, 6, 1, 16.67]
]
self.assertEqual(expected_return_value,
self.data_set_statistic_reporter.get_statistics_as_data_set(
[data_set_1_with_multiple_reports, data_set_2_with_multiple_reports]))
def get_test_data_frame_values_and_columns(self):
data_frame_values = [
[1900, "SRB", 1], [1950, "JPN", 2], [1990, "RUS", ], [2010, "SRB", 4], [2020, "ESP", 5], [1996, "JPN", 6]]
data_frame_columns = ['Year', 'Country Code', 'Test']
return data_frame_columns, data_frame_values
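
if __name__ == '__main__':
    # Allows running this test module directly, e.g. `python test_data_set_statistic_reporter.py`.
    unittest.main()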
| 2.59375 | 3 |