max_stars_repo_path (string, len 3-269) | max_stars_repo_name (string, len 4-119) | max_stars_count (int64, 0-191k) | id (string, len 1-7) | content (string, len 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
pythia/antlr4/PythiaFunctionCallListener.py | 5sigmapoint2/pythiaparser | 0 | 12798851 | <filename>pythia/antlr4/PythiaFunctionCallListener.py
# Generated from /Users/enrique/workspace/other/frontline/pythia/PythiaFunctionCall.g4 by ANTLR 4.5.3
from antlr4 import *
if __name__ is not None and "." in __name__:
from .PythiaFunctionCallParser import PythiaFunctionCallParser
else:
from PythiaFunctionCallParser import PythiaFunctionCallParser
# This class defines a complete listener for a parse tree produced by PythiaFunctionCallParser.
class PythiaFunctionCallListener(ParseTreeListener):
# Enter a parse tree produced by PythiaFunctionCallParser#call.
def enterCall(self, ctx: PythiaFunctionCallParser.CallContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#call.
def exitCall(self, ctx: PythiaFunctionCallParser.CallContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#full_function_name.
def enterFull_function_name(self,
ctx: PythiaFunctionCallParser.Full_function_nameContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#full_function_name.
def exitFull_function_name(self,
ctx: PythiaFunctionCallParser.Full_function_nameContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#argument.
def enterArgument(self, ctx: PythiaFunctionCallParser.ArgumentContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#argument.
def exitArgument(self, ctx: PythiaFunctionCallParser.ArgumentContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#ArrayOfValues.
def enterArrayOfValues(self,
ctx: PythiaFunctionCallParser.ArrayOfValuesContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#ArrayOfValues.
def exitArrayOfValues(self,
ctx: PythiaFunctionCallParser.ArrayOfValuesContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#EmptyArray.
def enterEmptyArray(self, ctx: PythiaFunctionCallParser.EmptyArrayContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#EmptyArray.
def exitEmptyArray(self, ctx: PythiaFunctionCallParser.EmptyArrayContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#ArrayValue.
def enterArrayValue(self, ctx: PythiaFunctionCallParser.ArrayValueContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#ArrayValue.
def exitArrayValue(self, ctx: PythiaFunctionCallParser.ArrayValueContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#String.
def enterString(self, ctx: PythiaFunctionCallParser.StringContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#String.
def exitString(self, ctx: PythiaFunctionCallParser.StringContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#Integer.
def enterInteger(self, ctx: PythiaFunctionCallParser.IntegerContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#Integer.
def exitInteger(self, ctx: PythiaFunctionCallParser.IntegerContext):
pass
# Enter a parse tree produced by PythiaFunctionCallParser#Float.
def enterFloat(self, ctx: PythiaFunctionCallParser.FloatContext):
pass
# Exit a parse tree produced by PythiaFunctionCallParser#Float.
def exitFloat(self, ctx: PythiaFunctionCallParser.FloatContext):
pass
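# --- Editor's addition: a minimal, hedged usage sketch (not part of the generated file).
# ANTLR normally generates a companion lexer module alongside the parser; its name below
# follows that convention but is an assumption, as the lexer is not shown here, and the
# input string is hypothetical.
if __name__ == "__main__":
    from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
    from PythiaFunctionCallLexer import PythiaFunctionCallLexer  # assumed generated module
    stream = InputStream("fnc_doSomething ['arg1', 2]")  # hypothetical input
    tokens = CommonTokenStream(PythiaFunctionCallLexer(stream))
    tree = PythiaFunctionCallParser(tokens).call()  # 'call' is the grammar's entry rule
    ParseTreeWalker.DEFAULT.walk(PythiaFunctionCallListener(), tree)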
| 2.203125 | 2 |
src/ci/model/agent.py | dreicefield/ci-runner | 1 | 12798852 | #!/usr/bin/env python3
import logging
import subprocess
from typing import Dict
class Agent:
name: str
image: str
environment: Dict[str, str]
def __init__(self, name: str, image: str, environment: Dict[str, str]) -> None:
self.name = name
self.image = image
self.environment = environment
def run(self) -> None:
logging.info("Starting agent '%s' based on image '%s'", self.name, self.image)
subprocess.run(
[
"docker",
"run",
"-d",
"--rm",
"-it",
"--name",
self.name,
self.image,
"/bin/sleep",
"infinity",
]
)
def cleanup(self) -> None:
logging.info("Stopping agent '%s'", self.name)
subprocess.run(["docker", "stop", self.name])
def get_agent_by_label(name: str, label: str) -> Agent:
# TODO: lookup label in config file?
return Agent("ci-agent", "ubuntu:20.04", {})
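# Editor's addition: a minimal usage sketch. Note that get_agent_by_label() currently
# ignores its arguments (see the TODO above), so any name/label yields the default agent.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    agent = get_agent_by_label("build", "linux")  # hypothetical name/label
    agent.run()
    agent.cleanup()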
| 2.46875 | 2 |
eit/elliptic_solver.py | Forgotten/EIT | 1 | 12798853 | class EllipticSolver:
    def __init__(self, v_h):
        self.v_h = v_h
    def update_matrices(self, sigma_vec):
        # stiffness_matrix() is assumed to be supplied elsewhere in this package
        vol_idx = self.v_h.mesh.vol_idx
        bdy_idx = self.v_h.mesh.bdy_idx
        self.S = stiffness_matrix(self.v_h, sigma_vec)
    def build_matrices(self):
        # mass_matrix() is assumed to be supplied elsewhere in this package
        self.Mass = mass_matrix(self.v_h)
        # self.Kx
        # self.Ky
    def dtn_map(self):
        # TODO: assemble the Dirichlet-to-Neumann map from self.S and self.Mass
        pass
| 2.421875 | 2 |
tests/tests/urls_converters/tests.py | ChanTsune/Django-Boost | 25 | 12798854 | <reponame>ChanTsune/Django-Boost<gh_stars>10-100
import os
from django.test import override_settings
from django.urls import reverse
from django_boost.test import TestCase
ROOT_PATH = os.path.dirname(__file__)
@override_settings(
ROOT_URLCONF='tests.tests.urls_converters.urls',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(ROOT_PATH, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django_boost.context_processors.user_agent',
],
},
}]
)
class TestConverter(TestCase):
def test_path_converters(self):
case = [('bin', '1010'),
('bin', 12),
('oct', '7'),
('oct', 7),
('hex', 'd'),
('hex', 12),
('bin_str', '1010'),
('oct_str', '236'),
('hex_str', '234'),
('float', 1.1),
('float', '1.1'),
('float', 1),
('float', '1'),
('date', '2020/2/29'),
]
for name, value in case:
url = reverse(name, kwargs={name: value})
response = self.client.get(url)
self.assertStatusCodeEqual(response, 200)
def test_failed_case(self):
from django.urls.exceptions import NoReverseMatch
with self.assertRaises(NoReverseMatch):
reverse('float', kwargs={'float': '1.'})
with self.assertRaises(NoReverseMatch):
reverse('date', kwargs={'date': '2019/2/29'})
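# Editor's note: the converters exercised above live in tests.tests.urls_converters.urls
# (django_boost ships its own converter classes, as the imports in TestRegex show).
# For orientation, a plain-Django registration of such a converter looks roughly like
# this hedged sketch, where `float_view` is hypothetical:
#
#     from django.urls import path, register_converter
#
#     class FloatConverter:
#         regex = r"[0-9]+(\.[0-9]+)?"   # rejects trailing-dot inputs such as '1.'
#         def to_python(self, value):
#             return float(value)
#         def to_url(self, value):
#             return str(value)
#
#     register_converter(FloatConverter, "float")
#     urlpatterns = [path("<float:float>/", float_view, name="float")]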
class TestRegex(TestCase):
DATE_FORMAT = "%d/%d"
DATE_TEST_CASE = [(m, d) for m in range(20) for d in range(40)]
def test_year_regex(self):
import re
        from calendar import isleap as _isleap
from django_boost.urls.converters.date import REGEX_LEAP_YEAR
regex_is_leap = re.compile(REGEX_LEAP_YEAR).fullmatch
def isleap(value):
            return value != 0 and _isleap(value)
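        # Editor's note: e.g. '2000' must fullmatch REGEX_LEAP_YEAR while '1900' must
        # not, since calendar.isleap(2000) is True and calendar.isleap(1900) is False.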
for i in range(10000):
value = str(i)
with self.subTest(value, value=value):
result = bool(regex_is_leap(value))
self.assertEqual(isleap(i), result)
def test_date_31_regex(self):
import re
from django_boost.urls.converters.date import REGEX_DATE_31
regex_date_31_fullmatch = re.compile(REGEX_DATE_31).fullmatch
for m, d in self.DATE_TEST_CASE:
value = self.DATE_FORMAT % (m, d)
with self.subTest(value, value=value):
result = bool(regex_date_31_fullmatch(value))
if m in [1, 3, 5, 7, 8, 10, 12] and d in range(1, 32):
self.assertTrue(result)
else:
self.assertFalse(result)
def test_date_30_regex(self):
import re
from django_boost.urls.converters.date import REGEX_DATE_30
regex_date_30_fullmatch = re.compile(REGEX_DATE_30).fullmatch
for m, d in self.DATE_TEST_CASE:
value = self.DATE_FORMAT % (m, d)
with self.subTest(value, value=value):
result = bool(regex_date_30_fullmatch(value))
if m in [4, 6, 9, 11] and d in range(1, 31):
self.assertTrue(result)
else:
self.assertFalse(result)
def test_date_29_regex(self):
import re
from django_boost.urls.converters.date import REGEX_DATE_29
regex_date_29_fullmatch = re.compile(REGEX_DATE_29).fullmatch
for m, d in self.DATE_TEST_CASE:
value = self.DATE_FORMAT % (m, d)
with self.subTest(value, value=value):
result = bool(regex_date_29_fullmatch(value))
if m == 2 and d in range(1, 30):
self.assertTrue(result)
else:
self.assertFalse(result)
def test_date_28_regex(self):
import re
from django_boost.urls.converters.date import REGEX_DATE_28
regex_date_28_fullmatch = re.compile(REGEX_DATE_28).fullmatch
for m, d in self.DATE_TEST_CASE:
value = self.DATE_FORMAT % (m, d)
with self.subTest(value, value=value):
result = bool(regex_date_28_fullmatch(value))
if m == 2 and d in range(1, 29):
self.assertTrue(result)
else:
self.assertFalse(result)
def test_date_time_regex(self):
import re
from datetime import datetime
from django_boost.urls.converters.date import REGEX_DATE
regex_fullmatch = re.compile(REGEX_DATE).fullmatch
def is_valid_date(y, m, d):
try:
datetime(year=y, month=m, day=d)
return True
except ValueError:
return False
for y in range(10000):
for m in range(14):
for d in range(32):
with self.subTest("%s/%s/%s" % (y, m, d)):
self.assertEqual(is_valid_date(y, m, d), bool(
regex_fullmatch("%s/%s/%s" % (y, m, d))))
| 2.328125 | 2 |
tests/unit/TestValidation.py | rakhimov/rtk | 0 | 12798855 | <reponame>rakhimov/rtk
#!/usr/bin/env python -O
"""
This is the test class for testing Validation module algorithms and models.
"""
# -*- coding: utf-8 -*-
#
# tests.unit.TestValidation.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> <EMAIL>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))) + "/rtk", )
import unittest
from nose.plugins.attrib import attr
import dao.DAO as _dao
from validation.Validation import Model, Validation
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2015 Andrew "Weibullguy" Rowland'
class TestValidationModel(unittest.TestCase):
"""
Class for testing the Validation data model class.
"""
def setUp(self):
"""
Setup the test fixture for the Validation class.
"""
self.DUT = Model()
@attr(all=True, unit=True)
def test_create(self):
"""
(TestValidation) __init__ should return a Validation model
"""
self.assertTrue(isinstance(self.DUT, Model))
self.assertEqual(self.DUT.revision_id, 0)
self.assertEqual(self.DUT.validation_id, 0)
self.assertEqual(self.DUT.task_description, '')
self.assertEqual(self.DUT.task_type, 0)
self.assertEqual(self.DUT.task_specification, '')
self.assertEqual(self.DUT.measurement_unit, 0)
self.assertEqual(self.DUT.min_acceptable, 0.0)
self.assertEqual(self.DUT.mean_acceptable, 0.0)
self.assertEqual(self.DUT.max_acceptable, 0.0)
self.assertEqual(self.DUT.variance_acceptable, 0.0)
self.assertEqual(self.DUT.start_date, 719163)
self.assertEqual(self.DUT.end_date, 719163)
self.assertEqual(self.DUT.status, 0.0)
self.assertEqual(self.DUT.minimum_time, 0.0)
self.assertEqual(self.DUT.average_time, 0.0)
self.assertEqual(self.DUT.maximum_time, 0.0)
self.assertEqual(self.DUT.mean_time, 0.0)
self.assertEqual(self.DUT.time_variance, 0.0)
self.assertEqual(self.DUT.minimum_cost, 0.0)
self.assertEqual(self.DUT.average_cost, 0.0)
self.assertEqual(self.DUT.maximum_cost, 0.0)
self.assertEqual(self.DUT.mean_cost, 0.0)
self.assertEqual(self.DUT.cost_variance, 0.0)
@attr(all=True, unit=True)
def test_set_attributes(self):
"""
(TestValidation) set_attributes should return a 0 error code on success
"""
_values = (0, 0, 'Description', 0, 'Specification', 0, 0.0, 0.0,
0.0, 0.0, 719163, 719163, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 95.0)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 0)
@attr(all=True, unit=True)
def test_set_attributes_wrong_type(self):
"""
(TestValidation) set_attributes should return a 10 error code when passed a wrong data type
"""
_values = (0, 0, 'Description', 0, 'Specification', 0, 0.0, 0.0,
0.0, 0.0, 719163, 'Date', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 95.0)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 10)
@attr(all=True, unit=True)
def test_set_attributes_missing_index(self):
"""
(TestValidation) set_attributes should return a 40 error code when too few items are passed
"""
_values = (0, 0, 'Description', 0, 'Specification', 0, 0.0, 0.0,
0.0, 0.0, 719163, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 95.0)
(_error_code,
_error_msg) = self.DUT.set_attributes(_values)
self.assertEqual(_error_code, 40)
@attr(all=True, unit=True)
def test_get_attributes(self):
"""
(TestValidation) get_attributes should return a tuple of attribute values
"""
self.assertEqual(self.DUT.get_attributes(),
(0, 0, '', 0, '', 0, 0.0, 0.0, 0.0, 0.0, 719163,
719163, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 95.0))
@attr(all=True, unit=True)
def test_calculate_task_time(self):
"""
(TestValidation) calculate returns False on successfully calculating tasks times
"""
self.DUT.minimum_time = 25.2
self.DUT.average_time = 36.8
self.DUT.maximum_time = 44.1
self.assertFalse(self.DUT.calculate())
self.assertAlmostEqual(self.DUT.mean_time, 36.08333333)
self.assertAlmostEqual(self.DUT.time_variance, 9.9225)
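        # Editor's note: these expected values match the classic PERT estimates
        # (a hedged reading of the implementation): mean = (min + 4*avg + max) / 6
        # and variance = ((max - min) / 6) ** 2, e.g. (25.2 + 4*36.8 + 44.1)/6 = 36.0833...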
@attr(all=True, unit=True)
def test_calculate_task_cost(self):
"""
(TestValidation) calculate returns False on successfully calculating tasks costs
"""
self.DUT.minimum_cost = 252.00
self.DUT.average_cost = 368.00
self.DUT.maximum_cost = 441.00
self.DUT.confidence = 0.95
self.assertFalse(self.DUT.calculate())
self.assertAlmostEqual(self.DUT.mean_cost, 360.83333333)
self.assertAlmostEqual(self.DUT.cost_variance, 992.25)
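# Editor's note, summarizing the set_attributes() contract these tests pin down
# (hedged reconstruction; the implementation lives in validation/Validation.py):
#   error code 0  -> all 24 attribute values accepted
#   error code 10 -> a value of the wrong type was passed (e.g. a string for a date)
#   error code 40 -> too few values were passed (fewer than the expected 24)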
class TestValidationController(unittest.TestCase):
"""
Class for testing the Validation data controller class.
"""
def setUp(self):
"""
Sets up the test fixture for the Validation class.
"""
self.DUT = Validation()
@attr(all=True, unit=True)
def test_controller_create(self):
"""
(TestValidation) __init__ should create a Validation data controller
"""
self.assertTrue(isinstance(self.DUT, Validation))
self.assertEqual(self.DUT._dao, None)
self.assertEqual(self.DUT._last_id, None)
self.assertEqual(self.DUT.dicTasks, {})
self.assertEqual(self.DUT.dicStatus, {})
| 1.773438 | 2 |
chapter4-function/exercise7.py | MyLanPangzi/py4e | 0 | 12798856 | <reponame>MyLanPangzi/py4e
# Exercise 7: Rewrite the grade program from the previous chapter using a function called computegrade
# that takes a score as its parameter and returns a grade as a string.
def computegrade(score):
if score < 0 or score > 1:
return "score out of range"
elif score >= 0.9:
return "A"
elif score >= 0.8:
return "B"
elif score >= 0.7:
return "C"
elif score >= 0.6:
return "D"
else:
return "F"
print(computegrade(float(input('Enter Score(0-1.0): '))))
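# Sample interaction (editor's addition):
#   Enter Score(0-1.0): 0.85
#   B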
| 3.9375 | 4 |
All_Source_Code/ClassificationOverview/ClassificationOverview_5.py | APMonitor/pds | 11 | 12798857 | <filename>All_Source_Code/ClassificationOverview/ClassificationOverview_5.py
from sklearn import datasets, svm, metrics
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
# The digits dataset
digits = datasets.load_digits()
# Flatten the image to apply classifier
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create support vector classifier
classifier = svm.SVC(gamma=0.001)
# Split into train and test subsets (50% each)
X_train, X_test, y_train, y_test = train_test_split(
data, digits.target, test_size=0.5, shuffle=False)
# Learn the digits on the first half of the digits
classifier.fit(X_train, y_train)
# test on second half of data
n = np.random.randint(int(n_samples/2),n_samples)
plt.imshow(digits.images[n], cmap=plt.cm.gray_r, interpolation='nearest')
print('Predicted: ' + str(classifier.predict(digits.data[n:n+1])[0]))
# Select Option by Number
# 0 = Linear, 1 = Quadratic, 2 = Inner Target
# 3 = Moons, 4 = Concentric Circles, 5 = Distinct Clusters
select_option = 5
# generate data
data_options = ['linear','quadratic','target','moons','circles','blobs']
option = data_options[select_option]
# number of data points
n = 2000
X = np.random.random((n,2))
mixing = 0.0 # add random mixing element to data
xplot = np.linspace(0,1,100)
if option=='linear':
y = np.array([False if (X[i,0]+X[i,1])>=(1.0+mixing/2-np.random.rand()*mixing) else True for i in range(n)])
yplot = 1-xplot
elif option=='quadratic':
y = np.array([False if X[i,0]**2>=X[i,1]+(np.random.rand()-0.5)\
*mixing else True for i in range(n)])
yplot = xplot**2
elif option=='target':
y = np.array([False if (X[i,0]-0.5)**2+(X[i,1]-0.5)**2<=0.1 +(np.random.rand()-0.5)*0.2*mixing else True for i in range(n)])
j = False
yplot = np.empty(100)
for i,x in enumerate(xplot):
r = 0.1-(x-0.5)**2
if r<=0:
yplot[i] = np.nan
else:
j = not j # plot both sides of circle
yplot[i] = (2*j-1)*np.sqrt(r)+0.5
elif option=='moons':
X, y = datasets.make_moons(n_samples=n,noise=0.05)
yplot = xplot*0.0
elif option=='circles':
X, y = datasets.make_circles(n_samples=n,noise=0.05,factor=0.5)
yplot = xplot*0.0
elif option=='blobs':
X, y = datasets.make_blobs(n_samples=n,centers=[[-5,3],[5,-3]],cluster_std=2.0)
yplot = xplot*0.0
plt.scatter(X[y>0.5,0],X[y>0.5,1],color='blue',marker='^',label='True')
plt.scatter(X[y<0.5,0],X[y<0.5,1],color='red',marker='x',label='False')
if option not in ['moons','circles','blobs']:
plt.plot(xplot,yplot,'k.',label='Division')
plt.legend()
plt.savefig(str(select_option)+'.png')
# Split into train and test subsets (50% each)
XA, XB, yA, yB = train_test_split(X, y, test_size=0.5, shuffle=False)
# Plot regression results
def assess(P):
plt.figure()
plt.scatter(XB[P==1,0],XB[P==1,1],marker='^',color='blue',label='True')
plt.scatter(XB[P==0,0],XB[P==0,1],marker='x',color='red',label='False')
plt.scatter(XB[P!=yB,0],XB[P!=yB,1],marker='s',color='orange',alpha=0.5,label='Incorrect')
if option not in ['moons','circles','blobs']:
plt.plot(xplot,yplot,'k.',label='Division')
plt.legend()
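# Editor's note: sklearn's `metrics` module is imported above but never used; a hedged
# one-liner such as the following could report accuracy alongside each assessment plot:
#     print('Accuracy: %.3f' % metrics.accuracy_score(yB, P))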
# Supervised Classification
# Logistic Regression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver='lbfgs')
lr.fit(XA,yA)
yP = lr.predict(XB)
assess(yP)
# Naïve Bayes
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(XA,yA)
yP = nb.predict(XB)
assess(yP)
# Stochastic Gradient Descent
from sklearn.linear_model import SGDClassifier
sgd = SGDClassifier(loss='modified_huber', shuffle=True,random_state=101)
sgd.fit(XA,yA)
yP = sgd.predict(XB)
assess(yP)
# K-Nearest Neighbors
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(XA,yA)
yP = knn.predict(XB)
assess(yP)
# Decision Tree
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(max_depth=10,random_state=101,max_features=None,\
min_samples_leaf=5)
dtree.fit(XA,yA)
yP = dtree.predict(XB)
assess(yP)
# Random Forest
from sklearn.ensemble import RandomForestClassifier
rfm = RandomForestClassifier(n_estimators=70,oob_score=True,n_jobs=1,\
random_state=101,max_features=None,min_samples_leaf=3)
rfm.fit(XA,yA)
yP = rfm.predict(XB)
assess(yP)
# Support Vector Classifier
from sklearn.svm import SVC
svm = SVC(gamma='scale', C=1.0, random_state=101)
svm.fit(XA,yA)
yP = svm.predict(XB)
assess(yP)
# Neural Network
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(solver='lbfgs',alpha=1e-5,max_iter=200,\
activation='relu',hidden_layer_sizes=(10,30,10),\
random_state=1, shuffle=True)
clf.fit(XA,yA)
yP = clf.predict(XB)
assess(yP)
# Unsupervised Classification
# K-Means Clustering
from sklearn.cluster import KMeans
km = KMeans(n_clusters=2)
km.fit(XA)
yP = km.predict(XB)
# Arbitrary labels with unsupervised clustering may need to be reversed
if len(XB[yP!=yB]) > n/4: yP = 1 - yP
assess(yP)
# Gaussian Mixture Model
from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=2)
gmm.fit(XA)
yP = gmm.predict_proba(XB) # produces probabilities
# Arbitrary labels with unsupervised clustering may need to be reversed
if len(XB[np.round(yP[:,0])!=yB]) > n/4: yP = 1 - yP
assess(np.round(yP[:,0]))
# Spectral Clustering
from sklearn.cluster import SpectralClustering
sc = SpectralClustering(n_clusters=2,eigen_solver='arpack',\
affinity='nearest_neighbors')
yP = sc.fit_predict(XB) # No separation between fit and predict calls, need to fit and predict on same dataset
# Arbitrary labels with unsupervised clustering may need to be reversed
if len(XB[yP!=yB]) > n/4: yP = 1 - yP
assess(yP)
plt.show()
| 2.734375 | 3 |
src/service.library.video/resources/lib/artwork.py | sebastian-steinmann/kodi-repo | 0 | 12798858 | # -*- coding: utf-8 -*-
#################################################################################################
import logging
import urllib
import requests
from resources.lib.util import JSONRPC
##################################################################################################
log = logging.getLogger("DINGS."+__name__)
##################################################################################################
class Artwork(object):
xbmc_host = 'localhost'
xbmc_port = None
xbmc_username = None
xbmc_password = None
def __init__(self):
if not self.xbmc_port:
self._set_webserver_details()
def _double_urlencode(self, text):
text = self.single_urlencode(text)
text = self.single_urlencode(text)
return text
@classmethod
def single_urlencode(cls, text):
        # urlencode needs a utf-8 string
text = urllib.urlencode({'blahblahblah': text.encode('utf-8')})
text = text[13:]
return text.decode('utf-8') #return the result again as unicode
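    # Editor's note (hedged): Kodi's image:// virtual paths embed an already URL-encoded
    # URL, which is why cache_texture() below encodes the original URL twice.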
def _set_webserver_details(self):
# Get the Kodi webserver details - used to set the texture cache
get_setting_value = JSONRPC('Settings.GetSettingValue')
web_query = {
"setting": "services.webserver"
}
result = get_setting_value.execute(web_query)
try:
xbmc_webserver_enabled = result['result']['value']
except (KeyError, TypeError):
xbmc_webserver_enabled = False
if not xbmc_webserver_enabled:
# Enable the webserver, it is disabled
set_setting_value = JSONRPC('Settings.SetSettingValue')
web_port = {
"setting": "services.webserverport",
"value": 8080
}
set_setting_value.execute(web_port)
self.xbmc_port = 8080
web_user = {
"setting": "services.webserver",
"value": True
}
set_setting_value.execute(web_user)
self.xbmc_username = "kodi"
else:
# Webserver already enabled
web_port = {
"setting": "services.webserverport"
}
result = get_setting_value.execute(web_port)
try:
self.xbmc_port = result['result']['value']
except (TypeError, KeyError):
pass
web_user = {
"setting": "services.webserverusername"
}
result = get_setting_value.execute(web_user)
try:
self.xbmc_username = result['result']['value']
except (TypeError, KeyError):
pass
web_pass = {
"setting": "services.webserverpassword"
}
result = get_setting_value.execute(web_pass)
try:
self.xbmc_password = result['result']['value']
except (TypeError, KeyError):
pass
def cache_texture(self, url):
# Cache a single image url to the texture cache
if url:
log.debug("Processing: %s", url)
url = self._double_urlencode(url)
action_url = "http://%s:%s/image/image://%s" % (self.xbmc_host, self.xbmc_port, url)
try: # Add image to texture cache by simply calling it at the http endpoint
requests.head(url=(action_url),
auth=(self.xbmc_username, self.xbmc_password),
timeout=1)
            except Exception as e:  # We don't need the result
                log.error("Error while pre-caching file %s with error message %s", action_url, e.message)
| 2.28125 | 2 |
python_scripts/binary2Array.py | ingle/ultrasound-simulation | 15 | 12798859 | <gh_stars>10-100
def makeMultiFrameSimFile(fname, numFrames):
'''Read in multiple files with the name fname + number + .dat. Save all those frames to a single file with one header. '''
import numpy as np
import struct
newFile = open(fname + 'allFrames.dat', 'wb')
#Read in the first file and use this one to write the header.
#Read in old file
fIn = open(fname + str(0) + '.dat', 'rb')
freqstep =float( np.fromfile(fIn, np.double,1) )
points = int( np.fromfile(fIn, np.int32,1) )
lines = int( np.fromfile(fIn, np.int32,1) )
tempReal = np.fromfile(fIn,np.double, points*lines )
tempImag = np.fromfile(fIn, np.double, points*lines )
fIn.close()
#now write the header in the new file
header = struct.pack('diii', freqstep, points, lines, int(numFrames))
newFile.write(header)
#also write the real then the imaginary part of the file into
#the header with the frequency index increasing the fastest
for l in range(lines):
for p in range(points):
newFile.write(struct.pack('d', float(tempReal[p + l*points])))
for l in range(lines):
for p in range(points):
newFile.write(struct.pack('d', float(tempImag[p + l*points])))
for f in range(1,numFrames):
#Read in old file
fIn = open(fname + str(f) + '.dat', 'rb')
freqstep =float( np.fromfile(fIn, np.double,1) )
points = int( np.fromfile(fIn, np.int32,1) )
lines = int( np.fromfile(fIn, np.int32,1) )
tempReal = np.fromfile(fIn,np.double, points*lines )
tempImag = np.fromfile(fIn, np.double, points*lines )
fIn.close()
#write the real then the imaginary part of the file into
#the file with the frequency index increasing the fastest
for l in range(lines):
for p in range(points):
newFile.write(struct.pack('d', float(tempReal[p + l*points])))
for l in range(lines):
for p in range(points):
                newFile.write(struct.pack('d', float(tempImag[p + l*points])))
    newFile.close()
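def readMultiFrameSimFile(fname):
    '''Editor's addition: a hedged sketch of the matching reader for the file written
    above. It mirrors the struct.pack('diii', ...) header (one double, three int32s)
    and the per-frame real-then-imaginary double blocks; untested against real data.'''
    import numpy as np
    with open(fname + 'allFrames.dat', 'rb') as fIn:
        freqstep = float(np.fromfile(fIn, np.double, 1))
        points, lines, numFrames = np.fromfile(fIn, np.int32, 3)
        frames = np.empty((numFrames, lines, points), np.complex128)
        for f in range(numFrames):
            real = np.fromfile(fIn, np.double, points * lines)
            imag = np.fromfile(fIn, np.double, points * lines)
            # frequency index increases fastest within each line, as in the writer
            frames[f] = (real + 1j * imag).reshape(lines, points)
    return freqstep, frames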
| 3.1875 | 3 |
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Support/Particles/ParticleSystem.py | mdavid/nuxleus | 1 | 12798860 | #!/usr/bin/env python
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
=========================================
Discrete time particle physics simulation
=========================================
A discrete time simulator of a system of bonded and unbonded particles, of
multiple types.
The actual physics calculations are deferred to the particles themselves. You
can have as many, or few, spatial dimensions as you like.
Example Usage
-------------
Create 3 particles, two of which are bonded and move noticeably closer after 5
cycles of simulation::
>>> laws = SimpleLaws(bondLength=5)
>>> sim = ParticleSystem(laws)
>>> sim.add( Particle(position=(10,10)) )
>>> sim.add( Particle(position=(10,20)) )
>>> sim.add( Particle(position=(30,40)) )
>>> sim.particles[0].makeBond(sim.particles, 1) # bond 1st and 2nd particles
>>> for p in sim.particles: print p.getLoc()
...
(10, 10)
(10, 20)
(30, 40)
>>> sim.run(cycles=5)
>>> for p in sim.particles: print p.getLoc()
...
[10.0, 13.940067328]
[10.0, 16.059932671999999]
[30, 40]
>>>
How does it work?
-----------------
Set up ParticleSystem by instantiating, specifying the laws to act between
particles and an (optional) set of initial particles.
Particles should be derived from the Particle base class (or have equivalent
functionality).
Particles can be added or removed from the system by reference, or removed by
their ID.
ParticleSystem will work for particles in space with any number of dimensions -
so long as all particles use the same!
Bonds between particles are up to the particles to manage for themselves.
The simulation runs in cycles when the run(...) method is called. Each cycle
advances the 'tick' count by 1. The tick count starts at zero, unless otherwise
specified during initialization.
The following attributes store the particles registered in ParticleSystem:
- particles -- simple list
- particleDict -- dictionary, indexed by particle.ID
ParticleSystem uses a SpatialIndexer object to speed up calculations.
SpatialIndexer reduce the search space when determining what particles lie
within a given region (radius of a point).
If your code changes the position of a particle, the simulator must be informed,
so it can update its spatial indexing data, by calling updateLoc(...)
The actual interactions between particles are calculated by the particles
themselves, *not* by ParticleSystem.
ParticleSystem calls the doInteractions(...) methods of all particles so they
can influence each other. It then calls the update(...) methods of all particles
so they can all update their positions and velocities ready for the next cycle.
This is a two stage process so that, in a given cycle, all particles see each
other at the same positions, irrespective of which particle's
doInteractions(...) method is called first. Particles should not apply their
velocities to update their position until their update(...) method is called.
"""
from SpatialIndexer import SpatialIndexer
class ParticleSystem(object):
"""\
ParticleSystem(laws[,initialParticles][,initialTick]) -> new ParticleSystem object
Discrete time simulator for a system of particles.
Keyword arguments:
- initialParticles -- list of particles (default=[])
- initialTick -- start value of the time 'tick' count (default=0)
"""
def __init__(self, laws, initialParticles = [], initialTick = 0):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
self.indexer = SpatialIndexer(laws.maxInteractRadius)
self.laws = laws
self.particles = []
self.tick = initialTick
self.particleDict = {}
self.add(*initialParticles)
def add(self, *newParticles):
"""Add the specified particle(s) into the system"""
self.particles.extend(newParticles)
for p in newParticles:
self.particleDict[p.ID] = p
self.indexer.updateLoc(*newParticles)
def remove(self, *oldParticles):
"""\
Remove the specified particle(s) from the system.
Note that this method does not destroy bonds from other particles to
these ones.
"""
for particle in oldParticles:
self.particles.remove(particle)
del self.particleDict[particle.ID]
self.indexer.remove(*oldParticles)
def removeByID(self, *ids):
"""\
Remove particle(s) as specified by id(s) from the system.
Note that this method does not destroy bonds from other particles to
these ones.
"""
particles = [self.particleDict[id] for id in ids]
self.remove( *particles )
def updateLoc(self, *particles):
"""\
Notify this physics system that the specified particle(s)
have changed position.
Must be called if you change a particle's position,
before calling run().
"""
self.indexer.updateLoc(*particles)
def withinRadius(self, centre, radius, filter=(lambda particle:True)):
"""\
withinRadius(centre,radius[,filter]) -> list of (particle,distSquared)
Returns a list of zero or more (particle,distSquared) tuples. The
particles listed are those within the specified radius of the specified
centre point, and that passed the (optional) filter function:
filter(particle) -> True if the particle is to be included in the list
"""
return self.indexer.withinRadius(centre, radius, filter)
def run(self, cycles = 1):
"""Run the simulation for a given number of cycles (default=1)"""
# optimisation to speed up access to these functions:
_indexer = self.indexer
_laws = self.laws
while cycles > 0:
cycles -= 1
self.tick += 1
_tick = self.tick
for p in self.particles:
p.doInteractions(_indexer, _laws, _tick)
for p in self.particles:
p.update(_laws)
_indexer.updateAll()
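# Editor's addition: a minimal particle satisfying the interface that run() above
# actually exercises -- doInteractions(indexer, laws, tick) and update(laws). This is
# a hedged sketch; the real Particle base class lives elsewhere in Kamaelia and does
# considerably more.
class _InertParticle(object):
    def __init__(self, ID, position):
        self.ID = ID
        self._pos = tuple(position)
    def getLoc(self):
        return self._pos
    def doInteractions(self, indexer, laws, tick):
        pass  # a real particle would accumulate forces from nearby particles here
    def update(self, laws):
        pass  # a real particle would apply its accumulated velocity here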
| 2.6875 | 3 |
libpysal/cg/shapes.py | Kanahiro/dbf-df-translator | 0 | 12798861 | <reponame>Kanahiro/dbf-df-translator
"""
Computational geometry code for PySAL: Python Spatial Analysis Library.
"""
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
import math
from .sphere import arcdist
from typing import Union
__all__ = [
"Point",
"LineSegment",
"Line",
"Ray",
"Chain",
"Polygon",
"Rectangle",
"asShape",
]
def asShape(obj):
"""Returns a PySAL shape object from ``obj``, which
must support the ``__geo_interface__``.
Parameters
----------
obj : {libpysal.cg.{Point, LineSegment, Line, Ray, Chain, Polygon}
A geometric representation of an object.
Raises
------
TypeError
Raised when ``obj`` is not a supported shape.
NotImplementedError
Raised when ``geo_type`` is not a supported type.
Returns
-------
obj : {libpysal.cg.{Point, LineSegment, Line, Ray, Chain, Polygon}
A new geometric representation of the object.
"""
if isinstance(obj, (Point, LineSegment, Line, Ray, Chain, Polygon)):
pass
else:
if hasattr(obj, "__geo_interface__"):
geo = obj.__geo_interface__
else:
geo = obj
if hasattr(geo, "type"):
raise TypeError("%r does not appear to be a shape object." % (obj))
geo_type = geo["type"].lower()
# if geo_type.startswith('multi'):
# raise NotImplementedError, "%s are not supported at this time."%geo_type
if geo_type in _geoJSON_type_to_Pysal_type:
obj = _geoJSON_type_to_Pysal_type[geo_type].__from_geo_interface__(geo)
else:
raise NotImplementedError("%s is not supported at this time." % geo_type)
return obj
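# Editor's addition: a hedged doctest-style example of asShape() with a bare
# geoJSON-like mapping (the supported type names are the keys of
# _geoJSON_type_to_Pysal_type, which is defined later in this module):
#
#     >>> pt = asShape({"type": "Point", "coordinates": (1.0, 3.0)})
#     >>> isinstance(pt, Point)
#     True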
class Geometry(object):
"""A base class to help implement ``is_geometry``
and make geometric types extendable.
"""
def __init__(self):
pass
class Point(Geometry):
"""Geometric class for point objects.
Parameters
----------
loc : tuple
The point's location (number :math:`x`-tuple, :math:`x` > 1).
Examples
--------
>>> p = Point((1, 3))
"""
def __init__(self, loc):
self.__loc = tuple(map(float, loc))
@classmethod
def __from_geo_interface__(cls, geo):
return cls(geo["coordinates"])
@property
def __geo_interface__(self):
return {"type": "Point", "coordinates": self.__loc}
def __lt__(self, other) -> bool:
"""Tests if the point is less than another object.
Parameters
----------
other : libpysal.cg.Point
An object to test equality against.
Examples
--------
>>> Point((0, 1)) < Point((0, 1))
False
>>> Point((0, 1)) < Point((1, 1))
True
"""
return (self.__loc) < (other.__loc)
def __le__(self, other) -> bool:
"""Tests if the point is less than or equal to another object.
Parameters
----------
other : libpysal.cg.Point
An object to test equality against.
Examples
--------
>>> Point((0, 1)) <= Point((0, 1))
True
>>> Point((0, 1)) <= Point((1, 1))
True
"""
return (self.__loc) <= (other.__loc)
def __eq__(self, other) -> bool:
"""Tests if the point is equal to another object.
Parameters
----------
other : libpysal.cg.Point
An object to test equality against.
Examples
--------
>>> Point((0, 1)) == Point((0, 1))
True
>>> Point((0, 1)) == Point((1, 1))
False
"""
try:
return (self.__loc) == (other.__loc)
except AttributeError:
return False
def __ne__(self, other) -> bool:
"""Tests if the point is not equal to another object.
Parameters
----------
other : libpysal.cg.Point
An object to test equality against.
Examples
--------
>>> Point((0, 1)) != Point((0, 1))
False
>>> Point((0, 1)) != Point((1, 1))
True
"""
try:
return (self.__loc) != (other.__loc)
except AttributeError:
return True
def __gt__(self, other) -> bool:
"""Tests if the point is greater than another object.
Parameters
----------
other : libpysal.cg.Point
An object to test equality against.
Examples
--------
>>> Point((0, 1)) > Point((0, 1))
False
>>> Point((0, 1)) > Point((1, 1))
False
"""
return (self.__loc) > (other.__loc)
def __ge__(self, other) -> bool:
"""Tests if the point is greater than or equal to another object.
Parameters
----------
other : libpysal.cg.Point
An object to test equality against.
Examples
--------
>>> Point((0, 1)) >= Point((0, 1))
True
>>> Point((0, 1)) >= Point((1, 1))
False
"""
return (self.__loc) >= (other.__loc)
def __hash__(self) -> int:
"""Returns the hash of the point's location.
Examples
--------
>>> hash(Point((0, 1))) == hash(Point((0, 1)))
True
>>> hash(Point((0, 1))) == hash(Point((1, 1)))
False
"""
return hash(self.__loc)
def __getitem__(self, *args) -> Union[int, float]:
"""Return the coordinate for the given dimension.
Parameters
----------
*args : tuple
A singleton tuple of :math:`(i)` with :math:`i`
as the index of the desired dimension.
Examples
--------
>>> p = Point((5.5, 4.3))
>>> p[0] == 5.5
True
>>> p[1] == 4.3
True
"""
return self.__loc.__getitem__(*args)
def __getslice__(self, *args) -> slice:
"""Return the coordinates for the given dimensions.
Parameters
----------
*args : tuple
A tuple of :math:`(i,j)` with :math:`i` as the index to the start
slice and :math:`j` as the index to end the slice (excluded).
Examples
--------
>>> p = Point((3, 6, 2))
>>> p[:2] == (3, 6)
True
>>> p[1:2] == (6,)
True
"""
return self.__loc.__getslice__(*args)
def __len__(self) -> int:
""" Returns the dimensions of the point.
Examples
--------
>>> len(Point((1, 2)))
2
"""
return len(self.__loc)
def __repr__(self) -> str:
"""Returns the string representation of the ``Point``.
Examples
--------
>>> Point((0, 1))
(0.0, 1.0)
"""
return str(self)
def __str__(self) -> str:
"""Returns a string representation of a ``Point`` object.
Examples
--------
>>> p = Point((1, 3))
>>> str(p)
'(1.0, 3.0)'
"""
return str(self.__loc)
# return "POINT ({} {})".format(*self.__loc)
class LineSegment(Geometry):
"""Geometric representation of line segment objects.
Parameters
----------
start_pt : libpysal.cg.Point
The point where the segment begins.
end_pt : libpysal.cg.Point
The point where the segment ends.
Attributes
----------
p1 : libpysal.cg.Point
The starting point of the line segment.
p2 : Point
The ending point of the line segment.
bounding_box : libpysal.cg.Rectangle
The bounding box of the segment.
len : float
The length of the segment.
line : libpysal.cg.Line
The line on which the segment lies.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
"""
def __init__(self, start_pt, end_pt):
self._p1 = start_pt
self._p2 = end_pt
self._reset_props()
def __str__(self):
return "LineSegment(" + str(self._p1) + ", " + str(self._p2) + ")"
# return "LINESTRING ({} {}, {} {})".format(
# self._p1[0], self._p1[1], self._p2[0], self._p2[1]
# )
def __eq__(self, other) -> bool:
"""Returns ``True`` if ``self`` and ``other`` are the same line segment.
Examples
--------
>>> l1 = LineSegment(Point((1, 2)), Point((5, 6)))
>>> l2 = LineSegment(Point((5, 6)), Point((1, 2)))
>>> l1 == l2
True
>>> l2 == l1
True
"""
eq = False
if not isinstance(other, self.__class__):
pass
else:
if other.p1 == self._p1 and other.p2 == self._p2:
eq = True
elif other.p2 == self._p1 and other.p1 == self._p2:
eq = True
return eq
def intersect(self, other) -> bool:
"""Test whether segment intersects with other segment (``True``) or
not (``False``). Handles endpoints of segments being on other segment.
Parameters
----------
other : libpysal.cg.LineSegment
Another line segment to check against.
Examples
--------
>>> ls = LineSegment(Point((5, 0)), Point((10, 0)))
>>> ls1 = LineSegment(Point((5, 0)), Point((10, 1)))
>>> ls.intersect(ls1)
True
>>> ls2 = LineSegment(Point((5, 1)), Point((10, 1)))
>>> ls.intersect(ls2)
False
>>> ls2 = LineSegment(Point((7, -1)), Point((7, 2)))
>>> ls.intersect(ls2)
True
"""
ccw1 = self.sw_ccw(other.p2)
ccw2 = self.sw_ccw(other.p1)
ccw3 = other.sw_ccw(self.p1)
ccw4 = other.sw_ccw(self.p2)
intersects = ccw1 * ccw2 <= 0 and ccw3 * ccw4 <= 0
return intersects
def _reset_props(self):
"""**HELPER METHOD. DO NOT CALL.**
Resets attributes which are functions of other attributes.
The getters for these attributes (implemented as properties)
then recompute their values if they have been reset since
the last call to the getter.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> ls._reset_props()
"""
self._bounding_box = None
self._len = None
self._line = False
def _get_p1(self):
"""**HELPER METHOD. DO NOT CALL.**
Returns the ``p1`` attribute of the line segment.
Returns
-------
self._p1 : libpysal.cg.Point
The ``_p1`` attribute.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._get_p1()
>>> r == Point((1, 2))
True
"""
return self._p1
def _set_p1(self, p1):
"""**HELPER METHOD. DO NOT CALL.**
Sets the ``p1`` attribute of the line segment.
Parameters
----------
p1 : libpysal.cg.Point
A point.
Returns
-------
self._p1 : libpysal.cg.Point
The reset ``p1`` attribute.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._set_p1(Point((3, -1)))
>>> r == Point((3.0, -1.0))
True
"""
self._p1 = p1
self._reset_props()
return self._p1
p1 = property(_get_p1, _set_p1)
def _get_p2(self):
"""**HELPER METHOD. DO NOT CALL.**
Returns the ``p2`` attribute of the line segment.
Returns
-------
self._p2 : libpysal.cg.Point
The ``_p2`` attribute.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._get_p2()
>>> r == Point((5, 6))
True
"""
return self._p2
def _set_p2(self, p2):
"""**HELPER METHOD. DO NOT CALL.**
Sets the ``p2`` attribute of the line segment.
Parameters
----------
p2 : libpysal.cg.Point
A point.
Returns
-------
self._p2 : libpysal.cg.Point
The reset ``p2`` attribute.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._set_p2(Point((3, -1)))
>>> r == Point((3.0, -1.0))
True
"""
self._p2 = p2
self._reset_props()
return self._p2
p2 = property(_get_p2, _set_p2)
def is_ccw(self, pt) -> bool:
"""Returns whether a point is counterclockwise of the
segment (``True``) or not (``False``). Exclusive.
Parameters
----------
pt : libpysal.cg.Point
A point lying ccw or cw of a segment.
Examples
--------
>>> ls = LineSegment(Point((0, 0)), Point((5, 0)))
>>> ls.is_ccw(Point((2, 2)))
True
>>> ls.is_ccw(Point((2, -2)))
False
"""
v1 = (self._p2[0] - self._p1[0], self._p2[1] - self._p1[1])
v2 = (pt[0] - self._p1[0], pt[1] - self._p1[1])
return v1[0] * v2[1] - v1[1] * v2[0] > 0
def is_cw(self, pt) -> bool:
"""Returns whether a point is clockwise of the
segment (``True``) or not (``False``). Exclusive.
Parameters
----------
pt : libpysal.cg.Point
A point lying ccw or cw of a segment.
Examples
--------
>>> ls = LineSegment(Point((0, 0)), Point((5, 0)))
>>> ls.is_cw(Point((2, 2)))
False
>>> ls.is_cw(Point((2, -2)))
True
"""
v1 = (self._p2[0] - self._p1[0], self._p2[1] - self._p1[1])
v2 = (pt[0] - self._p1[0], pt[1] - self._p1[1])
return v1[0] * v2[1] - v1[1] * v2[0] < 0
def sw_ccw(self, pt):
"""Sedgewick test for ``pt`` being ccw of segment.
Returns
-------
is_ccw : bool
``1`` if turn from ``self.p1`` to ``self.p2`` to ``pt`` is ccw.
``-1`` if turn from ``self.p1`` to ``self.p2`` to ``pt`` is cw.
``-1`` if the points are collinear and ``self.p1`` is in the middle.
``1`` if the points are collinear and ``self.p2`` is in the middle.
``0`` if the points are collinear and ``pt`` is in the middle.
"""
p0 = self.p1
p1 = self.p2
p2 = pt
dx1 = p1[0] - p0[0]
dy1 = p1[1] - p0[1]
dx2 = p2[0] - p0[0]
dy2 = p2[1] - p0[1]
if dy1 * dx2 < dy2 * dx1:
is_ccw = 1
elif dy1 * dx2 > dy2 * dx1:
is_ccw = -1
elif dx1 * dx2 < 0 or dy1 * dy2 < 0:
is_ccw = -1
elif dx1 * dx1 + dy1 * dy1 >= dx2 * dx2 + dy2 * dy2:
is_ccw = 0
else:
is_ccw = 1
return is_ccw
def get_swap(self):
"""Returns a ``LineSegment`` object which has its endpoints swapped.
Returns
-------
line_seg : libpysal.cg.LineSegment
The ``LineSegment`` object which has its endpoints swapped.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> swap = ls.get_swap()
>>> swap.p1[0]
5.0
>>> swap.p1[1]
6.0
>>> swap.p2[0]
1.0
>>> swap.p2[1]
2.0
"""
line_seg = LineSegment(self._p2, self._p1)
return line_seg
@property
def bounding_box(self):
"""Returns the minimum bounding box of a ``LineSegment`` object.
Returns
-------
self._bounding_box : libpysal.cg.Rectangle
The bounding box of the line segment.
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> ls.bounding_box.left
1.0
>>> ls.bounding_box.lower
2.0
>>> ls.bounding_box.right
5.0
>>> ls.bounding_box.upper
6.0
"""
# If LineSegment attributes p1, p2 changed, recompute
if self._bounding_box is None:
self._bounding_box = Rectangle(
min([self._p1[0], self._p2[0]]),
min([self._p1[1], self._p2[1]]),
max([self._p1[0], self._p2[0]]),
max([self._p1[1], self._p2[1]]),
)
return Rectangle(
self._bounding_box.left,
self._bounding_box.lower,
self._bounding_box.right,
self._bounding_box.upper,
)
@property
def len(self) -> float:
"""Returns the length of a ``LineSegment`` object.
Examples
--------
>>> ls = LineSegment(Point((2, 2)), Point((5, 2)))
>>> ls.len
3.0
"""
# If LineSegment attributes p1, p2 changed, recompute
if self._len is None:
self._len = math.hypot(self._p1[0] - self._p2[0], self._p1[1] - self._p2[1])
return self._len
@property
def line(self):
"""Returns a ``Line`` object of the line on which the segment lies.
Returns
-------
self._line : libpysal.cg.Line
The ``Line`` object of the line on which the segment lies.
Examples
--------
>>> ls = LineSegment(Point((2, 2)), Point((3, 3)))
>>> l = ls.line
>>> l.m
1.0
>>> l.b
0.0
"""
if self._line == False:
dx = self._p1[0] - self._p2[0]
dy = self._p1[1] - self._p2[1]
if dx == 0 and dy == 0:
self._line = None
elif dx == 0:
self._line = VerticalLine(self._p1[0])
else:
m = dy / float(dx)
# y - mx
b = self._p1[1] - m * self._p1[0]
self._line = Line(m, b)
return self._line
class VerticalLine(Geometry):
"""Geometric representation of verticle line objects.
Parameters
----------
x : {int, float}
The :math:`x`-intercept of the line. ``x`` is also an attribute.
Examples
--------
>>> ls = VerticalLine(0)
>>> ls.m
inf
>>> ls.b
nan
"""
def __init__(self, x):
self._x = float(x)
self.m = float("inf")
self.b = float("nan")
def x(self, y) -> float:
"""Returns the :math:`x`-value of the line at a particular :math:`y`-value.
Parameters
----------
y : {int, float}
The :math:`y`-value at which to compute :math:`x`.
Examples
--------
>>> l = VerticalLine(0)
>>> l.x(0.25)
0.0
"""
return self._x
def y(self, x) -> float:
"""Returns the :math:`y`-value of the line at a particular :math:`x`-value.
Parameters
----------
x : {int, float}
The :math:`x`-value at which to compute :math:`y`.
Examples
--------
>>> l = VerticalLine(1)
>>> l.y(1)
nan
"""
return float("nan")
class Line(Geometry):
"""Geometric representation of line objects.
Parameters
----------
m : {int, float}
The slope of the line. ``m`` is also an attribute.
b : {int, float}
The :math:`y`-intercept of the line. ``b`` is also an attribute.
Raises
------
ArithmeticError
Raised when infinity is passed in as the slope.
Examples
--------
>>> ls = Line(1, 0)
>>> ls.m
1.0
>>> ls.b
0.0
"""
def __init__(self, m, b):
if m == float("inf"):
raise ArithmeticError("Slope cannot be infinite.")
self.m = float(m)
self.b = float(b)
def x(self, y: Union[int, float]) -> float:
"""Returns the :math:`x`-value of the line at a particular :math:`y`-value.
Parameters
----------
y : {int, float}
The :math:`y`-value at which to compute :math:`x`.
Raises
------
ArithmeticError
Raised when ``0.`` is passed in as the slope.
Examples
--------
>>> l = Line(0.5, 0)
>>> l.x(0.25)
0.5
"""
if self.m == 0:
raise ArithmeticError("Cannot solve for 'x' when slope is zero.")
return (y - self.b) / self.m
def y(self, x: Union[int, float]) -> float:
"""Returns the :math:`y`-value of the line at a particular :math:`x`-value.
Parameters
----------
x : {int, float}
The :math:`x`-value at which to compute :math:`y`.
Examples
--------
>>> l = Line(1, 0)
>>> l.y(1)
1.0
"""
if self.m == 0:
return self.b
return self.m * x + self.b
class Ray:
"""Geometric representation of ray objects.
Parameters
----------
origin : libpysal.cg.Point
The point where the ray originates.
    second_p : libpysal.cg.Point
        The second point specifying the ray (not ``origin``).
Attributes
----------
o : libpysal.cg.Point
The origin (point where ray originates). See ``origin``.
p : libpysal.cg.Point
The second point on the ray (not the point where the
ray originates). See ``second_p``.
Examples
--------
>>> l = Ray(Point((0, 0)), Point((1, 0)))
>>> str(l.o)
'(0.0, 0.0)'
>>> str(l.p)
'(1.0, 0.0)'
"""
def __init__(self, origin, second_p):
self.o = origin
self.p = second_p
class Chain(Geometry):
"""Geometric representation of a chain, also known as a polyline.
Parameters
----------
vertices : list
A point list or list of point lists.
Attributes
----------
vertices : list
The list of points of the vertices of the chain in order.
len : float
The geometric length of the chain.
Examples
--------
>>> c = Chain([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((2, 1))])
"""
def __init__(self, vertices: list):
if isinstance(vertices[0], list):
self._vertices = [part for part in vertices]
else:
self._vertices = [vertices]
self._reset_props()
@classmethod
def __from_geo_interface__(cls, geo: dict):
if geo["type"].lower() == "linestring":
verts = [Point(pt) for pt in geo["coordinates"]]
elif geo["type"].lower() == "multilinestring":
verts = [list(map(Point, part)) for part in geo["coordinates"]]
else:
raise TypeError("%r is not a Chain." % geo)
return cls(verts)
@property
def __geo_interface__(self) -> dict:
if len(self.parts) == 1:
return {"type": "LineString", "coordinates": self.vertices}
else:
return {"type": "MultiLineString", "coordinates": self.parts}
def _reset_props(self):
"""**HELPER METHOD. DO NOT CALL.** Resets attributes which are
functions of other attributes. The ``getter``s for these attributes
(implemented as ``properties``) then recompute their values if they
have been reset since the last call to the ``getter``.
"""
self._len = None
self._arclen = None
self._bounding_box = None
@property
def vertices(self) -> list:
"""Returns the vertices of the chain in clockwise order.
Examples
--------
>>> c = Chain([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((2, 1))])
>>> verts = c.vertices
>>> len(verts)
4
"""
return sum([part for part in self._vertices], [])
@property
def parts(self) -> list:
"""Returns the parts (lists of ``libpysal.cg.Point`` objects) of the chain.
Examples
--------
>>> c = Chain(
... [
... [Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))],
... [Point((2, 1)), Point((2, 2)), Point((1, 2)), Point((1, 1))]
... ]
... )
>>> len(c.parts)
2
"""
return [[v for v in part] for part in self._vertices]
@property
def bounding_box(self):
"""Returns the bounding box of the chain.
Returns
-------
self._bounding_box : libpysal.cg.Rectangle
The bounding box of the chain.
Examples
--------
>>> c = Chain([Point((0, 0)), Point((2, 0)), Point((2, 1)), Point((0, 1))])
>>> c.bounding_box.left
0.0
>>> c.bounding_box.lower
0.0
>>> c.bounding_box.right
2.0
>>> c.bounding_box.upper
1.0
"""
if self._bounding_box is None:
vertices = self.vertices
self._bounding_box = Rectangle(
min([v[0] for v in vertices]),
min([v[1] for v in vertices]),
max([v[0] for v in vertices]),
max([v[1] for v in vertices]),
)
return self._bounding_box
@property
def len(self) -> int:
"""Returns the geometric length of the chain.
Examples
--------
>>> c = Chain([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((2, 1))])
>>> c.len
3.0
>>> c = Chain(
... [
... [Point((0, 0)), Point((1, 0)), Point((1, 1))],
... [Point((10, 10)), Point((11, 10)), Point((11, 11))]
... ]
... )
>>> c.len
4.0
"""
def dist(v1: tuple, v2: tuple) -> Union[int, float]:
return math.hypot(v1[0] - v2[0], v1[1] - v2[1])
def part_perimeter(p: list) -> Union[int, float]:
return sum([dist(p[i], p[i + 1]) for i in range(len(p) - 1)])
if self._len is None:
self._len = sum([part_perimeter(part) for part in self._vertices])
return self._len
@property
def arclen(self) -> Union[int, float]:
"""Returns the geometric length of the chain
computed using 'arcdistance' (meters).
"""
def part_perimeter(p: list) -> Union[int, float]:
return sum([arcdist(p[i], p[i + 1]) * 1000.0 for i in range(len(p) - 1)])
if self._arclen is None:
self._arclen = sum([part_perimeter(part) for part in self._vertices])
return self._arclen
@property
def segments(self) -> list:
"""Returns the segments that compose the chain."""
return [
[LineSegment(a, b) for (a, b) in zip(part[:-1], part[1:])]
for part in self._vertices
]
class Ring(Geometry):
"""Geometric representation of a linear ring. Linear rings must be
closed, the first and last point must be the same. Open rings will
be closed. This class exists primarily as a geometric primitive to
form complex polygons with multiple rings and holes. The ordering
of the vertices is ignored and will not be altered.
Parameters
----------
vertices : list
A list of vertices.
Attributes
----------
vertices : list
A list of points with the vertices of the ring.
len : int
The number of vertices.
perimeter : float
The geometric length of the perimeter of the ring.
bounding_box : libpysal.cg.Rectangle
The bounding box of the ring.
area : float
The area enclosed by the ring.
centroid : {tuple, libpysal.cg.Point}
The centroid of the ring defined by the 'center of gravity'
or 'center or mass'.
_quad_tree_structure : libpysal.cg.QuadTreeStructureSingleRing
The quad tree structure for the ring. This structure helps
test if a point is inside the ring.
"""
def __init__(self, vertices):
if vertices[0] != vertices[-1]:
vertices = vertices[:] + vertices[0:1]
# msg = "Supplied vertices do not form a closed ring, "
# msg += "the first and last vertices are not the same."
# raise ValueError(msg)
self.vertices = tuple(vertices)
self._perimeter = None
self._bounding_box = None
self._area = None
self._centroid = None
self._quad_tree_structure = None
def __len__(self) -> int:
return len(self.vertices)
@property
def len(self) -> int:
return len(self)
@staticmethod
def dist(v1, v2) -> Union[int, float]:
return math.hypot(v1[0] - v2[0], v1[1] - v2[1])
@property
def perimeter(self) -> Union[int, float]:
if self._perimeter is None:
dist = self.dist
v = self.vertices
self._perimeter = sum(
[dist(v[i], v[i + 1]) for i in range(-1, len(self) - 1)]
)
return self._perimeter
@property
def bounding_box(self):
"""Returns the bounding box of the ring.
Returns
-------
self._bounding_box : libpysal.cg.Rectangle
The bounding box of the ring.
Examples
--------
>>> r = Ring(
... [
... Point((0, 0)),
... Point((2, 0)),
... Point((2, 1)),
... Point((0, 1)),
... Point((0, 0))
... ]
... )
>>> r.bounding_box.left
0.0
>>> r.bounding_box.lower
0.0
>>> r.bounding_box.right
2.0
>>> r.bounding_box.upper
1.0
"""
if self._bounding_box is None:
vertices = self.vertices
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
self._bounding_box = Rectangle(min(x), min(y), max(x), max(y))
return self._bounding_box
@property
def area(self) -> Union[int, float]:
"""Returns the area of the ring.
Examples
--------
>>> r = Ring(
... [
... Point((0, 0)),
... Point((2, 0)),
... Point((2, 1)),
... Point((0, 1)),
... Point((0, 0))
... ]
... )
>>> r.area
2.0
"""
return abs(self.signed_area)
@property
def signed_area(self) -> Union[int, float]:
if self._area is None:
vertices = self.vertices
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
N = len(self)
A = 0.0
for i in range(N - 1):
A += (x[i] + x[i + 1]) * (y[i] - y[i + 1])
A = A * 0.5
self._area = -A
return self._area
@property
def centroid(self):
"""Returns the centroid of the ring.
Returns
-------
self._centroid : libpysal.cg.Point
The ring's centroid.
Notes
-----
The centroid returned by this method is the geometric centroid.
Also known as the 'center of gravity' or 'center of mass'.
Examples
--------
>>> r = Ring(
... [
... Point((0, 0)),
... Point((2, 0)),
... Point((2, 1)),
... Point((0, 1)),
... Point((0, 0))
... ]
... )
>>> str(r.centroid)
'(1.0, 0.5)'
"""
if self._centroid is None:
vertices = self.vertices
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
A = self.signed_area
N = len(self)
cx = 0
cy = 0
for i in range(N - 1):
f = x[i] * y[i + 1] - x[i + 1] * y[i]
cx += (x[i] + x[i + 1]) * f
cy += (y[i] + y[i + 1]) * f
cx = 1.0 / (6 * A) * cx
cy = 1.0 / (6 * A) * cy
self._centroid = Point((cx, cy))
return self._centroid
def build_quad_tree_structure(self):
"""Build the quad tree structure for this polygon. Once
the structure is built, speed for testing if a point is
inside the ring will be increased significantly.
"""
self._quad_tree_structure = QuadTreeStructureSingleRing(self)
def contains_point(self, point):
"""Point containment using winding number. The implementation is based on
`this <http://www.engr.colostate.edu/~dga/dga/papers/point_in_polygon.pdf>`_.
Parameters
----------
point : libpysal.cg.Point
The point to test for containment.
Returns
-------
point_contained : bool
``True`` if ``point`` is contained within the polygon, otherwise ``False``.
"""
point_contained = False
if self._quad_tree_structure is None:
x, y = point
# bbox checks
bbleft = x < self.bounding_box.left
bbright = x > self.bounding_box.right
bblower = y < self.bounding_box.lower
bbupper = y > self.bounding_box.upper
if bbleft or bbright or bblower or bbupper:
pass
else:
rn = len(self.vertices)
xs = [self.vertices[i][0] - point[0] for i in range(rn)]
ys = [self.vertices[i][1] - point[1] for i in range(rn)]
w = 0
for i in range(len(self.vertices) - 1):
yi = ys[i]
yj = ys[i + 1]
xi = xs[i]
xj = xs[i + 1]
if yi * yj < 0:
r = xi + yi * (xj - xi) / (yi - yj)
if r > 0:
if yi < 0:
w += 1
else:
w -= 1
elif yi == 0 and xi > 0:
if yj > 0:
w += 0.5
else:
w -= 0.5
elif yj == 0 and xj > 0:
if yi < 0:
w += 0.5
else:
w -= 0.5
if w == 0:
pass
else:
point_contained = True
else:
point_contained = self._quad_tree_structure.contains_point(point)
return point_contained
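# Editor's sketch (not part of the original module): a minimal doctest-style
# exercise of the winding-number containment test above, on a unit square.
# >>> r = Ring([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
# >>> r.contains_point((0.5, 0.5))
# True
# >>> r.contains_point((2.0, 2.0))
# False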
class Polygon(Geometry):
"""Geometric representation of polygon objects.
Returns a polygon created from the objects specified.
Parameters
----------
vertices : list
A list of vertices or a list of lists of vertices.
holes : list
A list of sub-polygons to be considered as holes.
Default is ``None``.
Attributes
----------
vertices : list
A list of points with the vertices of the polygon in clockwise order.
len : int
The number of vertices including holes.
perimeter : float
The geometric length of the perimeter of the polygon.
bounding_box : libpysal.cg.Rectangle
The bounding box of the polygon.
bbox : list
A list representation of the bounding box in the
form ``[left, lower, right, upper]``.
area : float
The area enclosed by the polygon.
centroid : tuple
The 'center of gravity', i.e. the mean point of the polygon.
Examples
--------
>>> p1 = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
"""
def __init__(self, vertices, holes=None):
self._part_rings = []
self._hole_rings = []
def clockwise(part: list) -> list:
if standalone.is_clockwise(part):
return part[:]
else:
return part[::-1]
vl = list(vertices)
if isinstance(vl[0], list):
self._part_rings = list(map(Ring, vertices))
self._vertices = [clockwise(part) for part in vertices]
else:
self._part_rings = [Ring(vertices)]
self._vertices = [clockwise(vertices)]
if holes is not None and holes != []:
if isinstance(holes[0], list):
self._hole_rings = list(map(Ring, holes))
self._holes = [clockwise(hole) for hole in holes]
else:
self._hole_rings = [Ring(holes)]
self._holes = [clockwise(holes)]
else:
self._holes = [[]]
self._reset_props()
@classmethod
def __from_geo_interface__(cls, geo: dict):
"""While PySAL does not differentiate polygons and multipolygons
GEOS, Shapely, and geoJSON do. In GEOS, etc, polygons may only
have a single exterior ring, all other parts are holes.
MultiPolygons are simply a list of polygons.
"""
geo_type = geo["type"].lower()
if geo_type == "multipolygon":
parts = []
holes = []
for polygon in geo["coordinates"]:
verts = [[Point(pt) for pt in part] for part in polygon]
parts += verts[0:1]
holes += verts[1:]
if not holes:
holes = None
return cls(parts, holes)
else:
verts = [[Point(pt) for pt in part] for part in geo["coordinates"]]
return cls(verts[0:1], verts[1:])
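    # Editor's sketch of a mapping accepted above, following the GeoJSON
    # layout just described (coordinates are illustrative):
    # {"type": "Polygon",
    #  "coordinates": [
    #      [(0, 0), (10, 0), (10, 10), (0, 10)],   # exterior ring
    #      [(2, 2), (4, 2), (4, 4), (2, 4)],       # a hole
    #  ]}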
@property
def __geo_interface__(self) -> dict:
"""Return ``__geo_interface__`` information lookup."""
if len(self.parts) > 1:
geo = {
"type": "MultiPolygon",
"coordinates": [[part] for part in self.parts],
}
if self._holes[0]:
geo["coordinates"][0] += self._holes
return geo
if self._holes[0]:
return {"type": "Polygon", "coordinates": self._vertices + self._holes}
else:
return {"type": "Polygon", "coordinates": self._vertices}
def _reset_props(self):
"""Resets the geometric properties of the polygon."""
self._perimeter = None
self._bounding_box = None
self._bbox = None
self._area = None
self._centroid = None
self._len = None
def __len__(self) -> int:
return self.len
@property
def len(self) -> int:
"""Returns the number of vertices in the polygon.
Examples
--------
>>> p1 = Polygon([Point((0, 0)), Point((0, 1)), Point((1, 1)), Point((1, 0))])
>>> p1.len
4
>>> len(p1)
4
"""
if self._len is None:
self._len = len(self.vertices)
return self._len
@property
def vertices(self) -> list:
"""Returns the vertices of the polygon in clockwise order.
Examples
--------
>>> p1 = Polygon([Point((0, 0)), Point((0, 1)), Point((1, 1)), Point((1, 0))])
>>> len(p1.vertices)
4
"""
return sum([part for part in self._vertices], []) + sum(
[part for part in self._holes], []
)
@property
def holes(self) -> list:
"""Returns the holes of the polygon in clockwise order.
Examples
--------
>>> p = Polygon(
... [Point((0, 0)), Point((10, 0)), Point((10, 10)), Point((0, 10))],
... [Point((1, 2)), Point((2, 2)), Point((2, 1)), Point((1, 1))]
... )
>>> len(p.holes)
1
"""
return [[v for v in part] for part in self._holes]
@property
def parts(self) -> list:
"""Returns the parts of the polygon in clockwise order.
Examples
--------
>>> p = Polygon(
... [
... [Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))],
... [Point((2, 1)), Point((2, 2)), Point((1, 2)), Point((1, 1))]
... ]
... )
>>> len(p.parts)
2
"""
return [[v for v in part] for part in self._vertices]
@property
def perimeter(self) -> Union[int, float]:
"""Returns the perimeter of the polygon.
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
>>> p.perimeter
4.0
"""
        def dist(v1, v2) -> Union[int, float]:
return math.hypot(v1[0] - v2[0], v1[1] - v2[1])
def part_perimeter(part) -> Union[int, float]:
return sum([dist(part[i], part[i + 1]) for i in range(-1, len(part) - 1)])
sum_perim = lambda part_type: sum([part_perimeter(part) for part in part_type])
if self._perimeter is None:
self._perimeter = sum_perim(self._vertices) + sum_perim(self._holes)
return self._perimeter
@property
def bbox(self):
"""Returns the bounding box of the polygon as a list.
Returns
-------
self._bbox : list
The bounding box of the polygon as a list.
See Also
--------
libpysal.cg.bounding_box
"""
if self._bbox is None:
self._bbox = [
self.bounding_box.left,
self.bounding_box.lower,
self.bounding_box.right,
self.bounding_box.upper,
]
return self._bbox
@property
def bounding_box(self):
"""Returns the bounding box of the polygon.
Returns
-------
self._bounding_box : libpysal.cg.Rectangle
The bounding box of the polygon.
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((2, 0)), Point((2, 1)), Point((0, 1))])
>>> p.bounding_box.left
0.0
>>> p.bounding_box.lower
0.0
>>> p.bounding_box.right
2.0
>>> p.bounding_box.upper
1.0
"""
if self._bounding_box is None:
vertices = self.vertices
self._bounding_box = Rectangle(
min([v[0] for v in vertices]),
min([v[1] for v in vertices]),
max([v[0] for v in vertices]),
max([v[1] for v in vertices]),
)
return self._bounding_box
@property
def area(self) -> float:
"""Returns the area of the polygon.
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
>>> p.area
1.0
>>> p = Polygon(
... [Point((0, 0)), Point((10, 0)), Point((10, 10)), Point((0, 10))],
... [Point((2, 1)), Point((2, 2)), Point((1, 2)), Point((1, 1))]
... )
>>> p.area
99.0
"""
def part_area(pv: list) -> float:
__area = 0
for i in range(-1, len(pv) - 1):
__area += (pv[i][0] + pv[i + 1][0]) * (pv[i][1] - pv[i + 1][1])
__area = __area * 0.5
if __area < 0:
                __area = -__area
return __area
sum_area = lambda part_type: sum([part_area(part) for part in part_type])
_area = sum_area(self._vertices) - sum_area(self._holes)
return _area
@property
def centroid(self) -> tuple:
"""Returns the centroid of the polygon.
Notes
-----
The centroid returned by this method is the geometric
centroid and respects multipart polygons with holes.
Also known as the 'center of gravity' or 'center of mass'.
Examples
--------
>>> p = Polygon(
... [Point((0, 0)), Point((10, 0)), Point((10, 10)), Point((0, 10))],
... [Point((1, 1)), Point((1, 2)), Point((2, 2)), Point((2, 1))]
... )
>>> p.centroid
(5.0353535353535355, 5.0353535353535355)
"""
CP = [ring.centroid for ring in self._part_rings]
AP = [ring.area for ring in self._part_rings]
CH = [ring.centroid for ring in self._hole_rings]
AH = [-ring.area for ring in self._hole_rings]
A = AP + AH
cx = sum([pt[0] * area for pt, area in zip(CP + CH, A)]) / sum(A)
cy = sum([pt[1] * area for pt, area in zip(CP + CH, A)]) / sum(A)
return cx, cy
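    # Editor's note on the weighting above: part centroids enter with positive
    # area and hole centroids with negative area, so for the doctest case a
    # 10x10 square (area 100, centroid (5, 5)) with a unit hole centred at
    # (1.5, 1.5) gives (5 * 100 - 1.5 * 1) / 99 = 5.03535...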
def build_quad_tree_structure(self):
"""Build the quad tree structure for this polygon. Once
the structure is built, speed for testing if a point is
inside the ring will be increased significantly.
"""
for ring in self._part_rings:
ring.build_quad_tree_structure()
for ring in self._hole_rings:
ring.build_quad_tree_structure()
self.is_quad_tree_structure_built = True
def contains_point(self, point):
"""Test if a polygon contains a point.
Parameters
----------
point : libpysal.cg.Point
A point to test for containment.
Returns
-------
contains : bool
``True`` if the polygon contains ``point`` otherwise ``False``.
Examples
--------
>>> p = Polygon(
... [Point((0,0)), Point((4,0)), Point((4,5)), Point((2,3)), Point((0,5))]
... )
        >>> p.contains_point((3,3))
        True
        >>> p.contains_point((0,6))
        False
        >>> p.contains_point((2,2.9))
        True
        >>> p.contains_point((4,5))
        False
        >>> p.contains_point((4,0))
        False
Handles holes.
>>> p = Polygon(
... [Point((0, 0)), Point((0, 10)), Point((10, 10)), Point((10, 0))],
... [Point((2, 2)), Point((4, 2)), Point((4, 4)), Point((2, 4))]
... )
>>> p.contains_point((3.0, 3.0))
False
>>> p.contains_point((1.0, 1.0))
True
Notes
-----
Points falling exactly on polygon edges may yield unpredictable results.
"""
searching = True
for ring in self._hole_rings:
if ring.contains_point(point):
contains = False
searching = False
break
if searching:
for ring in self._part_rings:
if ring.contains_point(point):
contains = True
searching = False
break
if searching:
contains = False
return contains
class Rectangle(Geometry):
"""Geometric representation of rectangle objects.
Attributes
----------
left : float
Minimum x-value of the rectangle.
lower : float
Minimum y-value of the rectangle.
right : float
Maximum x-value of the rectangle.
upper : float
Maximum y-value of the rectangle.
Examples
--------
>>> r = Rectangle(-4, 3, 10, 17)
>>> r.left #minx
-4.0
>>> r.lower #miny
3.0
>>> r.right #maxx
10.0
>>> r.upper #maxy
17.0
"""
def __init__(self, left, lower, right, upper):
if right < left or upper < lower:
raise ArithmeticError("Rectangle must have positive area.")
self.left = float(left)
self.lower = float(lower)
self.right = float(right)
self.upper = float(upper)
def __bool__(self):
"""Rectangles will evaluate to False if they have zero area.
        ``__bool__`` is used "to implement truth value
        testing and the built-in operation ``bool()``"
        -- https://docs.python.org/reference/datamodel.html
Examples
--------
>>> r = Rectangle(0, 0, 0, 0)
>>> bool(r)
False
>>> r = Rectangle(0, 0, 1, 1)
>>> bool(r)
True
"""
return bool(self.area)
def __eq__(self, other):
if other:
return self[:] == other[:]
return False
def __add__(self, other):
return Rectangle(
min(self.left, other.left),
min(self.lower, other.lower),
max(self.right, other.right),
max(self.upper, other.upper),
)
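    # Editor's note: ``+`` computes the bounding union -- the smallest
    # rectangle covering both operands, e.g.
    # Rectangle(0, 0, 1, 1) + Rectangle(2, 2, 3, 3) == Rectangle(0, 0, 3, 3)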
def __getitem__(self, key):
"""
Examples
--------
>>> r = Rectangle(-4, 3, 10, 17)
>>> r[:]
[-4.0, 3.0, 10.0, 17.0]
"""
l = [self.left, self.lower, self.right, self.upper]
return l.__getitem__(key)
def set_centroid(self, new_center):
"""Moves the rectangle center to a new specified point.
Parameters
----------
new_center : libpysal.cg.Point
The new location of the centroid of the polygon.
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.set_centroid(Point((4, 4)))
>>> r.left
2.0
>>> r.right
6.0
>>> r.lower
2.0
>>> r.upper
6.0
"""
shift = (
new_center[0] - (self.left + self.right) / 2,
new_center[1] - (self.lower + self.upper) / 2,
)
self.left = self.left + shift[0]
self.right = self.right + shift[0]
self.lower = self.lower + shift[1]
self.upper = self.upper + shift[1]
def set_scale(self, scale):
"""Rescales the rectangle around its center.
Parameters
----------
scale : int, float
The ratio of the new scale to the old
scale (e.g. 1.0 is current size).
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.set_scale(2)
>>> r.left
-2.0
>>> r.right
6.0
>>> r.lower
-2.0
>>> r.upper
6.0
"""
center = ((self.left + self.right) / 2, (self.lower + self.upper) / 2)
self.left = center[0] + scale * (self.left - center[0])
self.right = center[0] + scale * (self.right - center[0])
self.lower = center[1] + scale * (self.lower - center[1])
self.upper = center[1] + scale * (self.upper - center[1])
@property
def area(self) -> Union[int, float]:
"""Returns the area of the Rectangle.
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.area
16.0
"""
return (self.right - self.left) * (self.upper - self.lower)
@property
def width(self) -> Union[int, float]:
"""Returns the width of the Rectangle.
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.width
4.0
"""
return self.right - self.left
@property
def height(self) -> Union[int, float]:
"""Returns the height of the Rectangle.
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.height
4.0
"""
return self.upper - self.lower
_geoJSON_type_to_Pysal_type = {
"point": Point,
"linestring": Chain,
"multilinestring": Chain,
"polygon": Polygon,
"multipolygon": Polygon,
}
# moving this to top breaks unit tests !
from . import standalone
from .polygonQuadTreeStructure import QuadTreeStructureSingleRing
| 2.625 | 3 |
pronebo_dj/main/migrations/0004_faq.py | vlsh1n/pronebo | 0 | 12798862 | <filename>pronebo_dj/main/migrations/0004_faq.py
# Generated by Django 3.2.4 on 2021-06-07 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_item_price'),
]
operations = [
migrations.CreateModel(
name='Faq',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Название')),
('question', models.CharField(max_length=255, verbose_name='Вопрос')),
('answer', models.CharField(max_length=1000, verbose_name='Ответ')),
],
options={
'verbose_name': 'Вопрос/Ответ',
'verbose_name_plural': 'Вопрос(-ов)/Ответ(-ов)',
},
),
]
| 1.78125 | 2 |
pollicino/core/views.py | inmagik/pollicino | 2 | 12798863 | from rest_framework import parsers, renderers
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import ClientTokenSerializer
from .models import ClientToken, App
class ObtainClientAuth(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = ClientTokenSerializer
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
client_secret = serializer.data['client_secret']
app_id = serializer.data['app_id']
app = App.objects.get(pk=app_id)
token, created = ClientToken.objects.get_or_create(client_secret=client_secret, app=app)
return Response({'token': token.key})
obtain_client_auth = ObtainClientAuth.as_view()
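# Example exchange (editor's sketch; the URL path is an assumption, since the
# project's URLconf is not shown here):
#   POST /api/client-auth/  {"client_secret": "...", "app_id": 1}
#   -> 200 {"token": "..."}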
| 1.976563 | 2 |
3 - Lists/dbl_linkedlist.py | codyveladev/ds-alg-practice | 0 | 12798864 | class Node:
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
class DoublyLinkedList:
def __init__(self, value):
new_node = Node(value)
self.head = new_node
self.tail = new_node
self.length = 1
    def append(self, value):
        new_node = Node(value)
        if self.length == 0:
            self.head = new_node
            self.tail = new_node
        else:
            # Point current tail's next to the new node
            self.tail.next = new_node
            # Point new node's prev back to the current tail
            new_node.prev = self.tail
            # Set the tail to the new node
            self.tail = new_node
        # Increment length in both branches so append always returns True
        self.length += 1
        return True
    def pop(self):
        # Empty list
        if self.length == 0:
            return None
        # List with only one item: capture the node before clearing the list
        if self.length == 1:
            temp = self.head
            self.head = None
            self.tail = None
            self.length -= 1
            return temp
        # Multiple items in list
        else:
            temp = self.tail
            self.tail = self.tail.prev
            self.tail.next = None
            temp.prev = None
            self.length -= 1
            return temp
def prepend(self, value):
new_node = Node(value)
#Empty list
if self.length == 0:
self.head = new_node
self.tail = new_node
else:
            # Point new node's next to the current head
            new_node.next = self.head
            # Point current head's prev back to the new node
            self.head.prev = new_node
#Set the head to the new head
self.head = new_node
#Increment the length by 1
self.length += 1
return self.head
def pop_first(self):
#Empty List
if self.length == 0:
return None
#Old head
temp = self.head
#One item in list
if self.length == 1:
self.head = None
self.tail = None
else:
#Set the head to the new head
self.head = self.head.next
#Point the new head's prev to None instead of old head
self.head.prev = None
#Point old head's next to None
temp.next = None
self.length -= 1
return temp.value
def get(self, index):
if index < 0 or index >= self.length:
return None
else:
temp = self.head
mid = self.length // 2
if mid >= index:
for _ in range(index):
temp = temp.next
else:
temp = self.tail
                for _ in range(self.length - 1, index, -1):
temp = temp.prev
return temp
def set_val(self, index, value):
temp = self.get(index)
if temp:
temp.value = value
return True
return False
def insert(self, index, value):
        if index < 0 or index > self.length:
return None
if index == 0:
return self.prepend(value)
if index == self.length:
return self.append(value)
new_node = Node(value)
before = self.get(index - 1)
after = before.next
#Set the pointers for before
before.next = new_node
new_node.prev = before
#Set the pointers for after
after.prev = new_node
new_node.next = after
        self.length += 1
        return True
def remove(self, index):
if index < 0 or index >= self.length:
return None
if index == 0:
return self.pop_first()
if index == self.length - 1:
return self.pop()
node_to_remove = self.get(index)
before = node_to_remove.prev
after = node_to_remove.next
#Set pointers
before.next = after
after.prev = before
#Remove connections
node_to_remove.next = None
node_to_remove.prev = None
self.length -=1
return node_to_remove
def print_list(self):
temp = self.head
while temp:
if not temp.next:
print(temp.value, end=" <--> None \n")
else:
print(temp.value, end=" <--> ")
temp = temp.next
my_dbl_list = DoublyLinkedList(1)
my_dbl_list.append(2)
my_dbl_list.append(5)
my_dbl_list.prepend(15)
# print(my_dbl_list.set_val(3,8))
my_dbl_list.insert(1, 69)
print(my_dbl_list.remove(2))
my_dbl_list.print_list()
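# Expected output (editor's note, given the index/step fixes above): remove(2)
# returns the node holding 1, printed as a bare object repr, followed by:
# 15 <--> 69 <--> 2 <--> 5 <--> None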
| 3.890625 | 4 |
website/drawquest/apps/twitter/tests.py | bopopescu/drawquest-web | 61 | 12798865 | <gh_stars>10-100
from drawquest.tests.tests_helpers import (CanvasTestCase, create_content, create_user, create_group,
create_comment, create_staff, create_quest, create_quest_comment)
from services import Services, override_service
class TestSimpleThing(CanvasTestCase):
def test_basic_addition(self):
""" Tests that 1 + 1 always equals 2. """
self.failUnlessEqual(1 + 1, 2)
| 1.921875 | 2 |
generator/itertool_groupby.py | sherlockliu/pythonic | 0 | 12798866 | # !/usr/bin/python
from itertools import groupby
def compress(data):
return ((len(list(group)), name) for name, group in groupby(data))
def decompress(data):
return (car * size for size, car in data)
my_data = 'get uuuuuuuuuuuuuuuup'
print(list(my_data))
compressed = compress(my_data)
print(''.join(decompress(compressed)))
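# Round-trip sanity check (editor's addition): run-length encoding via groupby
# re-expands to the original string.
assert ''.join(decompress(compress('aaab'))) == 'aaab'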
| 3.90625 | 4 |
tests/unit/lms/views/basic_lti_launch_test.py | robertknight/lms | 0 | 12798867 | <reponame>robertknight/lms
from unittest import mock
import pytest
from lms.resources import LTILaunchResource
from lms.resources._js_config import JSConfig
from lms.views.basic_lti_launch import BasicLTILaunchViews
from tests import factories
def canvas_file_basic_lti_launch_caller(context, pyramid_request):
"""
Call BasicLTILaunchViews.canvas_file_basic_lti_launch().
Set up the appropriate conditions and then call
BasicLTILaunchViews.canvas_file_basic_lti_launch(), and return whatever
BasicLTILaunchViews.canvas_file_basic_lti_launch() returns.
"""
# The file_id param is always present when canvas_file_basic_lti_launch()
# is called. The canvas_file=True view predicate ensures this.
pyramid_request.params["file_id"] = "TEST_FILE_ID"
views = BasicLTILaunchViews(context, pyramid_request)
return views.canvas_file_basic_lti_launch()
def db_configured_basic_lti_launch_caller(context, pyramid_request):
"""
Call BasicLTILaunchViews.db_configured_basic_lti_launch().
Set up the appropriate conditions and then call
BasicLTILaunchViews.db_configured_basic_lti_launch(), and return whatever
BasicLTILaunchViews.db_configured_basic_lti_launch() returns.
"""
views = BasicLTILaunchViews(context, pyramid_request)
return views.db_configured_basic_lti_launch()
def url_configured_basic_lti_launch_caller(context, pyramid_request):
"""
Call BasicLTILaunchViews.url_configured_basic_lti_launch().
Set up the appropriate conditions and then call
BasicLTILaunchViews.url_configured_basic_lti_launch(), and return whatever
BasicLTILaunchViews.url_configured_basic_lti_launch() returns.
"""
# The `url` parsed param is always present when
# url_configured_basic_lti_launch() is called. The url_configured=True view
# predicate and URLConfiguredBasicLTILaunchSchema ensure this.
pyramid_request.parsed_params = {"url": "TEST_URL"}
views = BasicLTILaunchViews(context, pyramid_request)
return views.url_configured_basic_lti_launch()
def unconfigured_basic_lti_launch_caller(context, pyramid_request):
"""
Call BasicLTILaunchViews.unconfigured_basic_lti_launch().
Set up the appropriate conditions and then call
BasicLTILaunchViews.unconfigured_basic_lti_launch(), and return whatever
BasicLTILaunchViews.unconfigured_basic_lti_launch() returns.
"""
views = BasicLTILaunchViews(context, pyramid_request)
return views.unconfigured_basic_lti_launch()
def configure_module_item_caller(context, pyramid_request):
"""
Call BasicLTILaunchViews.configure_module_item().
Set up the appropriate conditions and then call
BasicLTILaunchViews.configure_module_item(), and return whatever
BasicLTILaunchViews.configure_module_item() returns.
"""
# The document_url, resource_link_id and tool_consumer_instance_guid parsed
# params are always present when configure_module_item() is called.
# ConfigureModuleItemSchema ensures this.
pyramid_request.parsed_params = {
"document_url": "TEST_DOCUMENT_URL",
"resource_link_id": "TEST_RESOURCE_LINK_ID",
"tool_consumer_instance_guid": "TEST_TOOL_CONSUMER_INSTANCE_GUID",
}
views = BasicLTILaunchViews(context, pyramid_request)
return views.configure_module_item()
class TestBasicLTILaunchViewsInit:
"""Unit tests for BasicLTILaunchViews.__init__()."""
def test_it_sets_the_focused_user(self, context, pyramid_request):
BasicLTILaunchViews(context, pyramid_request)
context.js_config.maybe_set_focused_user.assert_called_once_with()
class TestCommon:
"""
Tests common to multiple (but not all) BasicLTILaunchViews views.
See the parametrized `view_caller` fixture below for the list of view
methods that these tests apply to.
"""
def test_it_reports_lti_launches(
self, context, pyramid_request, LtiLaunches, view_caller
):
pyramid_request.params.update(
{
"context_id": "TEST_CONTEXT_ID",
"oauth_consumer_key": "TEST_OAUTH_CONSUMER_KEY",
}
)
view_caller(context, pyramid_request)
LtiLaunches.add.assert_called_once_with(
pyramid_request.db,
pyramid_request.params["context_id"],
pyramid_request.params["oauth_consumer_key"],
)
@pytest.mark.usefixtures("user_is_learner")
def test_it_calls_grading_info_upsert(
self, context, pyramid_request, grading_info_service, view_caller
):
view_caller(context, pyramid_request)
grading_info_service.upsert_from_request.assert_called_once_with(
pyramid_request,
h_user=pyramid_request.lti_user.h_user,
lti_user=pyramid_request.lti_user,
)
def test_it_does_not_call_grading_info_upsert_if_instructor(
self, context, pyramid_request, grading_info_service, view_caller
):
pyramid_request.lti_user = factories.LTIUser(roles="instructor")
view_caller(context, pyramid_request)
grading_info_service.upsert_from_request.assert_not_called()
@pytest.mark.usefixtures("is_canvas")
def test_it_does_not_call_grading_info_upsert_if_canvas(
self, context, pyramid_request, grading_info_service, view_caller
):
view_caller(context, pyramid_request)
grading_info_service.upsert_from_request.assert_not_called()
@pytest.fixture(
params=[
canvas_file_basic_lti_launch_caller,
db_configured_basic_lti_launch_caller,
url_configured_basic_lti_launch_caller,
configure_module_item_caller,
]
)
def view_caller(self, request):
"""
Return a function that calls the view method to be tested.
This is a parametrized fixture. A test that uses this fixture will be
run multiple times, once for each parametrized version of this fixture.
See https://docs.pytest.org/en/latest/fixture.html#parametrizing-fixtures
"""
return request.param
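# Editor's sketch of the parametrized-fixture pattern described above: pytest
# re-runs every test that depends on the fixture once per param, e.g.
#
#   @pytest.fixture(params=[1, 2])
#   def n(request):
#       return request.param
#
#   def test_positive(n):  # collected twice, with n=1 and n=2
#       assert n > 0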
class TestCourseRecording:
def test_it_records_the_course_in_the_DB(
self, context, pyramid_request, view_caller, course_service
):
view_caller(context, pyramid_request)
course_service.get_or_create.assert_called_once_with(
context.h_group.authority_provided_id
)
@pytest.fixture(
params=[
canvas_file_basic_lti_launch_caller,
db_configured_basic_lti_launch_caller,
url_configured_basic_lti_launch_caller,
unconfigured_basic_lti_launch_caller,
]
)
def view_caller(self, request):
"""
Return a function that calls the view method to be tested.
This is a parametrized fixture. A test that uses this fixture will be
run multiple times, once for each parametrized version of this fixture.
See https://docs.pytest.org/en/latest/fixture.html#parametrizing-fixtures
"""
return request.param
@pytest.mark.usefixtures("is_canvas")
class TestCanvasFileBasicLTILaunch:
@pytest.mark.usefixtures("is_canvas")
def test_it_adds_the_canvas_file_id(self, context, pyramid_request):
canvas_file_basic_lti_launch_caller(context, pyramid_request)
context.js_config.add_canvas_file_id.assert_called_once_with(
pyramid_request.params["file_id"]
)
class TestDBConfiguredBasicLTILaunch:
def test_it_enables_frontend_grading(self, context, pyramid_request):
db_configured_basic_lti_launch_caller(context, pyramid_request)
context.js_config.maybe_enable_grading.assert_called_once_with()
def test_it_adds_the_document_url(
self, context, pyramid_request, ModuleItemConfiguration
):
db_configured_basic_lti_launch_caller(context, pyramid_request)
ModuleItemConfiguration.get_document_url.assert_called_once_with(
pyramid_request.db, "TEST_GUID", "TEST_RESOURCE_LINK_ID"
)
context.js_config.add_document_url.assert_called_once_with(
ModuleItemConfiguration.get_document_url.return_value
)
class TestURLConfiguredBasicLTILaunch:
def test_it_enables_frontend_grading(self, context, pyramid_request):
url_configured_basic_lti_launch_caller(context, pyramid_request)
context.js_config.maybe_enable_grading.assert_called_once_with()
def test_it_adds_the_document_url(self, context, pyramid_request):
url_configured_basic_lti_launch_caller(context, pyramid_request)
context.js_config.add_document_url.assert_called_once_with(
pyramid_request.parsed_params["url"]
)
class TestConfigureModuleItem:
def test_it_saves_the_assignments_document_url_to_the_db(
self, context, pyramid_request, ModuleItemConfiguration
):
configure_module_item_caller(context, pyramid_request)
ModuleItemConfiguration.set_document_url.assert_called_once_with(
pyramid_request.db,
"TEST_TOOL_CONSUMER_INSTANCE_GUID",
"TEST_RESOURCE_LINK_ID",
"TEST_DOCUMENT_URL",
)
def test_it_enables_frontend_grading(self, context, pyramid_request):
configure_module_item_caller(context, pyramid_request)
context.js_config.maybe_enable_grading.assert_called_once_with()
def test_it_adds_the_document_url(self, context, pyramid_request):
configure_module_item_caller(context, pyramid_request)
context.js_config.add_document_url.assert_called_once_with(
pyramid_request.parsed_params["document_url"]
)
class TestUnconfiguredBasicLTILaunch:
def test_it_enables_content_item_selection_mode(
self, BearerTokenSchema, bearer_token_schema, context, pyramid_request
):
unconfigured_basic_lti_launch_caller(context, pyramid_request)
BearerTokenSchema.assert_called_once_with(pyramid_request)
bearer_token_schema.authorization_param.assert_called_once_with(
pyramid_request.lti_user
)
context.js_config.enable_content_item_selection_mode.assert_called_once_with(
form_action="http://example.com/module_item_configurations",
form_fields=dict(
self.form_fields(),
authorization=bearer_token_schema.authorization_param.return_value,
),
)
def form_fields(self):
return {
"user_id": "TEST_USER_ID",
"resource_link_id": "TEST_RESOURCE_LINK_ID",
"oauth_consumer_key": "TEST_OAUTH_CONSUMER_KEY",
"tool_consumer_instance_guid": "TEST_TOOL_CONSUMER_INSTANCE_GUID",
"context_id": "TEST_CONTEXT_ID",
}
@pytest.fixture
def pyramid_request(self, pyramid_request):
pyramid_request.params = dict(
self.form_fields(),
oauth_nonce="TEST_OAUTH_NONCE",
oauth_timestamp="TEST_OAUTH_TIMESTAMP",
oauth_signature="TEST_OAUTH_SIGNATURE",
)
return pyramid_request
class TestUnconfiguredBasicLTILaunchNotAuthorized:
def test_it_returns_the_right_template_data(self, context, pyramid_request):
data = BasicLTILaunchViews(
context, pyramid_request
).unconfigured_basic_lti_launch_not_authorized()
assert data == {}
pytestmark = pytest.mark.usefixtures(
"ai_getter", "course_service", "h_api", "grading_info_service", "lti_h_service"
)
@pytest.fixture
def context():
context = mock.create_autospec(LTILaunchResource, spec_set=True, instance=True)
context.js_config = mock.create_autospec(JSConfig, spec_set=True, instance=True)
context.is_canvas = False
return context
@pytest.fixture
def is_canvas(context):
"""Set the LMS that launched us to Canvas."""
context.is_canvas = True
@pytest.fixture
def pyramid_request(pyramid_request):
pyramid_request.params.update(
{
"lis_result_sourcedid": "modelstudent-assignment1",
"lis_outcome_service_url": "https://hypothesis.shinylms.com/outcomes",
}
)
return pyramid_request
@pytest.fixture(autouse=True)
def BearerTokenSchema(patch):
return patch("lms.views.basic_lti_launch.BearerTokenSchema")
@pytest.fixture(autouse=True)
def LtiLaunches(patch):
return patch("lms.views.basic_lti_launch.LtiLaunches")
@pytest.fixture(autouse=True)
def ModuleItemConfiguration(patch):
return patch("lms.views.basic_lti_launch.ModuleItemConfiguration")
@pytest.fixture
def bearer_token_schema(BearerTokenSchema):
return BearerTokenSchema.return_value
| 2.21875 | 2 |
uploader/custom_auth/middleware.py | stfc/cvmfs-stratum-uploader | 0 | 12798868 | <reponame>stfc/cvmfs-stratum-uploader
from django.core.urlresolvers import reverse
from django.http import HttpResponsePermanentRedirect
class NoLoginAdminRedirectMiddleware:
"""
    This middleware redirects users who are not authenticated staff members away from the admin pages.
"""
def process_request(self, request):
if request.META['PATH_INFO'].startswith('/admin'):
if not request.user.is_authenticated() or not request.user.is_staff:
return HttpResponsePermanentRedirect(reverse('index'))
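# To activate (editor's note; this class uses the pre-Django-1.10 style of
# middleware, so the exact setting name and dotted path are assumptions):
# MIDDLEWARE_CLASSES = [
#     ...,
#     'custom_auth.middleware.NoLoginAdminRedirectMiddleware',
# ]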
| 2.1875 | 2 |
Source Separation/Archive/spleeter_test.py | elliottwaissbluth/tensor-hero | 1 | 12798869 | <gh_stars>1-10
from spleeter.separator import Separator
# Use audio loader explicitly for loading audio waveform :
from spleeter.audio.adapter import AudioAdapter
from scipy.io.wavfile import write
from pydub import AudioSegment
separator = Separator('spleeter:2stems')
# separator.separate_to_file('/path/to/audio', '2stem_sep_audio')
# separator.separate_to_file('/path/to/audio', '/path/to/output/directory')
audio_loader = AudioAdapter.default()
sample_rate = 22050
#waveform, _ = audio_loader.load('/path/to/audio/file', sample_rate=sample_rate)
waveform, _ = audio_loader.load(r'C:\Users\ewais\Documents\GitHub\tensor-hero\Source Separation\song.ogg', sample_rate=sample_rate)
# Perform the separation :
prediction = separator.separate(waveform)
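# `prediction` maps stem names to waveform arrays; for the 2stems model the
# keys are 'vocals' and 'accompaniment'. A hedged sketch of saving one stem
# with the scipy writer imported above (the output filename is arbitrary):
# write('vocals.wav', sample_rate, prediction['vocals'])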
print(prediction) | 2.796875 | 3 |
tests/test_grids.py | knaidoo29/magpie | 0 | 12798870 | import numpy as np
import magpie
# check cartesian
def test_get_xedges():
xedges = magpie.grids.get_xedges(1., 2)
xedges = np.round(xedges, decimals=2)
assert len(xedges) == 3, "Length of xedges is incorrect."
assert xedges[-1] - xedges[0] == 1., "xedges range is incorrect."
xedges = magpie.grids.get_xedges(1., 2, xmin=-1.)
xedges = np.round(xedges, decimals=2)
assert xedges[0]==-1. and xedges[1]==-0.5 and xedges[-1]==0., "xedges with xmin are not as expected."
assert xedges[-1] - xedges[0] == 1., "xedges range is incorrect."
def test_xedges2mid():
xedges = magpie.grids.get_xedges(1., 10)
xmid = magpie.grids.xedges2mid(xedges)
xmid = np.round(xmid, decimals=2)
assert len(xedges) == len(xmid) + 1, "Length of xmid is incorrect."
assert xmid[0] == 0.05 and xmid[1] == 0.15 and xmid[5] == 0.55, "xmid is not as expected."
def test_xmid2edges():
xedges = magpie.grids.get_xedges(1., 10)
xmid = magpie.grids.xedges2mid(xedges)
xedges2 = magpie.grids.xmid2edges(xmid)
    assert np.round(np.sum(xedges-xedges2), decimals=2) == 0., "Conversion from xmid to xedges is not consistent with input xedges."
def test_grid1d():
xmid = magpie.grids.grid1d(10., 10)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid1d unexpected results."
xmid = magpie.grids.grid1d(10., 10, xmin=10)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid1d unexpected results."
xmid, xedges = magpie.grids.grid1d(10., 10, return_edges=True)
assert len(xmid)+1 == len(xedges), "Length of xmid and xedges is not as expected."
assert np.round(xedges[0], decimals=4) == 0. and np.round(xedges[7], decimals=4) == 7., "grid1d unexpected results."
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid1d unexpected results."
def test_grid2d():
x2d, y2d = magpie.grids.grid2d(10, 10)
assert np.shape(x2d) == (10, 10), "shape is not as expected."
assert np.shape(y2d) == (10, 10), "shape is not as expected."
x2d, y2d, xmid, ymid = magpie.grids.grid2d(10, 10, return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d, xmid, ymid = magpie.grids.grid2d(10, 10, mins=[10., 20.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d = magpie.grids.grid2d(10, [10, 20])
assert np.shape(x2d) == (10, 20), "shape is not as expected."
assert np.shape(y2d) == (10, 20), "shape is not as expected."
x2d, y2d, xmid, ymid = magpie.grids.grid2d([10, 20], [10, 20], return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
x2d, y2d, xmid, ymid = magpie.grids.grid2d([10, 20], [10, 20], mins=[10., 20.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(x2d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x2d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid2d unexpected results."
assert np.round(np.sum(np.unique(y2d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y2d."
def test_grid3d():
x3d, y3d, z3d = magpie.grids.grid3d(10, 10)
assert np.shape(x3d) == (10, 10, 10), "shape is not as expected."
assert np.shape(y3d) == (10, 10, 10), "shape is not as expected."
assert np.shape(z3d) == (10, 10, 10), "shape is not as expected."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d(10, 10, return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 0.5 and np.round(zmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d(10, 10, mins=[10., 20., 30.], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 30.5 and np.round(zmid[7], decimals=4) == 37.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d = magpie.grids.grid3d(10, [10, 20, 30])
assert np.shape(x3d) == (10, 20, 30), "shape is not as expected."
assert np.shape(y3d) == (10, 20, 30), "shape is not as expected."
assert np.shape(z3d) == (10, 20, 30), "shape is not as expected."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d([10, 20, 30], [10, 20, 30], return1d=True)
assert np.round(xmid[0], decimals=4) == 0.5 and np.round(xmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 0.5 and np.round(ymid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 0.5 and np.round(zmid[7], decimals=4) == 7.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
x3d, y3d, z3d, xmid, ymid, zmid = magpie.grids.grid3d([10, 20, 30], [10, 20, 30], mins=[10., 20., 30], return1d=True)
assert np.round(xmid[0], decimals=4) == 10.5 and np.round(xmid[7], decimals=4) == 17.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(x3d.flatten())-xmid), decimals=4) == 0., "xmid is inconsistent with x3d."
assert np.round(ymid[0], decimals=4) == 20.5 and np.round(ymid[7], decimals=4) == 27.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(y3d.flatten())-ymid), decimals=4) == 0., "ymid is inconsistent with y3d."
assert np.round(zmid[0], decimals=4) == 30.5 and np.round(zmid[7], decimals=4) == 37.5, "grid3d unexpected results."
assert np.round(np.sum(np.unique(z3d.flatten())-zmid), decimals=4) == 0., "zmid is inconsistent with z3d."
# check polar
def test_polargrid():
r2d, p2d = magpie.grids.polargrid(10, 20)
assert np.shape(r2d) == (10, 20), "shape is not as expected."
assert np.shape(p2d) == (10, 20), "shape is not as expected."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 20, return1d=True)
assert np.round(rmid[0], decimals=4) == 0.05 and np.round(rmid[7], decimals=4) == 0.75, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round(np.pi/20, decimals=4) and np.round(pmid[7], decimals=4) == np.round(15*np.pi/20, decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
r2d, p2d, rmid, pmid = magpie.grids.polargrid(10, 10, rmin=10., rmax=20., phimin=np.pi/2., phimax=np.pi, return1d=True)
assert np.round(rmid[0], decimals=4) == 10.5 and np.round(rmid[7], decimals=4) == 17.5, "polargrid unexpected results."
assert np.round(np.sum(np.unique(r2d.flatten())-rmid), decimals=4) == 0., "rmid is inconsistent with r2d."
assert np.round(pmid[0], decimals=4) == np.round((np.pi/2.)/20 + np.pi/2., decimals=4) \
and np.round(pmid[7], decimals=4) == np.round(15*(np.pi/2.)/20 + np.pi/2., decimals=4), "polargrid unexpected results."
assert np.round(np.sum(np.unique(p2d.flatten())-pmid), decimals=4) == 0., "pmid is inconsistent with p2d."
def test_polarEA():
r, p = magpie.grids.polarEA_grid(10)
npix = magpie.grids.polarEA_npix(10)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
r, p = magpie.grids.polarEA_grid(6, base_nphi=3)
npix = magpie.grids.polarEA_npix(6, base_nphi=3)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
r, p = magpie.grids.polarEA_grid(10, base_nphi=3)
npix = magpie.grids.polarEA_npix(10, base_nphi=3)
assert len(r) == len(p), "PolarEA grid size for r and p are not the same."
assert len(r) == npix, "Length of polarEA grid does not match expectations."
assert r[3*4**2] == 0.45, "r values are incorrect."
assert r[3*7**2] == 0.75, "r values are incorrect."
assert np.round(p[3*4**2], decimals=4) == np.round(np.pi/(3*(2*4+1)), decimals=4), "p values are incorrect."
assert np.round(p[3*7**2 + 7], decimals=4) == np.round(15*np.pi/(3*(2*7+1)), decimals=4), "p values are incorrect."
area = magpie.grids.polarEA_area(10, rmax=10., base_nphi=4)
assert(np.round(area, decimals=4) == np.round(np.pi/4., decimals=4)), "area calculation is incorrect."
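# Editor's note on the index math exercised above: in the polarEA layout,
# ring i (0-based) holds base_nphi * (2*i + 1) equal-area pixels, so ring i
# starts at flat index base_nphi * i**2 and npix = base_nphi * nr**2 -- hence
# the r[3*4**2] and r[3*7**2] probes for base_nphi=3.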
| 2.59375 | 3 |
testing/chat/chatclient.py | FinleyDavies/super-pygame-bomberman | 0 | 12798871 | from communication import *
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 1123))
while True:
m = receive_message(s)
if m:
print(m, "\n")
ping(s)
print(s.getsockname())
print(socket.gethostbyname(socket.gethostname()))
print(socket.gethostname())
test/simple_log/quick_start.py | lesteve/tensorwatch | 3,453 | 12798872 | <reponame>lesteve/tensorwatch<gh_stars>1000+
import tensorwatch as tw
import time
w = tw.Watcher(filename='test.log')
s = w.create_stream(name='my_metric')
#w.make_notebook()
for i in range(1000):
s.write((i, i*i))
time.sleep(1)
| 2.03125 | 2 |
insights/parsers/dmsetup.py | mglantz/insights-core | 1 | 12798873 | """
dmsetup commands - Command ``dmsetup``
======================================
Parsers for extracting data from the output of ``dmsetup``-related commands.
Parsers contained in this module are:
DmsetupInfo - command ``dmsetup info -C``
-----------------------------------------
"""
from insights import parser, CommandParser
from insights.parsers import parse_delimited_table
from insights.specs import Specs
@parser(Specs.dmsetup_info)
class DmsetupInfo(CommandParser):
"""
``dmsetup info -C`` command output
Example input::
Name Maj Min Stat Open Targ Event UUID
VG00-tmp 253 8 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4
VG00-home 253 3 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxCqXOnbGe2zjhX923dFiIdl1oi7mO9tXp
VG00-var 253 6 L--w 1 2 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxicvyvt67113nTb8vMlGfgdEjDx0LKT2O
VG00-swap 253 1 L--w 2 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax3Ll2XhOYZkylx1CjOQi7G4yHgrIOsyqG
VG00-root 253 0 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxKpnAKYhrYMYMNMwjegkW965bUgtJFTRY
VG00-var_log_audit 253 5 L--w 1 1 0 LVM-<KEY>
Example data structure produced::
data = [
{
'Stat': 'L--w',
'Name': 'VG00-tmp',
'Min': '8',
'Targ': '1',
'Maj': '253',
'Open': '1',
'Event': '0',
'UUID': 'LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4'
},...
]
Attributes:
data (list): List of devices found, in order
names (list): Device names, in order found
uuids (list): UUID
by_name (dict): Access to each device by devicename
by_uuid (dict): Access to each device by uuid
Example:
>>> len(info)
6
>>> info.names[0]
'VG00-tmp'
>>> info[1]['Maj']
'253'
>>> info[1]['Stat']
'L--w'
"""
def parse_content(self, content):
self.data = parse_delimited_table(content)
self.names = [dm['Name'] for dm in self.data if 'Name' in dm]
self.by_name = dict((dm['Name'], dm) for dm in self.data if 'Name' in dm)
self.uuids = [dm['UUID'] for dm in self.data if 'UUID' in dm]
self.by_uuid = dict((dm['UUID'], dm) for dm in self.data if 'UUID' in dm)
def __len__(self):
"""
The length of the devices list
"""
return len(self.data)
def __iter__(self):
"""
Iterate through the devices list
"""
for dm in self.data:
yield dm
def __getitem__(self, idx):
"""
Fetch a device by index in devices list
"""
return self.data[idx]
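# Usage sketch (editor's addition): in insights-core tests this parser is
# typically driven via the ``context_wrap`` helper, e.g.
#   info = DmsetupInfo(context_wrap(DMSETUP_INFO_OUTPUT))
#   info.by_name['VG00-root']['UUID']
# ``context_wrap`` and the sample-output constant are assumptions here; they
# are not imported in this module.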
| 1.78125 | 2 |
code.py | theyoungkwon/mastering_scikit | 0 | 12798874 | import sys, os, random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import *
# find a current file' directory path.
try:
dirpath = os.path.dirname(__file__)
except Exception as inst:
dirpath = ''
pass
f_name1 = os.path.join(dirpath,"../datasets/breast-cancer.npz")
f_name2 = os.path.join(dirpath,"../datasets/diabetes.npz")
f_name3 = os.path.join(dirpath,"../datasets/digit.npz")
f_name4 = os.path.join(dirpath,"../datasets/iris.npz")
f_name5 = os.path.join(dirpath,"../datasets/wine.npz")
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
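# Usage sketch (editor's note): re-centre a heatmap's colour scale on a chosen
# score, e.g.
# plt.imshow(scores, cmap=plt.cm.hot,
#            norm=MidpointNormalize(vmin=0.0, vmax=1.0, midpoint=0.5))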
class ClassModels:
def __init__(self):
self.name = ''
self.grid = ''
self.param_grid = ''
self.cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
self.scoring = 'neg_log_loss' #'accuracy', 'f1', 'precision', 'recall', 'roc_auc'
def trainModel(self, cname):
if (cname == "Logistic Regression"):
self.trainLogisticRegression()
elif (cname == "Linear SVM"):
self.trainLinearSVM()
elif (cname == "RBF SVM"):
self.trainRBFSVM()
elif (cname == "Neural Nets"):
self.trainNeuralNets()
else:
print("Please put existing classifier names")
pass
# run CV according to params for each classifier
def trainLogisticRegression(self):
# TODO: try different scoring rule such as Accuracy (default), F1-measure, AUC
loss_range = ['log']
penalty_range = ['l2','l1','none']
alpha_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params
self.param_grid = dict(loss=loss_range, penalty=penalty_range, alpha=alpha_range, max_iter=[1000], tol=[1e-3])
self.grid = GridSearchCV(SGDClassifier(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainLinearSVM(self):
kernel_range = ['linear']
C_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params :
self.param_grid = dict(kernel=kernel_range, C=C_range)
self.grid = GridSearchCV(SVC(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainRBFSVM(self):
# params C / gamma
kernel_range = ['rbf']
C_range = np.geomspace(1.e-07, 1.e+05, num=13) # 13 params :
gamma_range = np.array([0.001,0.005,0.01,0.05,0.1,0.5,1,2,3]) # 9 params
self.param_grid = dict(kernel=kernel_range, gamma=gamma_range, C=C_range)
self.grid = GridSearchCV(SVC(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
def trainNeuralNets(self):
# early stopping default False, Momentum default 0.9
hidden_layer_sizes_range = np.array([1,2,3,4,5,6,7,8,9,10,16,32]) # 12 params
activation_range = ['logistic']
solver_range = ['sgd']
learning_rate_init_range = np.array([1.0e-04,1.0e-03,1.0e-02,1.0e-01]) # 4 params
self.param_grid = dict(hidden_layer_sizes=hidden_layer_sizes_range,
activation=activation_range,solver=solver_range,
learning_rate_init=learning_rate_init_range,
max_iter=[1000])
self.grid = GridSearchCV(MLPClassifier(), param_grid=self.param_grid, cv=self.cv,
n_jobs=-1)
pass
class Report:
def __init__(self):
pass
# Loss + Accuracy (training + test)
# auc + confusion matrix
# cpu computation time
def showResult(self, model, predicted_test, target_test, predicted_train, target_train):
print("The best parameters are %s with a score of %0.3f"
% (model.grid.best_params_, model.grid.best_score_))
print("The Train Log Loss %0.3f Zero one loss %f"
% (log_loss(target_train, predicted_train), zero_one_loss(target_train, predicted_train)))
print("The test Log Loss %0.3f Zero one loss %f"
% (log_loss(target_test, predicted_test), zero_one_loss(target_test, predicted_test)))
print("The train Accuracy %0.3f"
% (accuracy_score(target_train, predicted_train)))
print("The test Accuracy %0.3f"
% (accuracy_score(target_test, predicted_test) ))
print("The test AUC of %0.3f"
% (roc_auc_score(target_test, predicted_test) ))
print("The mean training time of %f"
% (np.mean(model.grid.cv_results_['mean_fit_time'], axis=0)) )
print("The mean test time of %f"
% (np.mean(model.grid.cv_results_['mean_score_time'], axis=0)) )
# confusion matrix
print("confusion matrix / precision recall scores")
print ( confusion_matrix(target_test, predicted_test) )
print ( classification_report(target_test, predicted_test) )
pass
def showPlot(self, model, clfname):
if (clfname == "Logistic Regression"):
self.showLogisticRegression(model, clfname)
elif (clfname == "Linear SVM"):
self.showLinearSVM(model, clfname)
elif (clfname == "RBF SVM"):
self.showRBFSVM(model, clfname)
elif (clfname == "Neural Nets"):
self.showNeuralNets(model, clfname)
else:
print("Please put existing classifier names")
pass
def showLogisticRegression(self, model, clfname):
penalty_range = model.param_grid['penalty']
alpha_range = model.param_grid['alpha'] # 13 params
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(alpha_range),len(penalty_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score, vmax=max_score, midpoint=mean_score))
plt.xlabel('penalty')
plt.ylabel('alpha (regularization)')
plt.colorbar()
plt.xticks(np.arange(len(penalty_range)), penalty_range, rotation=45)
plt.yticks(np.arange(len(alpha_range)), alpha_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showLinearSVM(self, model, clfname):
C_range = model.param_grid['C']
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(C_range),1)
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score, vmax=max_score, midpoint=mean_score))
plt.ylabel('C')
plt.colorbar()
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showRBFSVM(self, model, clfname):
C_range = model.param_grid['C']
gamma_range = model.param_grid['gamma']
# scores = model.grid.cv_results_['mean_test_score'].reshape(len(C_range), len(gamma_range))
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(C_range), len(gamma_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
# plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
# norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score,vmax=max_score, midpoint=mean_score))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
# plt.show()
pass
def showNeuralNets(self, model, clfname):
hidden_layer_sizes_range = model.param_grid['hidden_layer_sizes']
learning_rate_init_range = model.param_grid['learning_rate_init']
scores = np.array(model.grid.cv_results_['mean_test_score'])
min_score = scores.min()
max_score = scores.max()
mean_score = np.mean(scores, axis=0)
scores = scores.reshape(len(learning_rate_init_range), len(hidden_layer_sizes_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=min_score,vmax=max_score, midpoint=mean_score))
plt.xlabel('hidden_layer_sizes')
plt.ylabel('learning_rate_init')
plt.colorbar()
plt.xticks(np.arange(len(hidden_layer_sizes_range)), hidden_layer_sizes_range, rotation=45)
plt.yticks(np.arange(len(learning_rate_init_range)), learning_rate_init_range)
plt.title('Validation accuracy')
# plt.show()
pass
def plotLROverTime(data_x, loss_y, acc_y, idx):
# Set the style globally
# Alternatives include bmh, fivethirtyeight, ggplot,
# dark_background, seaborn-deep, etc
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
# Set an aspect ratio
width, height = plt.figaspect(1.68)
fig = plt.figure(figsize=(width, height), dpi=400)
plt.plot(data_x, loss_y, linewidth=0.5, linestyle=':', marker='o',
markersize=2, label='loss')
plt.plot(data_x, acc_y, linewidth=0.5, linestyle='--', marker='v',
markersize=2, label='accuracy')
plt.xlabel('Data Points')
plt.ylabel('Score')
# Axes alteration to put zero values inside the figure Axes
# Avoids axis white lines cutting through zero values - fivethirtyeight style
xmin, xmax, ymin, ymax = plt.axis()
plt.axis([xmin - 0.1, xmax + 0.1, ymin, ymax])
plt.title('LR performance over time', fontstyle='italic')
plt.legend(loc='best', numpoints=1, fancybox=True)
# Space plots a bit
plt.subplots_adjust(hspace=0.25, wspace=0.40)
plt.savefig('./LR_overtime_'+str(idx)+'.png', bbox_inches='tight')
pass
def batches(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
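# Illustrative sketch (not called by the script itself): batches() splits an
# index range into contiguous mini-batches, e.g. ten indices in batches of
# three give [0, 1, 2], [3, 4, 5], [6, 7, 8] and the leftover [9].
def _batches_example():
    return [list(b) for b in batches(list(range(10)), 3)]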
def runLROverTime(train_X, train_y, test_X, test_y, idx):
clf = SGDClassifier(loss='log') # shuffle=True is useless here
shuffledRange = range(train_X.shape[0])
n_iter = 10
data_point = 0
f_loss = open('./LR_overtime_loss_'+str(idx)+'.txt', 'w')
f_acc = open('./LR_overtime_acc_'+str(idx)+'.txt', 'w')
data_x = []
loss_y = []
acc_y = []
# temp_loss = zero_one_loss(train_y, clf.predict(train_X))
# temp_acc = accuracy_score(train_y, clf.predict(train_X))
# f_loss.write("data_point= " + str(data_point) + " zero_one_loss= " + str(temp_loss) + " \n")
# f_acc.write("data_point= " + str(data_point) + " accuracy= " + str(temp_acc) + " \n")
# data_x.append(data_point)
# loss_y.append(temp_loss)
# acc_y.append(temp_acc)
for n in range(n_iter):
shuffledRange = list(shuffledRange)
random.shuffle(shuffledRange)
shuffledX = [train_X[i] for i in shuffledRange]
shuffledY = [train_y[i] for i in shuffledRange]
for batch in batches(range(len(shuffledX)), 10):
clf.partial_fit(shuffledX[batch[0]:batch[-1] + 1], shuffledY[batch[0]:batch[-1] + 1],
classes=np.unique(train_y))
data_point += len(batch)
temp_loss = zero_one_loss(train_y, clf.predict(train_X))
temp_acc = accuracy_score(train_y, clf.predict(train_X))
f_loss.write("data_point= " + str(data_point) + " zero_one_loss= " + str(temp_loss) + " \n")
f_acc.write("data_point= " + str(data_point) + " accuracy= " + str(temp_acc) + " \n")
data_x.append(data_point)
loss_y.append(temp_loss)
acc_y.append(temp_acc)
f_loss.write("\n===== End of Training / Test Set Results =====\n")
f_loss.write("data_point= %d , zero_one_loss= %f\n" % (data_point, zero_one_loss(test_y, clf.predict(test_X))))
f_acc.write("\n===== End of Training / Test Set Results =====\n")
f_acc.write("data_point= %d , accuracy= %f\n" % (data_point, accuracy_score(test_y, clf.predict(test_X))))
f_loss.close()
f_acc.close()
plotLROverTime(data_x, loss_y, acc_y, idx)
pass
class RunEval:
def __init__(self):
self.dnames = [f_name1, f_name2, f_name3, f_name4, f_name5]
self.train_X = []
self.train_y = []
self.test_X = []
self.test_y = []
def run(self):
report = Report()
for idx, dname in enumerate(self.dnames):
# load data
if len(sys.argv) > 1 and int(sys.argv[1]) != idx:
continue
data = np.load(dname)
self.train_y = data['train_Y']
self.test_y = data['test_Y']
# standardize data (mean=0, std=1)
self.train_X = StandardScaler().fit_transform(data['train_X'])
self.test_X = StandardScaler().fit_transform(data['test_X'])
print ("shape of data set ", self.train_X.shape, self.train_y.shape, self.test_X.shape, self.test_y.shape)
if len(sys.argv) > 2 and int(sys.argv[2]) == 1:
runLROverTime(self.train_X, self.train_y, self.test_X, self.test_y, idx)
continue
clfnames = ["Logistic Regression", "Linear SVM", "RBF SVM", "Neural Nets"]
# clfnames = ["RBF SVM"]
# clfnames = ["Linear SVM"]
for idx2, clfname in enumerate(clfnames):
print("===== %s " %(dname))
print("===== %s" %(clfname))
                # (1) train model with CV
model = ClassModels()
model.trainModel(clfname)
model.grid.fit(self.train_X, self.train_y)
# (2) show results
predicted_test = model.grid.predict(self.test_X)
predicted_train = model.grid.predict(self.train_X)
# Loss + Accuracy (training + test)
# auc + confusion matrix
# cpu computation time
report.showResult(model, predicted_test, self.test_y, predicted_train, self.train_y)
report.showPlot(model, clfname)
plt.savefig('./'+clfname+'_'+str(idx)+'.png', bbox_inches = 'tight')
if __name__ == '__main__':
eval = RunEval()
eval.run()
exit() | 2.21875 | 2 |
maxixe/tests/__init__.py | tswicegood/maxixe | 1 | 12798875 | import unittest
import maxixe
from maxixe.tests import decorators
from maxixe.tests import loader
from maxixe.tests import parser
from maxixe.tests import utils
suite = unittest.TestSuite()
suite.addTests(unittest.TestLoader().loadTestsFromModule(decorators))
suite.addTests(unittest.TestLoader().loadTestsFromModule(loader))
suite.addTests(unittest.TestLoader().loadTestsFromModule(parser))
suite.addTests(unittest.TestLoader().loadTestsFromModule(utils))
| 1.6875 | 2 |
app.py | swapno-ahmed/CoursePicker | 3 | 12798876 | <reponame>swapno-ahmed/CoursePicker
import pandas as pd
import tkinter.messagebox
from utils import get_unlocked_course, reset_completed
from tkinter import *
from tkinter import filedialog
df = pd.read_csv('course.csv')
df = df.fillna('')
df.set_index('Course', inplace=True)
completed_course = None
filepath = None
completed_course = pd.read_csv('completed_course.csv')
completed_course.replace(['YES', 'NO'], [True, False], inplace=True)
completed_course.set_index('Course', inplace=True)
def browsefunc1():
filename = filedialog.askopenfilename(filetypes=(
("csv files", "*.csv"), ("All files", "*.*")))
input1.insert(END, filename)
def submit1():
filepath = input1.get()
completed_course = pd.read_csv(filepath)
completed_course.replace(['YES', 'NO'], [True, False], inplace=True)
completed_course.set_index('Course', inplace=True)
unlocked = get_unlocked_course(df, completed_course)
unlocked_courses = '\n'.join(unlocked)
tkinter.messagebox.showinfo("Unlocked Courses", unlocked_courses)
reset_completed(completed_course)
def browsefunc2():
filename = filedialog.askopenfilename(filetypes=(
("txt files", "*.txt"), ("All files", "*.*")))
input2.insert(END, filename)
def submit2():
filepath = input2.get()
with open(filepath, mode='r', encoding='utf-8') as file:
lines = list(map(lambda x: x.strip().upper(), file.read().split('\n')))
for line in lines:
completed_course.loc[line, 'Finished'] = True
unlocked = get_unlocked_course(df, completed_course)
unlocked_courses = '\n'.join(unlocked)
tkinter.messagebox.showinfo("Unlocked Courses", unlocked_courses)
reset_completed(completed_course)
def check(x):
completed_course.loc[x,
'Finished'] = not completed_course.loc[x, 'Finished']
def submit3():
unlocked = get_unlocked_course(df, completed_course)
unlocked_courses = '\n'.join(unlocked)
tkinter.messagebox.showinfo("Unlocked Courses", unlocked_courses)
reset_completed(completed_course)
window = Tk()
photo = PhotoImage(file='folder.png')
photo = photo.subsample(13, 13)
label = Label(window, text='Use one of the methods')
frame1 = Frame(window)
frame2 = Frame(window)
frame3 = Frame(window)
label1 = Label(frame1, text='.csv file path')
input1 = Entry(frame1, width=40)
button1 = Button(frame1, image=photo, command=browsefunc1)
button2 = Button(frame1, text="Submit", command=submit1)
label2 = Label(frame2, text='.txt file path')
input2 = Entry(frame2, width=40)
button3 = Button(frame2, image=photo, command=browsefunc2)
button4 = Button(frame2, text="Submit", command=submit2)
label3 = Label(frame3, text="Choose the courses you've done so far")
checkboxes = {}
row, col = 2, 1
for i, r in df.iterrows():
b = Checkbutton(frame3, text=i, command=lambda x=i: check(x))
checkboxes[i] = b
b.grid(row=row, column=col)
row += 1
if row > 10:
col += 1
row = 2
if row == 2:
col -= 1
if col % 2 == 0:
col = col // 2
else:
col = col // 2 + 1
button5 = Button(frame3, text="Submit", command=submit3)
label.grid(row=1, column=1)
frame1.grid(row=2, column=1)
label1.grid(row=1, column=1)
input1.grid(row=2, column=1, columnspan=10)
button1.grid(row=2, column=11)
button2.grid(row=2, column=12)
frame2.grid(row=3, column=1)
label2.grid(row=1, column=1)
input2.grid(row=2, column=1, columnspan=10)
button3.grid(row=2, column=11)
button4.grid(row=2, column=12)
frame3.grid(row=4, column=1)
label3.grid(row=1, column=1, columnspan=5)
button5.grid(row=11, column=col, columnspan=row if row == 2 else 1)
window.mainloop()
| 3.296875 | 3 |
_celery/djusecelery/app/blog/models.py | yc19890920/ap | 1 | 12798877 | from django.db import models
class Blog(models.Model):
    title = models.CharField("Title", unique=True, max_length=200)
class Meta:
db_table = 'blog'
        verbose_name = 'Article' | 2.21875 | 2 |
wormer/graber.py | tyrantqiao/PythonGraber | 0 | 12798878 | from wormer.tools import manager, downloader
from wormer.data import strategy
import re
class Graber:
synopsis_pattern = '''(?=lemma-summary")(.*?)(?<=config) '''
text_pattern = '>\s*?([^\&\b\n\[\]]*?)<'
href_pattern = '<a target=_blank href="(/item/[\w\d%]*?)">'
def __init__(self):
self.urlManager = manager.UrlsManager()
self.downloader = downloader.DownLoader()
self.textManager = manager.TextManager()
self.logManager = manager.LogManager()
self.threadManager = manager.ThreadManager()
self.url_start = ''
def get_readme(self):
self.downloader.grab_single(self.url_start)
tips = self.downloader.get_readme()
return tips
def grabing_urls(self, limit=100, grab_strategy=strategy.GrabStrategy.BREATH_FIRST):
self.urlManager.add_single_url(self.url_start)
self.urlManager.add_single_url(self.url_start, 'urls_grabbed')
while self.urlManager.has_next_url():
page_source = self.downloader.grab_single_url(self.urlManager.get_url()).content.decode('utf-8')
            # re.match only matches at the beginning of the string and returns its groups as a tuple (use [i for i in t] to convert), while re.findall returns a list
urls = self.textManager.find_urls_by_regex(page_source, Graber.href_pattern)
synopsis = self.textManager.find_text_by_regex(page_source, Graber.synopsis_pattern, re.VERBOSE|re.MULTILINE|re.DOTALL)
# print(synopsis)
page_content = self.textManager.find_text_by_regex(synopsis, Graber.text_pattern, re.VERBOSE|re.MULTILINE|re.DOTALL)
if urls and page_content is not None:
self.add_urls_head(urls, 'https://baike.baidu.com')
self.urlManager.add_urls(urls)
self.logManager.collect_data(page_content)
self.logManager.save_all_data()
@staticmethod
def add_urls_head(urls, head):
for i, item in enumerate(urls):
item = head + item
urls[i] = item
def get_start(self, url_start):
self.url_start = url_start
if __name__ == '__main__':
# url = input('The website you want:\n')
url_python_baike = 'https://baike.baidu.com/item/Python'
graber = Graber()
graber.get_start(url_python_baike)
graber.grabing_urls()
# text = graber.get_readme(url)
# graber.logManager.log_text(text)
| 2.515625 | 3 |
30 Days of Code/Day 2 - Operators/solution.py | matheuscordeiro/HackerRank | 0 | 12798879 | #!/usr/local/bin/python3
"""Task
Given the meal price (base cost of a meal), tip percent (the percentage of the meal price being added as tip),
and tax percent (the percentage of the meal price being added as tax) for a meal, find and print the meal's total cost.
Note: Be sure to use precise values for your calculations, or you may end up with an incorrectly rounded result!
"""
# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
print(round(meal_cost + (tip_percent*meal_cost/100) + (tax_percent*meal_cost/100)))
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
solve(meal_cost, tip_percent, tax_percent)
| 4.03125 | 4 |
argus/unit_tests/backends/tempest/test_tempest_backend.py | mateimicu/cloudbase-init-ci | 6 | 12798880 | <reponame>mateimicu/cloudbase-init-ci
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: disable=no-value-for-parameter, protected-access, arguments-differ
# pylint: disable= unused-argument, no-member, attribute-defined-outside-init
import copy
import unittest
from argus.backends.tempest import tempest_backend
from argus.unit_tests import test_utils
from argus import util
try:
import unittest.mock as mock
except ImportError:
import mock
LOG = util.get_logger()
class FakeBaseTempestBackend(tempest_backend.BaseTempestBackend):
def __init__(self, name, userdata, metadata, availability_zone):
super(FakeBaseTempestBackend, self).__init__(
name, userdata, metadata, availability_zone)
def get_remote_client(self, **kwargs):
return "fake get_remote_client"
def remote_client(self):
return "fake_remote_client"
class TestBaseTempestBackend(unittest.TestCase):
@mock.patch('argus.config.CONFIG.argus')
@mock.patch('argus.backends.tempest.manager.APIManager')
def setUp(self, mock_api_manager, mock_config):
mock_config.openstack.image_ref = "fake image ref"
mock_config.openstack.flavor_ref = "fake flavor ref"
name = mock.sentinel
userdata = "fake userdata"
metadata = mock.sentinel
availability_zone = mock.sentinel
self._base_tempest_backend = FakeBaseTempestBackend(
name, userdata, metadata, availability_zone)
@mock.patch('argus.config.CONFIG.argus')
def test__configure_networking(self, mock_config):
mock_network = mock.Mock()
mock_network.subnet = {"id": "fake id"}
mock_primary_credentials = mock.Mock()
mock_primary_credentials.return_value = mock_network
(self._base_tempest_backend._manager.
primary_credentials) = mock_primary_credentials
mock_subnets_client = mock.Mock()
mock_subnets_client.update_subnet.return_value = None
(self._base_tempest_backend.
_manager.subnets_client) = mock_subnets_client
mock_argus = mock.Mock()
mock_argus.dns_nameservers.return_value = "fake dns nameservers"
mock_config.argus = mock_argus
self._base_tempest_backend._configure_networking()
        (self._base_tempest_backend._manager.subnets_client.
         update_subnet.assert_called_once())
@mock.patch('argus.util.rand_name', return_value="fake-server")
@mock.patch('tempest.common.waiters.wait_for_server_status')
def _test_create_server(self, mock_waiters, mock_util,
kwargs, wait_until=None):
fake_server = {
'server': {
'id': "fake server id"
}
}
stripped_kwargs = copy.deepcopy(kwargs)
for key, value in list(stripped_kwargs.items()):
if not value:
del stripped_kwargs[key]
(self._base_tempest_backend._manager.servers_client.
create_server) = mock.Mock(return_value=fake_server)
self._base_tempest_backend.image_ref = "fake image ref"
self._base_tempest_backend.flavor_ref = "fake flavor ref"
self._base_tempest_backend._name = "fake name"
if wait_until is not None:
result = (self._base_tempest_backend
._create_server(wait_until, kwargs))
else:
result = self._base_tempest_backend._create_server(**kwargs)
self.assertEqual(result, {"id": "fake server id"})
(self._base_tempest_backend._manager.servers_client.create_server.
assert_called_once_with(name="fake-server-instance",
imageRef="fake image ref",
flavorRef="fake flavor ref",
**stripped_kwargs))
if wait_until is not None:
mock_waiters.assert_called_once_with(
self._base_tempest_backend._manager.servers_client,
"fake server id", wait_until)
else:
mock_waiters.assert_called_once_with(
self._base_tempest_backend._manager.servers_client,
"fake server id", 'ACTIVE')
def test_create_server(self):
kwargs = {
"arg 1": "value 1",
"arg 2": "value 2",
"arg 3": None,
"arg 4": "value 4"
}
self._test_create_server(kwargs=kwargs)
def test__assign_floating_ip(self):
mock_create_floating_ip = mock.Mock()
mock_create_floating_ip.return_value = {
"floating_ip": {
"ip": "fake ip"
}
}
mock_floating_ips_client = mock.Mock()
mock_floating_ips_client.create_floating_ip = mock_create_floating_ip
(mock_floating_ips_client.associate_floating_ip_to_server
.return_value) = None
mock_internal_instance_id = mock.Mock()
mock_internal_instance_id.return_value = "fake id"
(self._base_tempest_backend._manager.
floating_ips_client) = mock_floating_ips_client
(self._base_tempest_backend.
internal_instance_id) = mock_internal_instance_id
result = self._base_tempest_backend._assign_floating_ip()
self.assertEqual(result, {"ip": "fake ip"})
(self._base_tempest_backend._manager.floating_ips_client.
associate_floating_ip_to_server.assert_called_once_with(
"fake ip", "fake id"))
def test_get_mtu(self):
mock_get_mtu = mock.Mock()
mock_get_mtu.return_value = "fake mtu"
self._base_tempest_backend._manager.get_mtu = mock_get_mtu
result = self._base_tempest_backend.get_mtu()
self.assertEqual(result, "fake mtu")
self._base_tempest_backend._manager.get_mtu.assert_called_once()
def test__add_security_group_exceptions(self):
mock_security_group_rules_client = mock.Mock()
(mock_security_group_rules_client.create_security_group_rule
.return_value) = {"security_group_rule": "fake sg_rule"}
(self._base_tempest_backend._manager
.security_group_rules_client) = mock_security_group_rules_client
result = (self._base_tempest_backend.
_add_security_group_exceptions("fake secgroup_id"))
for item in result:
self.assertEqual(item, "fake sg_rule")
def test__create_security_groups(self):
fake_security_group = {
"security_group": {
"id": [
{"id": 1},
{"id": 2},
{"id": 3},
{"id": 4},
{"id": 5}
],
"name": "fake name"
}
}
mock_security_groups_client = mock.Mock()
(mock_security_groups_client.create_security_group
.return_value) = fake_security_group
(self._base_tempest_backend._manager
.security_groups_client) = mock_security_groups_client
self._base_tempest_backend._security_groups_rules = []
self._base_tempest_backend._add_security_group_exceptions = mock.Mock(
return_value=fake_security_group["security_group"]["id"])
self._base_tempest_backend._manager.servers_client = mock.Mock()
self._base_tempest_backend.internal_instance_id = mock.Mock(
return_value="fake ip")
result = self._base_tempest_backend._create_security_groups()
self.assertEqual(result, fake_security_group["security_group"])
(self._base_tempest_backend._manager.security_groups_client.
create_security_group.assert_called_once())
self._base_tempest_backend.internal_instance_id.assert_called_once()
(self._base_tempest_backend._manager.servers_client.add_security_group
.assert_called_once())
self.assertEqual(self._base_tempest_backend._security_groups_rules,
[1, 2, 3, 4, 5])
@mock.patch('tempest.common.waiters.wait_for_server_termination')
def _test_cleanup(self, mock_waiters, security_groups_rules=None,
security_group=None, server=None, floating_ip=None,
keypair=None):
expected_logging = ["Cleaning up..."]
if security_groups_rules is not None:
(self._base_tempest_backend.
_security_groups_rules) = security_groups_rules
(self._base_tempest_backend._manager.security_group_rules_client.
delete_security_group_rule) = mock.Mock()
if security_group is not None:
(self._base_tempest_backend._manager.servers_client
.remove_security_group) = mock.Mock()
self._base_tempest_backend.internal_instance_id = mock.Mock(
return_value="fake id")
self._base_tempest_backend._security_group = security_group
if server is not None:
mock_servers_client = mock.Mock()
mock_servers_client.delete_server = mock.Mock()
(self._base_tempest_backend._manager.
servers_client) = mock_servers_client
self._base_tempest_backend.internal_instance_id = mock.Mock(
return_value="fake id")
self._base_tempest_backend._server = server
if floating_ip is not None:
(self._base_tempest_backend._manager.floating_ips_client.
delete_floating_ip) = mock.Mock()
self._base_tempest_backend._floating_ip = floating_ip
if keypair is not None:
self._base_tempest_backend._keypair = keypair
self._base_tempest_backend._manager.cleanup_credentials = mock.Mock()
with test_utils.LogSnatcher('argus.backends.tempest.'
'tempest_backend') as snatcher:
self._base_tempest_backend.cleanup()
if security_groups_rules is not None:
(self.assertEqual(
self._base_tempest_backend._manager.
security_group_rules_client.delete_security_group_rule.
call_count,
len(security_groups_rules)))
if security_group is not None:
(self._base_tempest_backend._manager.servers_client.
remove_security_group.assert_called_once_with(
server_id="fake id",
name=security_group['name']))
(self._base_tempest_backend.internal_instance_id.
assert_called_once())
if server is not None:
(self._base_tempest_backend._manager.servers_client.delete_server
.assert_called_once_with("fake id"))
(mock_waiters.assert_called_once_with(
self._base_tempest_backend._manager.servers_client,
"fake id"))
self.assertEqual(
self._base_tempest_backend.internal_instance_id.call_count, 2)
if floating_ip is not None:
(self._base_tempest_backend._manager.floating_ips_client.
delete_floating_ip.assert_called_once_with(floating_ip['id']))
if keypair is not None:
self._base_tempest_backend._keypair.destroy.assert_called_once()
(self._base_tempest_backend._manager.cleanup_credentials.
assert_called_once())
self.assertEqual(expected_logging, snatcher.output)
def test_cleanup_security_groups_rules(self):
fake_rules = ["rule 1", "rule 2", "rule 3", "rule 4"]
self._test_cleanup(security_groups_rules=fake_rules)
def test_cleanup_security_group(self):
self._test_cleanup(security_group={'name': "fake name"})
def test_cleanup_server(self):
self._test_cleanup(server="fake server")
def test_cleanup_floating_ip(self):
self._test_cleanup(floating_ip={"id": "fake floating ip id"})
def test_cleanup_keypair(self):
self._test_cleanup(keypair=mock.Mock())
def test_cleanup_credentials(self):
self._test_cleanup()
def test_instance_setup_create_server(self):
expected_logging = ["Creating server..."]
self._base_tempest_backend._configure_networking = mock.Mock()
self._base_tempest_backend._manager.create_keypair = mock.Mock()
self._base_tempest_backend._create_server = mock.Mock(
return_value="fake server")
self._base_tempest_backend._assign_floating_ip = mock.Mock()
self._base_tempest_backend._create_security_groups = mock.Mock()
self._base_tempest_backend._availability_zone = mock.Mock()
self._base_tempest_backend.__get_id_tenant_network = mock.Mock()
with test_utils.LogSnatcher('argus.backends.base') as snatcher:
self._base_tempest_backend.setup_instance()
self.assertEqual(expected_logging, snatcher.output)
self._base_tempest_backend._configure_networking.assert_called_once()
self._base_tempest_backend._manager.create_keypair.assert_called_once()
self._base_tempest_backend._create_server.assert_called_once()
self._base_tempest_backend._assign_floating_ip.assert_called_once()
self._base_tempest_backend._create_security_groups.assert_called_once()
def test_reboot_instance(self):
self._base_tempest_backend._manager.reboot_instance = mock.Mock(
return_value="fake reboot")
self._base_tempest_backend.internal_instance_id = mock.Mock(
return_value="fake id")
result = self._base_tempest_backend.reboot_instance()
self.assertEqual(result, "fake reboot")
(self._base_tempest_backend._manager.reboot_instance.
assert_called_once_with("fake id"))
def test_instance_password(self):
self._base_tempest_backend._manager.instance_password = mock.Mock(
return_value="fake password")
self._base_tempest_backend.internal_instance_id = mock.Mock(
return_value="fake id")
self._base_tempest_backend._keypair = "fake keypair"
result = self._base_tempest_backend.instance_password()
self.assertEqual(result, "fake password")
self._base_tempest_backend.internal_instance_id.assert_called_once()
def test_internal_instance_id(self):
self._base_tempest_backend._server = {"id": "fake server"}
result = self._base_tempest_backend.internal_instance_id()
self.assertEqual(result, "fake server")
def test_instance_output(self):
self._base_tempest_backend._manager.instance_output = mock.Mock(
return_value="fake output")
self._base_tempest_backend.internal_instance_id = mock.Mock(
return_value="fake id")
result = self._base_tempest_backend.instance_output(limit=10)
self.assertEqual(result, "fake output")
self._base_tempest_backend.internal_instance_id.assert_called_once()
        (self._base_tempest_backend._manager.instance_output.
         assert_called_once())
def test_instance_server(self):
self._base_tempest_backend._manager.instance_server = mock.Mock(
return_value="fake instance server")
self._base_tempest_backend.internal_instance_id = mock.Mock(
return_value="fake instance id")
result = self._base_tempest_backend.instance_server()
self.assertEqual(result, "fake instance server")
self._base_tempest_backend.internal_instance_id.assert_called_once()
def test_public_key(self):
mock_keypair = mock.Mock()
mock_keypair.public_key = "fake public key"
self._base_tempest_backend._keypair = mock_keypair
result = self._base_tempest_backend.public_key()
self.assertEqual(result, "fake public key")
def test_private_key(self):
mock_keypair = mock.Mock()
mock_keypair.private_key = "fake private key"
self._base_tempest_backend._keypair = mock_keypair
result = self._base_tempest_backend.private_key()
self.assertEqual(result, "fake private key")
def test_get_image_by_ref(self):
(self._base_tempest_backend._manager.compute_images_client.
show_image) = mock.Mock(return_value={"image": "fake image"})
self._base_tempest_backend._conf = mock.Mock()
result = self._base_tempest_backend.get_image_by_ref()
self.assertEqual(result, "fake image")
def test_floating_ip(self):
self._base_tempest_backend._floating_ip = {"ip": "fake ip"}
result = self._base_tempest_backend.floating_ip()
self.assertEqual(result, "fake ip")
class TestBaseWindowsTempestBackend(unittest.TestCase):
@mock.patch('argus.config.CONFIG.argus')
@mock.patch('argus.backends.tempest.manager.APIManager')
def setUp(self, mock_api_manager, mock_config):
mock_config.openstack.image_ref = "fake image ref"
mock_config.openstack.flavor_ref = "fake flavor ref"
name = mock.sentinel
userdata = "fake userdata"
metadata = mock.sentinel
availability_zone = mock.sentinel
self._base = tempest_backend.BaseWindowsTempestBackend(
name, userdata, metadata, availability_zone)
@mock.patch('argus.config.CONFIG.argus')
@mock.patch('argus.backends.base.CloudBackend._get_log_template')
def test_get_log_template(self, mock_get_log, mock_config):
mock_get_log.return_value = "fake call"
mock_config.build = "fake build"
mock_config.arch = "fake arch"
expected_result = "{}-{}-{}".format(mock_config.build,
mock_config.arch,
mock_get_log.return_value)
result = self._base._get_log_template("fake suffix")
self.assertEqual(result, expected_result)
| 1.789063 | 2 |
scripts/click.py | liujordan/TestYourTech | 0 | 12798881 | class Click(ActionBase):
def _execute(self):
try:
element = WebDriverWait(browser1, 10).until(
EC.presence_of_element_located((By.XPATH, selector1))
)
element.click()
return True
except:
print("timedout", function, selector1, value1)
return False | 2.609375 | 3 |
keras_frcnn/simple_parser.py | Heyjuke58/frcnn-wind-turbine-detection | 0 | 12798882 | <filename>keras_frcnn/simple_parser.py
import cv2
import os
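# The annotation file consumed by get_data() below is CSV with one bounding
# box per line, in the order:
#   filename,x1,y1,x2,y2,class_name,bucket,height,set
# Example line (hypothetical values): img_0001.png,412,385,450,423,turbine,3,120,train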
def get_data(input_path: str, test_only: bool = False):
found_bg = False
all_imgs = {}
classes_count = {}
class_mapping = {}
with open(input_path, 'r') as f:
print('Parsing annotation files')
for line in f:
filename, x1, y1, x2, y2, class_name, bucket, height, set = line.strip().split(',')
if test_only and set != 'test':
continue
if class_name not in classes_count:
classes_count[class_name] = 1
else:
classes_count[class_name] += 1
if class_name not in class_mapping:
if class_name == 'bg' and found_bg == False:
print('Found class name with special name bg. Will be treated as a background region (this is usually for hard negative mining).')
found_bg = True
class_mapping[class_name] = len(class_mapping)
if filename not in all_imgs:
path = 'images/' + filename
if not os.path.exists(path):
continue
img = cv2.imread(path)
img = img[0:1280-50, 0:1280-50]
(rows,cols) = img.shape[:2]
all_imgs[filename] = {}
all_imgs[filename]['filepath'] = path
all_imgs[filename]['width'] = cols
all_imgs[filename]['height'] = rows
all_imgs[filename]['bboxes'] = []
all_imgs[filename]['imageset'] = set
if int(x1) + (int(x2) - int(x1)) / 2 >= 1230 or int(y1) + (int(y2) - int(y1)) / 2 >= 1230:
continue
x2 = 1229 if int(x2) >= 1230 else int(x2)
y2 = 1229 if int(y2) >= 1230 else int(y2)
all_imgs[filename]['bboxes'].append({'class': class_name, 'bucket': bucket, 'height': height, 'x1': int(x1), 'y1': int(y1), 'x2': int(x2), 'y2': int(y2)})
all_data = []
for key in all_imgs:
all_data.append(all_imgs[key])
# make sure the bg class is last in the list
if found_bg:
if class_mapping['bg'] != len(class_mapping) - 1:
key_to_switch = [key for key in class_mapping.keys() if class_mapping[key] == len(class_mapping)-1][0]
val_to_switch = class_mapping['bg']
class_mapping['bg'] = len(class_mapping) - 1
class_mapping[key_to_switch] = val_to_switch
return all_data, classes_count, class_mapping
def get_data_modified(input_path: str, test_only: bool = False):
found_bg = False
all_imgs = {}
classes_count = {}
class_mapping = {}
with open(input_path,'r') as f:
print('Parsing annotation files')
for line in f:
filename, x, y, class_name, set = line.strip().split(',')
if test_only and set != 'test':
continue
if class_name not in classes_count:
classes_count[class_name] = 1
else:
classes_count[class_name] += 1
if class_name not in class_mapping:
if class_name == 'bg' and found_bg == False:
print('Found class name with special name bg. Will be treated as a background region (this is usually for hard negative mining).')
found_bg = True
class_mapping[class_name] = len(class_mapping)
if filename not in all_imgs:
all_imgs[filename] = {}
path = 'images/' + filename
img = cv2.imread(path)
img = img[0:1280-50, 0:1280-50]
(rows,cols) = img.shape[:2]
all_imgs[filename]['filepath'] = path
all_imgs[filename]['width'] = cols
all_imgs[filename]['height'] = rows
all_imgs[filename]['coordinates'] = []
all_imgs[filename]['imageset'] = set
if int(x) >= 1230 or int(y) >= 1230:
continue
all_imgs[filename]['coordinates'].append({'class': class_name, 'x': int(x), 'y': int(y)})
all_data = []
for key in all_imgs:
all_data.append(all_imgs[key])
# make sure the bg class is last in the list
if found_bg:
if class_mapping['bg'] != len(class_mapping) - 1:
key_to_switch = [key for key in class_mapping.keys() if class_mapping[key] == len(class_mapping)-1][0]
val_to_switch = class_mapping['bg']
class_mapping['bg'] = len(class_mapping) - 1
class_mapping[key_to_switch] = val_to_switch
return all_data, classes_count, class_mapping
| 2.875 | 3 |
athumb/__init__.py | tahy/django-athumb | 10 | 12798883 | <reponame>tahy/django-athumb
VERSION = '2.4.1'
| 0.941406 | 1 |
example/test.py | flaviolsousa/ping-pong-ia | 0 | 12798884 | <reponame>flaviolsousa/ping-pong-ia
import pygame
from pygame.locals import *
def main():
pygame.init()
screen = pygame.display.set_mode(
(200, 200), HWSURFACE | DOUBLEBUF | RESIZABLE)
fake_screen = screen.copy()
pic = pygame.surface.Surface((50, 50))
pic.fill((255, 100, 200))
while True:
for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                return
elif event.type == VIDEORESIZE:
screen = pygame.display.set_mode(
event.size, HWSURFACE | DOUBLEBUF | RESIZABLE)
fake_screen.fill('black')
fake_screen.blit(pic, (100, 100))
screen.blit(pygame.transform.scale(
fake_screen, screen.get_rect().size), (0, 0))
pygame.display.flip()
main()
| 3.109375 | 3 |
Exercicios - Mundo1/Ex014.py | BrianMath/ExerciciosPythonCeV | 0 | 12798885 | # Ex. 014
c = float(input("Digite uma temperatura em °C: "))
print(f"{c}°C = {((9*c)/5)+32:.1f}°F")
| 3.96875 | 4 |
apps/node/urls.py | dongdawang/ssrmgmt | 0 | 12798886 | <reponame>dongdawang/ssrmgmt
from django.urls import path
from .views import NodeShow, NodeDetail, SelectNode
urlpatterns = [
path('node/', NodeShow.as_view(), name='node-show'),
path('node/detail/<int:n_id>', NodeDetail.as_view(), name='node-detail'),
path('node/select/', SelectNode.as_view(), name='node-select')
]
| 1.960938 | 2 |
Kubera/Controllers/dividend_summary.py | santhoshraje/kubera | 5 | 12798887 | from telegram.ext import ConversationHandler
from telegram.ext import MessageHandler
from telegram.ext import Filters
from telegram.ext import CallbackQueryHandler
from Model.share import Share
import Controllers.global_states as states
from Utils.logging import get_logger as log
import pandas as pd
import datetime
GETSUMMARY = range(1)
class DividendSummary:
def __init__(self, dispatcher):
self.__dp = dispatcher
self.__handler()
def __handler(self):
ds_handler = ConversationHandler(
entry_points=[CallbackQueryHandler(
self.get_ticker, pattern='^' + str(states.DIVIDENDINFO) + '$')],
states={
GETSUMMARY: [
MessageHandler(Filters.text, self.get_dividend_summary)
],
},
fallbacks=[]
)
self.__dp.add_handler(ds_handler)
@staticmethod
def get_ticker(update, context):
user = update.effective_user
log().info("User %s pressed the dividend summary button.", user.first_name)
query = update.callback_query
query.answer()
query.edit_message_text(
text="Enter ticker symbol (e.g D05)")
return GETSUMMARY
@staticmethod
def get_dividend_summary(update, context):
ticker = update.message.text
user = update.effective_user
log().info("User %s entered ticker value %s.", user.first_name, ticker)
years = 5
share = Share(ticker)
if not share.is_valid:
update.message.reply_text("Invalid ticker. Please use /start to go back to the main menu")
log().info("User %s entered an invalid ticker value %s.", user.first_name, ticker)
return ConversationHandler.END
a = share.get_dividend_summary(datetime.datetime.now().year, datetime.datetime.now().year - years)
s = '<b>' + share.name + '</b>\n\n'
for item in a:
s += '<b>' + str(item.year) + ' (' + str(item.total) + ')</b>' + '\n'
i = 1
for pay_date, pay_amount in zip(item.pay_date, item.amount):
if pay_date == '-':
continue
s += pd.to_datetime(pay_date).strftime('%d %B') + ': ' + str(pay_amount).replace('SGD', 'SGD ') +'\n'
i += 1
s += '\n'
update.message.reply_text(s, parse_mode='HTML')
return ConversationHandler.END
| 2.46875 | 2 |
scripts/KMerFreq.py | chrisquince/BayesPaths | 3 | 12798888 | import gzip
import sys
import argparse
import re
import logging
import numpy as np
import pandas as p
from itertools import product, tee
from collections import Counter, OrderedDict
from Bio import SeqIO
def generate_feature_mapping(kmer_len):
BASE_COMPLEMENT = {"A":"T","T":"A","G":"C","C":"G"}
kmer_hash = {}
counter = 0
for kmer in product("ATGC",repeat=kmer_len):
if kmer not in kmer_hash:
kmer_hash[kmer] = counter
rev_compl = tuple([BASE_COMPLEMENT[x] for x in reversed(kmer)])
kmer_hash[rev_compl] = counter
counter += 1
return kmer_hash, counter
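# Illustrative sketch (not used by the script itself): canonical k-mers share
# an index with their reverse complement, so for kmer_len=2 the 16 possible
# 2-mers collapse to 10 features ("AA" pairs with "TT", while "AT" is its own
# reverse complement).
def _feature_mapping_example():
    kmer_hash, counter = generate_feature_mapping(2)
    assert counter == 10
    assert kmer_hash[("A", "A")] == kmer_hash[("T", "T")]
    return kmer_hash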
def window(seq,n):
els = tee(seq,n)
for i,el in enumerate(els):
for _ in range(i):
next(el, None)
return zip(*els)
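# Illustrative sketch: window() yields every overlapping n-tuple of a
# sequence, e.g. window("ATGC", 2) gives ('A','T'), ('T','G'), ('G','C').
def _window_example():
    return list(window("ATGC", 2))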
def _calculate_composition(read_file, kmer_len, length_threshold=25):
#Generate kmer dictionary
feature_mapping, nr_features = generate_feature_mapping(kmer_len)
    composition = np.zeros(nr_features, dtype=int)
    start_composition = np.zeros(nr_features, dtype=int)
    failStart = 0
with gzip.open(read_file, "rt") as handle:
for seq in SeqIO.parse(handle,"fastq"):
seq_len = len(seq)
if seq_len<= length_threshold:
continue
str_seq = str(seq.seq)
# Create a list containing all kmers, translated to integers
kmers = [
feature_mapping[kmer_tuple]
for kmer_tuple
in window(str_seq.upper(), kmer_len)
if kmer_tuple in feature_mapping
]
# numpy.bincount returns an array of size = max + 1
# so we add the max value and remove it afterwards
# numpy.bincount was found to be much more efficient than
# counting manually or using collections.Counter
kmers.append(nr_features - 1)
composition_v = np.bincount(np.array(kmers))
composition_v[-1] -= 1
# Adding pseudo counts before storing in dict
composition += composition_v
if seq_len >= kmer_len:
startKmer = str_seq[0:kmer_len].upper()
startKmerT = tuple(startKmer)
if startKmerT in feature_mapping:
start_composition[feature_mapping[startKmerT]]+=1
else:
failStart+=1
return feature_mapping, composition, start_composition, failStart
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("read_file", help="gzipped fastq read file")
parser.add_argument("kmer_length", help="kmer length assumed overlap")
parser.add_argument("outFileStub", help="stub for output files")
args = parser.parse_args()
#import ipdb; ipdb.set_trace()
(feature_mapping, composition, start_composition,failStart) = _calculate_composition(args.read_file, int(args.kmer_length))
print(str(failStart))
for k in sorted(feature_mapping, key=feature_mapping.get):
kidx = feature_mapping[k]
print("".join(k) + "," + str(kidx) + "," + str(composition[kidx]) + "," + str(start_composition[kidx]) )
if __name__ == "__main__":
main(sys.argv[1:])
| 2.28125 | 2 |
hw4/hw3_farmer.py | farmerjm/PHYS38600 | 0 | 12798889 | # -*- coding: utf-8 -*-
'''
<NAME>
1. a. Frequentist confidence intervals do not respect the physical limitations imposed on a system, ie non-negativity of a mass.
b. Typically, that the probability to be found outside the interval on both sides of the distribution is 16% (or (100-CL)/2 %).
Often constructed with a likelihood function, finding where the likelihood reduces by a half.
c. We need a prior PDF to construct the posterior PDF for \mu_t.
d. 1/\mu_t. He justifies that this is invariant over changes of power of \mu_t.
e. Bayesian methods fail to be objective: they must be injected with a prior PDF to construct the posterior from the likelihood function.
Classical intervals fail to consider physical limitations on the measured parameter.
Classical limits also handle systematics in a counterintuitive way, such that a bad calibration leads to a tighter confidence interval.
It seems that generally people use classical statistics except when it produces things that 'seem' wrong, in which case use Bayesian.
f. As Cousins did, perform classical analysis on the mean and statistical error and use a Bayesian analysis of the detector sensitivity.
2. I repeated this entire problem for a quadratic plot. The files ending in "_quad.pdf" are from the second iteration with a quadratic dataset.
a. The data are shown in blue, the linear fit in red, and the quadratic fit in blue.
b. The symmetry of the histogram reflects unbiased estimators.
c. The functional form is:
1/(2^{df/2}\Gamma(df/2)) x^{df/2-1}e^{-x/2}
The single parameter, df, is the number of degrees of freedom in the fit. Since we have 15 data points, this is either 12 or 13.
For the linear fit, we have two free parameters so df=13; for the quadratic fit with three free parameters, df=12.
We expected the reduced chi square to be around 1, and this is the result for both fits.
   * For comparison I give a normalized reduced Chi2 distribution for df=12 and df=13. Overlaying them was not obviously easy, but comparing by eye they are identical.
     I plotted reduced chi-squares throughout because of their usefulness as a goodness-of-fit measure; the conversion between the two statistics is simple.
d. In the case of the linear data, the fit gets worse. It is difficult to predict what happens here: if we are lucky enough that we can fit
some noise to the new x^2 degree of freedom, the X^2 will lower. However, the ndf has reduced by 1, so if there is overall no noise we can
fit away, then the reduced chi square will rise.
In the case of the quadratic data, the linear fit is abysmal and the quadratic fit is around 1. This is also expected.
3. a. I sampled the distribution using the cdf; for reference I included both the plot of the distribution and the cdf.
b. Transforming error bars for log data is not entirely trivial because applying the logarithm literally yields asymmetric error bars.
Instead, I transformed to first-order (d/dx log x), using \sigma_{D,log}=\sigma_D/D
c. It takes a rather long time to run this with a large number of statistics (maybe I am doing something very inefficient).
From running the experiment 500 times, I can say that poor sampling of the tails of the distribution leads to underestimation: that is,
we can see a bias in the distribution that favors the left side. I verified this by reducing the number of samples taken
from the distribution by a factor of 10 and re-running, giving bins that are much less well-populated. I attached outputs for both cases.
Rather than wrestle with masking or reassigning garbage datasets post-log, I discarded all results for which the fit failed.
'''
import random
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stat
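# Worked example of the first-order error propagation described in 3b above
# (a sketch, not called by the assignment code): a bin count D with Poisson
# error sigma_D = sqrt(D) has error sigma_D / D on log(D).
def _log_error_example(D=100.0):
    sigma_D = math.sqrt(D)  # sqrt(100) = 10
    return sigma_D / D      # 10 / 100 = 0.1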
#Simulates a single measurement of a true value by adding Gaussian noise
#whose standard deviation is the given resolution.
class MeasurementTaker:
def __init__(self, resolution):
self.theResolution=resolution
def GeneratePointWithResolution(self, val):
point=np.random.normal(loc=val,scale=self.theResolution)
return point
class theLine:
def __init__(self, degree):
self.quadcoeff=1
self.degree=degree
self.m=2
self.b=6
self.res=2
self.X=np.linspace(1,15, 15)
self.Y=[]
self.x=0
self.residuals=0
self.ChiSquare=0
if self.degree == 1:
self.BuildLine()
else:
self.BuildLineQuadratic()
self.FitLine()
def BuildLine(self):
measurer = MeasurementTaker(2)
for i, entry in enumerate(self.X):
self.Y.append(measurer.GeneratePointWithResolution(self.m*entry+self.b))
def BuildLineQuadratic(self):
measurer = MeasurementTaker(2)
for i, entry in enumerate(self.X):
self.Y.append(measurer.GeneratePointWithResolution(self.quadcoeff*entry**2+self.m*entry+self.b))
def FitLine(self):
self.coeffs = np.polyfit(self.X, self.Y, 1)
self.ChiSquare=np.sum((((self.coeffs[0]*self.X+self.coeffs[1])-self.Y)/self.res) ** 2)
self.quadcoeffs=np.polyfit(self.X, self.Y,2)
self.ChiSquareQuad=np.sum((((self.quadcoeffs[0]*self.X**2+self.quadcoeffs[1]*self.X+self.quadcoeffs[2])-self.Y)/self.res)**2)
def PlotLine(self, title):
plt.errorbar(self.X,self.Y,xerr=0,yerr=2)
plt.plot(self.X,self.quadcoeffs[0]*self.X**2+self.quadcoeffs[1]*self.X+self.quadcoeffs[2])
plt.plot(self.X,self.coeffs[0]*self.X+self.coeffs[1])
plt.xlabel("x")
plt.ylabel("y")
plt.title("The Line")
plt.savefig(title)
plt.clf()
class theExponential:
lookup_x=[]
lookup_y=[]
cdf=[]
maxcdf=0
def GenerateSample(self):
randomNumber = random.uniform(theExponential.cdf[0],theExponential.maxcdf)
index=-1
if randomNumber < theExponential.cdf[0]:
index=0
else:
for i in range(0,len(theExponential.cdf)-1):
if randomNumber > theExponential.cdf[i] and randomNumber < theExponential.cdf[i+1]:
index=i+1
if index != -1:
self.samples.append(theExponential.lookup_x[index])
def GenerateNSamples(self, numSamples):
for i in range(0, numSamples):
self.GenerateSample()
def AnalyzeDistro(self, index):
y,binEdges = np.histogram(self.samples,bins=10)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
menStd = np.sqrt(y)
width = 0.20
plt.clf()
if index == 1:
plt.bar(bincenters, y, width=width, yerr=menStd, ecolor='g')
plt.xlabel("Value")
plt.ylabel("Entries")
plt.title(str(len(self.samples))+" exponential samples")
plt.savefig("3b_exp_samples.png")
plt.clf()
self.logsamples=np.log(y)
logcoeffs = np.polyfit(bincenters, self.logsamples, 1)
if index == 1:
plt.bar(bincenters,self.logsamples,width=width, yerr=menStd/y, ecolor='g')
plt.xlabel("Value")
plt.ylabel("log Entries")
plt.title(str(len(self.samples))+" exponential samples")
theFitX=np.linspace(0,5,1000)
theFitY=theFitX*logcoeffs[0]+logcoeffs[1]
plt.plot(theFitX,theFitY)
plt.savefig("3b_exp_samples_log.png")
plt.clf()
return -1*logcoeffs[0]
def __init__(self, nSamples):
self.samples=[]
self.logbins=[]
self.GenerateNSamples(nSamples)
theExponential.lookup_x=np.linspace(0, 5, 10000)
theExponential.lookup_y=np.exp(-theExponential.lookup_x)
runningAverage=0
for val in theExponential.lookup_y:
runningAverage=runningAverage+val
theExponential.cdf.append(runningAverage)
theExponential.maxcdf=theExponential.cdf[len(theExponential.cdf)-1]
plt.clf()
print("Running...")
plt.plot(theExponential.lookup_x, theExponential.lookup_y)
plt.xlabel("x")
plt.ylabel("$e^{-x}$")
plt.title("Exponential distribution")
plt.savefig("3_exponential_dist.png")
plt.clf()
plt.plot(theExponential.lookup_x, theExponential.cdf)
plt.xlabel("x")
plt.ylabel("cdf")
plt.title("Exponential cdf")
plt.savefig("3_exponential_cdf.png")
plt.clf()
for i in range(0,2):
fileEnding=0
degree=i+1
if i == 0:
fileEnding=".png"
else:
fileEnding="_quad.png"
Lines=[]
slopes=[]
intercepts=[]
quads=[]
chisqs=[]
chisqquads=[]
for j in range(0,1000):
line = theLine(degree)
Lines.append(line)
if j == 1:
line.PlotLine("2a_line"+fileEnding)
if i == 0:
slopes.append(line.coeffs[0])
intercepts.append(line.coeffs[1])
else:
quads.append(line.quadcoeffs[0])
slopes.append(line.quadcoeffs[1])
intercepts.append(line.quadcoeffs[2])
chisqs.append(line.ChiSquare/13)
chisqquads.append(line.ChiSquareQuad/12)
plt.hist(slopes, bins=100)
plt.xlabel("m")
plt.ylabel("Entries")
plt.title("Slopes histogram")
plt.savefig("2b_slopes"+fileEnding)
plt.clf()
plt.hist(intercepts, bins=100)
plt.xlabel("b")
plt.ylabel("Entries")
plt.title("Intercepts histogram")
plt.savefig("2b_intercepts"+fileEnding)
plt.clf()
if i == 1:
        plt.hist(quads, bins=100)
plt.xlabel("a (quadratic coefficient)")
plt.ylabel("Entries")
plt.title("Quadratic coefficient histogram")
plt.savefig("2b_quads"+fileEnding)
plt.clf()
plt.hist(chisqs, bins=100)
plt.xlabel("X^2 / ndf")
plt.ylabel("Entries")
plt.title("Chi-square of linear fit")
plt.savefig("2c_chisq"+fileEnding)
plt.clf()
plt.hist(chisqquads, bins=100)
plt.xlabel("X^2 / ndf")
plt.ylabel("Entries")
plt.title("Chi-square of quadratic fit")
plt.savefig("2d_chisq2"+fileEnding)
plt.clf()
theNdf=0
if i ==1:
theNdf=12
else:
theNdf=13
chispace=np.linspace(0,theNdf*3,1000)
    chidist=stat.chi2(theNdf)
plt.plot(chispace/theNdf, chidist.pdf(chispace))
plt.xlabel("X^2")
plt.ylabel("P")
plt.title("Chi-square distribution (ndf ="+str(theNdf)+")")
plt.savefig("2d_chisq2pdf"+fileEnding)
plt.clf()
Taus=[]
for i in range(0,500):
if i % 100 == 0:
print(i)
exp = theExponential(500)
result=exp.AnalyzeDistro(i)
if math.isnan(result) == False:
Taus.append(result)
print(Taus)
plt.hist(Taus, bins=20)
plt.xlabel("Tau")
plt.ylabel("Entries")
plt.title("Estimated Tau")
plt.savefig("3c_tau_hist_500samples.png")
Taus=[]
for i in range(0,500):
if i % 100 == 0:
print(i)
exp = theExponential(50)
result=exp.AnalyzeDistro(i)
if math.isnan(result) == False:
Taus.append(result)
print(Taus)
plt.hist(Taus, bins=20)
plt.xlabel("Tau")
plt.ylabel("Entries")
plt.title("Estimated Tau")
plt.savefig("3c_tau_hist_50samples.png")
| 3.15625 | 3 |
Lib/rcjktools/buildVarC.py | BlackFoundryCom/rcjk-tools | 1 | 12798890 | from fontTools.misc.fixedTools import floatToFixed
from fontTools.ttLib import TTFont, newTable, registerCustomTableClass
from fontTools.varLib.models import VariationModel, allEqual
from fontTools.varLib.varStore import OnlineVarStoreBuilder
from rcjktools.varco import VarCoFont
from rcjktools.table_VarC import (
fixedCoord,
getToFixedConverterForNumIntBitsForScale,
transformToIntConverters,
transformDefaults,
VARIDX_KEY,
ComponentRecord,
CoordinateRecord,
TransformRecord,
)
def precompileAllComponents(vcData, allLocations, axisTags):
precompiled = {}
masterModel = VariationModel(allLocations, axisTags)
storeBuilder = OnlineVarStoreBuilder(axisTags)
for gn in vcData.keys():
components, locations = vcData[gn]
sparseMapping = [None] * len(allLocations)
for locIndex, loc in enumerate(locations):
allIndex = allLocations.index(loc)
sparseMapping[allIndex] = locIndex
subModel, mapping = masterModel.getSubModel(sparseMapping)
storeBuilder.setModel(subModel)
# reorder master values according to allLocations
components = [[c[i] for i in mapping] for c in components]
precompiledGlyph = precompileVarComponents(
gn, components, storeBuilder, axisTags
)
if precompiledGlyph is not None:
# glyph components do not contain data that has to go to the 'VarC' table
precompiled[gn] = precompiledGlyph
return precompiled, storeBuilder.finish()
def precompileVarComponents(glyphName, components, storeBuilder, axisTags):
precompiled = []
haveVarCData = False
for component in components:
coordKeys = sorted({k for coord, transform in component for k in coord})
coordDefaults = {k: 0 for k in coordKeys}
coordConverters = {k: fixedCoord for k in coordKeys}
dicts = [coord for coord, transform in component]
coordDict = compileDicts(
dicts,
coordDefaults,
coordConverters,
storeBuilder,
allowIndividualVarIdx=True,
)
dicts = [transform for coord, transform in component]
transformToIntConvertersLocal = dict(transformToIntConverters)
numIntBitsForScale = calcNumIntBitsForScale(dicts)
scaleConvert = getToFixedConverterForNumIntBitsForScale(numIntBitsForScale)
transformToIntConvertersLocal["ScaleX"] = scaleConvert
transformToIntConvertersLocal["ScaleY"] = scaleConvert
transformDict = compileDicts(
dicts, transformDefaults, transformToIntConvertersLocal, storeBuilder
)
if coordDict or transformDict:
haveVarCData = True
precompiled.append(
ComponentRecord(
CoordinateRecord(coordDict),
TransformRecord(transformDict),
numIntBitsForScale,
),
)
if haveVarCData:
return precompiled
else:
return None
def compileDicts(
dicts, dictDefaults, dictConverters, storeBuilder, allowIndividualVarIdx=False
):
resultDict = {}
convertedMasterValues = {}
hasVariations = False # True if any key has variations
for k, default in dictDefaults.items():
masterValues = [d.get(k, default) for d in dicts]
if not allEqual(masterValues):
hasVariations = True
elif masterValues[0] == default:
# No variations, value is default, skip altogether
continue
resultDict[k] = dict(value=masterValues[0])
convertedMasterValues[k] = [dictConverters[k](value) for value in masterValues]
if hasVariations:
for k, masterValues in convertedMasterValues.items():
if allowIndividualVarIdx and allEqual(
masterValues
): # TODO: Avoid second allEqual() call?
continue
base, varIdx = storeBuilder.storeMasters(masterValues)
assert base == masterValues[0], (k, base, masterValues)
resultDict[k][VARIDX_KEY] = varIdx
return resultDict
def calcNumIntBitsForScale(dicts):
minScale, maxScale = _calcMinMaxScale(dicts)
numIntBits = _calcNumIntBits(minScale, maxScale)
return numIntBits
def _calcNumIntBits(minValue, maxValue, maxIntBits=7):
# TODO: there must be a better way, but at least this is correct
assert minValue <= maxValue
for i in range(maxIntBits):
precisionBits = 16 - i
minIntVal = floatToFixed(minValue, precisionBits)
maxIntVal = floatToFixed(maxValue, precisionBits)
if -32768 <= minIntVal and maxIntVal <= 32767:
return i + 1 # use one more: deltas may be bigger! (this is rather fuzzy)
raise ValueError("value does not fit in maxBits")
def _calcMinMaxScale(transformDicts):
minScale = 0
maxScale = 0
for d in transformDicts:
minScale = min(minScale, d.get("ScaleX", 0))
minScale = min(minScale, d.get("ScaleY", 0))
maxScale = max(maxScale, d.get("ScaleX", 0))
maxScale = max(maxScale, d.get("ScaleY", 0))
return minScale, maxScale
def remapVarIdxs(precompiled, mapping):
for glyphName, components in precompiled.items():
for component in components:
for v in component.coord.values():
if VARIDX_KEY in v:
v[VARIDX_KEY] = mapping[v[VARIDX_KEY]]
for v in component.transform.values():
if VARIDX_KEY in v:
v[VARIDX_KEY] = mapping[v[VARIDX_KEY]]
def buildVarCTable(ttf, vcData, allLocations):
axisTags = [axis.axisTag for axis in ttf["fvar"].axes]
varc_table = ttf["VarC"] = newTable("VarC")
varc_table.Version = 0x00010000
precompiled, store = precompileAllComponents(vcData, allLocations, axisTags)
mapping = store.optimize()
remapVarIdxs(precompiled, mapping)
varc_table.GlyphData = precompiled
varc_table.VarStore = store
def buildVarC(
designspacePath, ttfPath, outTTFPath, doTTX, saveWoff2, neutralOnly=False
):
import pathlib
registerCustomTableClass("VarC", "rcjktools.table_VarC", "table_VarC")
ttfPath = pathlib.Path(ttfPath)
if outTTFPath is None:
outTTFPath = ttfPath.parent / (ttfPath.stem + "-varc" + ttfPath.suffix)
else:
outTTFPath = pathlib.Path(outTTFPath)
ttf = TTFont(ttfPath)
axisTags = [axis.axisTag for axis in ttf["fvar"].axes]
globalAxisNames = {axisTag for axisTag in axisTags if axisTag[0] != "V"}
vcFont = VarCoFont(designspacePath)
vcData, allLocations, neutralGlyphNames = vcFont.extractVarCoData(
globalAxisNames, neutralOnly
)
if neutralGlyphNames:
gvarTable = ttf["gvar"]
for glyphName in neutralGlyphNames:
del gvarTable.variations[glyphName]
buildVarCTable(ttf, vcData, allLocations)
if doTTX:
outTTXPath = outTTFPath.parent / (outTTFPath.stem + "-before.ttx")
ttf.saveXML(outTTXPath, tables=["VarC"])
ttf.save(outTTFPath)
ttf = TTFont(outTTFPath, lazy=True) # Load from scratch
if doTTX:
outTTXPath = outTTFPath.parent / (outTTFPath.stem + "-after.ttx")
ttf.saveXML(outTTXPath, tables=["VarC"])
if saveWoff2:
outWoff2Path = outTTFPath.parent / (outTTFPath.stem + ".woff2")
ttf.flavor = "woff2"
ttf.save(outWoff2Path)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("designspace", help="The VarCo .designspace source")
parser.add_argument("ttf", help="The input Variable Font")
parser.add_argument("--output", help="The output Variable Font")
parser.add_argument(
"--ttx", action="store_true", help="write TTX dumps for the VarC table."
)
parser.add_argument("--no-woff2", action="store_true")
parser.add_argument(
"--neutral-only",
action="store_true",
help="hack: build a pseudo static COLRv1 table, that won't respond to the "
"non-hidden axes",
)
args = parser.parse_args()
buildVarC(
args.designspace,
args.ttf,
args.output,
args.ttx,
not args.no_woff2,
args.neutral_only,
)
if __name__ == "__main__":
main()
| 1.804688 | 2 |
unifi/objects/device.py | BastiG/unifi-py | 0 | 12798891 | from unifi.objects.base import UnifiBaseObject
from unifi.helper import find_by_attr, json_print
class UnifiDeviceObject(UnifiBaseObject):
def get_port_profile(self, **filter_kwargs):
port = find_by_attr(self.port_table, **filter_kwargs)
port_override = find_by_attr(self.port_overrides, port_idx=port['port_idx'])
portconf_id = port_override['portconf_id'] if port_override and 'portconf_id' in port_override else port['portconf_id']
portconf = find_by_attr(self.controller.portconf(), _id=portconf_id)
return portconf
def set_port_profile(self, portconf, **filter_kwargs):
port = find_by_attr(self.port_table, **filter_kwargs)
port_override = find_by_attr(self.port_overrides, port_idx=port['port_idx'])
if port_override:
port_override['portconf_id'] = portconf['_id']
else:
port_override = {
'port_idx': port['port_idx'],
'portconf_id': portconf['_id']
}
self.port_overrides.append(port_override)
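# Illustrative usage sketch (the device object and port numbers are
# assumptions, not part of this module):
#
#   profile = device.get_port_profile(port_idx=1)   # effective profile, honouring any override
#   device.set_port_profile(profile, port_idx=2)    # copy it onto port 2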
| 2.40625 | 2 |
2008/round-1b/mousetrap/script.py | iamFIREcracker/google-code-jam | 0 | 12798892 | from collections import deque
for case in xrange(input()):
cards = input()
indexes = map(int, raw_input().split())
deck = [0 for i in xrange(cards)]
index = -1
for i in xrange(1, cards + 1):
while True:
index = (index + 1)%cards
if deck[index] == 0:
break
for j in xrange(i - 1):
while True:
index = (index + 1)%cards
if deck[index] == 0:
break
deck[index] = i
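    # e.g. for cards = 5 this loop builds deck = [1, 3, 2, 5, 4]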
#--------------------------------------------------
# for case in xrange(input()):
# k = input()
# indexes = map(int, raw_input().split())
#
# deck = deque()
# for card in xrange(k, 0, -1):
# deck.appendleft(card)
# print deck
# deck.rotate(card - 1)
# print deck
#--------------------------------------------------
print 'Case #%d: %s' % (case + 1, ' '.join(str(deck[i - 1])
for i in indexes[1:]))
| 3.140625 | 3 |
lifx_rest.py | rsilveira79/Utils-python | 0 | 12798893 | <reponame>rsilveira79/Utils-python<filename>lifx_rest.py
import requests
from time import sleep
token = "<KEY>"
# My bulb's ID:
# "id": "d073d502164d",
# "uuid": "021063bb-1cae-416b-bbff-3dbe5cc22a35",
headers = {
"Authorization": "Bearer %s" % token,
}
state_off ={
"power": "off",
"color": "blue saturation:0.5",
"brightness": 0.5,
}
state1 ={
"power": "on",
"color": "yellow",
"brightness": 0.5,
}
state2 ={
"power": "on",
"color": "rgb:0,140,251",
"brightness": 1,
}
# Base URLs to be accessed
url_1 = 'https://api.lifx.com/v1/lights/all'
url_2 = 'https://api.lifx.com/v1/scenes'
url_3 = 'https://api.lifx.com/v1/lights/d073d502164d/state'
url_4 = 'https://api.lifx.com/v1/lights/d073d502164d/toggle'
# GET request - lists all bulbs
response = requests.get(url_1,headers=headers)
print(response.text)
#scenes = requests.get(url_2, data={}, headers=headers)
#print(scenes.text)
#t_power = requests.post(url_4,headers=headers)
#print(t_power.text)
activate = requests.put('https://api.lifx.com/v1/lights/d073d502164d/state', data={"power": "on","color": "rgb:128,128,128","brightness": 251}, headers=headers)
#activate = requests.put('https://api.lifx.com/v1/lights/d073d502164d/state', data=state2, headers=headers)
#sleep(5)
#activate = requests.put('https://api.lifx.com/v1/lights/d073d502164d/state', data=state1, headers=headers)
#sleep(5)
#activate = requests.put('https://api.lifx.com/v1/lights/d073d502164d/state', data=state_off, headers=headers)
#print(activate.text)
'''
cycles=0
while cycles == 0:
sleep(2)
t_power = requests.post(url_4,headers=headers)
print(t_power.text)
'''
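# A small helper sketch built from the pieces above (url_4 is the toggle
# endpoint and headers the auth defined earlier); not part of the original
# script, just a tidier version of the commented-out loop:
def toggle_lamp(times=1, delay=2):
    """Toggle the lamp's power `times` times, waiting `delay` seconds between calls."""
    for _ in range(times):
        sleep(delay)
        response = requests.post(url_4, headers=headers)
        print(response.text)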
| 2.796875 | 3 |
dmlab2d/dmlab2d_test.py | Robert-Held/lab2d | 377 | 12798894 | # Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dmlab2d.dmlab2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from dm_env import test_utils
import numpy as np
import dmlab2d
from dmlab2d import runfiles_helper
class Dmlab2dDmEnvTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
lab2d = dmlab2d.Lab2d(runfiles_helper.find(),
{'levelName': 'examples/level_api'})
return dmlab2d.Environment(lab2d, lab2d.observation_names(), 0)
class Dmlab2DTest(absltest.TestCase):
def _create_env(self, extra_settings=None):
settings = extra_settings.copy() if extra_settings else {}
settings['levelName'] = 'examples/level_api'
return dmlab2d.Lab2d(runfiles_helper.find(), settings)
def test_lab2d_environment_name(self):
self.assertEqual(self._create_env().name(), 'dmlab2d')
def test_lab2d_observation_names(self):
env = self._create_env()
self.assertEqual(env.observation_names(),
['VIEW' + str(i) for i in range(1, 6)])
def test_lab2d_observation_spec(self):
env = self._create_env()
self.assertEqual(
env.observation_spec('VIEW1'), {
'dtype': np.dtype('uint8'),
'shape': (1,)
})
self.assertEqual(
env.observation_spec('VIEW2'), {
'dtype': np.dtype('double'),
'shape': (2,)
})
self.assertEqual(
env.observation_spec('VIEW3'), {
'dtype': np.dtype('int32'),
'shape': (3,)
})
self.assertEqual(
env.observation_spec('VIEW4'), {
'dtype': np.dtype('int64'),
'shape': (4,)
})
# Text is stored in objects.
self.assertEqual(
env.observation_spec('VIEW5'), {
'dtype': np.dtype('O'),
'shape': ()
})
def test_lab2d_action_spec(self):
env = self._create_env()
self.assertEqual(env.action_discrete_names(), ['REWARD_ACT'])
self.assertEqual(
env.action_discrete_spec('REWARD_ACT'), {
'min': 0,
'max': 4
})
self.assertEqual(env.action_continuous_names(), ['OBSERVATION_ACT'])
self.assertEqual(
env.action_continuous_spec('OBSERVATION_ACT'), {
'min': -5,
'max': 5
})
self.assertEqual(env.action_text_names(), ['LOG_EVENT'])
def test_lab2d_start_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
def test_lab2d_events_start(self):
env = self._create_env()
env.start(episode=0, seed=0)
events = env.events()
self.assertLen(events, 1)
event_name, observations = events[0]
self.assertEqual(event_name, 'start')
self.assertLen(observations, 1)
np.testing.assert_array_equal(observations[0], [1, 2, 3])
def test_lab2d_events_cleared_after_advance_not_read(self):
env = self._create_env()
env.start(episode=0, seed=0)
self.assertLen(env.events(), 1)
self.assertLen(env.events(), 1)
env.advance()
self.assertEmpty(env.events())
def test_lab2d_observe(self):
env = self._create_env()
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW1'), [1])
np.testing.assert_array_equal(env.observation('VIEW2'), [1, 2])
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
np.testing.assert_array_equal(env.observation('VIEW4'), [1, 2, 3, 4])
self.assertEqual(env.observation('VIEW5'), b'')
def test_lab2d_ten_steps_terminate_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
for _ in range(9):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_settings_environment(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
for _ in range(4):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_properties_environment(self):
env = self._create_env({'steps': '5'})
properties = env.list_property('')
self.assertLen(properties, 1)
self.assertEqual(properties[0],
('steps', dmlab2d.PropertyAttribute.READABLE_WRITABLE))
self.assertEqual(env.read_property('steps'), '5')
env.write_property('steps', '3')
self.assertEqual(env.read_property('steps'), '3')
env.start(episode=0, seed=0)
for _ in range(2):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_act_discrete(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
env.act_discrete(np.array([2], np.dtype('int32')))
_, reward = env.advance()
self.assertEqual(reward, 2)
def test_lab2d_act_continuous(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
env.act_continuous([10])
env.advance()
np.testing.assert_array_equal(env.observation('VIEW3'), [11, 12, 13])
def test_lab2d_act_text(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
view = env.observation('VIEW5')
self.assertEqual(view, b'')
env.act_text(['Hello'])
env.advance()
view = env.observation('VIEW5')
self.assertEqual(view, b'Hello')
def test_lab2d_invalid_setting(self):
with self.assertRaises(ValueError):
self._create_env({'missing': '5'})
def test_lab2d_bad_action_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.action_discrete_spec('bad_key')
with self.assertRaises(KeyError):
env.action_continuous_spec('bad_key')
def test_lab2d_bad_observation_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.observation_spec('bad_key')
def test_lab2d_observe_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.observation('VIEW1')
def test_lab2d_act_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.act_discrete([0])
with self.assertRaises(RuntimeError):
env.act_continuous([0])
with self.assertRaises(RuntimeError):
env.act_text([''])
def test_lab2d_act_bad_shape(self):
env = self._create_env()
env.start(0, 0)
with self.assertRaises(ValueError):
env.act_discrete([0, 1])
with self.assertRaises(ValueError):
env.act_continuous([0, 1])
def test_lab2d_advance_after_episode_ends(self):
env = self._create_env({'steps': '2'})
env.start(0, 0)
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
with self.assertRaises(RuntimeError):
env.advance()
def test_lab2d_missing_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(KeyError):
env.list_property('missing')
with self.assertRaises(KeyError):
env.read_property('missing')
with self.assertRaises(KeyError):
env.write_property('missing', '10')
def test_lab2d_invalid_ops_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(ValueError):
env.list_property('steps')
with self.assertRaises(ValueError):
env.write_property('steps', 'mouse')
if __name__ == '__main__':
absltest.main()
| 1.976563 | 2 |
molpy/data/__init__.py | yonghui-cc/molpy | 0 | 12798895 | <reponame>yonghui-cc/molpy<filename>molpy/data/__init__.py<gh_stars>0
from .reader import look_and_say, get_molecule
from .read_xyz_files import open_xyz
| 1.429688 | 1 |
clavier/sh.py | nrser/clavier | 0 | 12798896 | <gh_stars>0
from typing import *
import os
from os.path import isabs, basename
import subprocess
from pathlib import Path
import json
from shutil import rmtree
import shlex
from functools import wraps
import splatlog as logging
from .cfg import CFG
from .io import OUT, ERR, fmt, fmt_cmd
TOpts = Mapping[Any, Any]
TOptsStyle = Literal["=", " ", ""]
TOptsLongPrefix = Literal["--", "-"]
_TPath = Union[Path, str]
CONFIG = CFG.clavier.sh
LOG = logging.getLogger(__name__)
DEFAULT_OPTS_STYLE: TOptsStyle = "="
DEFAULT_OPTS_SORT = True
CompletedProcess = subprocess.CompletedProcess
def render_path(path: Path, rel_to: Optional[Path]) -> str:
if rel_to is None:
return str(path)
    try:
        return str(path.relative_to(rel_to))
    except ValueError:
        # `path` is not a descendant of `rel_to`; leave it untouched
        # (matches the documented "no ../ transformations" behavior)
        return str(path)
def _iter_opt(
flag: str,
value: Any,
style: TOptsStyle,
is_short: bool,
rel_to: Optional[Path] = None,
) -> Generator[str, None, None]:
"""Private helper for `iter_opts`."""
if isinstance(value, Path):
value = render_path(value, rel_to)
if value is None or value is False:
# Special case #1 — value is `None` or `False`
#
# We omit these entirely.
#
pass
elif value is True:
# Special case #2 — value is `True`
#
# We emit the bare flag, like `-x` or `--blah`.
#
yield flag
elif isinstance(value, (list, tuple)):
# Special case #3 — value is a `list` or `tuple`
#
# We handle these by emitting the option multiples times, once for each
# inner value.
#
for item in value:
yield from _iter_opt(flag, item, style, is_short)
elif style == " " or (is_short and style != ""):
# General case #1 — space-separated
#
# _Short_ (single-character) flags and values are _always_ space-
        # separated.
#
# _All_ flags and values are space-separated when the `style` is `" "`.
#
yield flag
yield str(value)
else:
# General case #2 — flag=value format
#
# When no other branch has matched, we're left with `=`-separated flag
# and value.
#
yield f"{flag}{style}{value}"
def render_opts(
opts: TOpts,
*,
long_prefix: TOptsLongPrefix = CONFIG.opts.long_prefix,
sort: bool = CONFIG.opts.sort,
style: TOptsStyle = CONFIG.opts.style,
rel_to: Optional[Path] = None,
) -> Generator[str, None, None]:
"""
Render a mapping of option names to values to a (yielded) sequence of
strings.
Examples:
### Style Examples ###
1. By default, `=` is used to separate "long options" and their values,
while "short options" (single-character options) are always separate
tokens from their values:
>>> list(render_opts({"a": 1, "bee": 2}))
['-a', '1', '--bee=2']
2. Use space-separated option names and values:
>>> list(render_opts({'blah': 1, 'meh': 2}, style=" "))
['--blah', '1', '--meh', '2']
3. Use a single `-` prefix on long options ("X toolkit" style):
>>> list(render_opts({'blah': 1, 'meh': 2}, long_prefix='-'))
['-blah=1', '-meh=2']
4. Use that weird "no-separator" style you sometimes see:
>>> list(render_opts({'x': 123, 'y': 456}, style=""))
['-x123', '-y456']
### List Value Examples ###
1. Short opt with a list (or tuple) value:
>>> list(render_opts({'x': [1, 2, 3]}))
['-x', '1', '-x', '2', '-x', '3']
2. Long opt with a list (or tuple) value:
>>> list(render_opts({'blah': [1, 2, 3]}))
['--blah=1', '--blah=2', '--blah=3']
3. Due to the recursive, yield-centric nature, nested lists work as well:
>>> list(render_opts({'blah': [1, 2, [[3], 4], 5]}))
['--blah=1', '--blah=2', '--blah=3', '--blah=4', '--blah=5']
Neat, huh?!
### Relative Path Examples ###
1. As with positional arguments, `pathlib.Path` option values can be
rendered relative to a `rel_to` directory. Only paths that are
descendants of `rel_to` will be relativized (no `../` transformations).
>>> list(
... render_opts(
... {
... 'input': Path("/tmp/blah.json"),
... 'output': Path("/dev/null"),
... },
... rel_to=Path("/tmp")
... )
... )
['--input=blah.json', '--output=/dev/null']
"""
# Handle `None` as a legit value, making life easier on callers assembling
# commands
if opts is None:
return
# Sort key/value pairs if needed
items = sorted(opts.items()) if sort else list(opts.items())
for name, value in items:
name_s = str(name)
is_short = len(name_s) == 1
flag = f"-{name_s}" if is_short else f"{long_prefix}{name_s}"
yield from _iter_opt(flag, value, style, is_short, rel_to)
def render_args(
args: Iterable[Any],
*,
opts_long_prefix: TOptsLongPrefix = CONFIG.opts.long_prefix,
opts_sort: bool = CONFIG.opts.sort,
opts_style: TOptsStyle = CONFIG.opts.style,
rel_to: Optional[Path] = None,
) -> Generator[Union[str, bytes], None, None]:
"""\
Render `args` to sequence of `str` (and/or `bytes`, if any values passed in
are `bytes`).
`args` entries are handled by type:
1. `str` and `bytes` -- passed through.
2. `pathlib.Path` -- passed (along with `rel_to`) through `render_path`.
3. `typing.Mapping` -- understood as options, passed through `render_opts`.
4. `typing.Iterable` -- recurred into.
5. Other -- converted to a string with `str()`.
"""
for arg in args:
if isinstance(arg, (str, bytes)):
yield arg
elif isinstance(arg, Path):
yield render_path(arg, rel_to)
elif isinstance(arg, Mapping):
yield from render_opts(
arg,
long_prefix=opts_long_prefix,
style=opts_style,
sort=opts_sort,
rel_to=rel_to,
)
elif isinstance(arg, Iterable):
yield from render_args(
arg,
opts_long_prefix=opts_long_prefix,
opts_style=opts_style,
opts_sort=opts_sort,
rel_to=rel_to,
)
else:
yield str(arg)
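# Example (a sketch of typical output, using the defaults above):
#
#     >>> list(render_args(["kubectl", {"namespace": "kube-system"}, ["get", "pods"]]))
#     ['kubectl', '--namespace=kube-system', 'get', 'pods']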
def prepare(
*args,
cwd: Optional[_TPath] = None,
rel_paths: bool = CONFIG.rel_paths,
**opts,
) -> List[str]:
"""\
Prepare `args` to be passed `subprocess.run` or similar functions.
Contextualizes the relative path capabilities of `render_args` and
`render_opts` to the working directory, which can either be provided as
`cwd` or assumed to be the current directory.
Relative path conversion is controlled by the `rel_paths` flag.
## Examples ##
>>> prepare(
... "kubectl",
... {"namespace": "blah"},
... "logs",
... {"follow": True},
... "some-pod",
... )
['kubectl', '--namespace=blah', 'logs', '--follow', 'some-pod']
"""
# Normalize str cwd path to Path
if isinstance(cwd, str):
cwd = Path(cwd)
if rel_paths is True:
rel_to = Path.cwd() if cwd is None else cwd
else:
rel_to = None
return list(render_args(args, rel_to=rel_to, **opts))
def join(*args, **opts) -> str:
"""\
Render `args` to a single string with `prepare` -> `shlex.join`. Returned
string _should_ be suitable for pasting in a shell.
## Parameters ##
Same as `prepare`.
"""
return shlex.join(prepare(*args, **opts))
def prepare_wrap(fn: Callable) -> Callable:
"""\
Decorator helper to run `prepare` and do a bit more common normalization
for `get`, `run` etc.
"""
@wraps(fn)
def _prepare_wrapper(
*args,
cwd: Optional[_TPath] = None,
encoding: Optional[str] = CONFIG.encoding,
opts_long_prefix: TOptsLongPrefix = CONFIG.opts.long_prefix,
opts_sort: bool = CONFIG.opts.sort,
opts_style: TOptsStyle = CONFIG.opts.style,
rel_paths: bool = CONFIG.rel_paths,
**opts,
):
# Normalize str cwd path to Path
if isinstance(cwd, str):
cwd = Path(cwd)
cmd = prepare(
*args,
cwd=cwd,
opts_long_prefix=opts_long_prefix,
opts_sort=opts_sort,
opts_style=opts_style,
rel_paths=rel_paths,
)
return fn(*cmd, cwd=cwd, encoding=encoding, **opts)
return _prepare_wrapper
# pylint: disable=redefined-builtin
@LOG.inject
@prepare_wrap
def get(
*cmd,
log=LOG,
format: Optional[str] = None,
**opts,
) -> Any:
log.debug(
"Getting system command output...",
cmd=fmt_cmd(cmd),
format=format,
**opts,
)
# https://docs.python.org/3.8/library/subprocess.html#subprocess.check_output
output = subprocess.check_output(cmd, **opts)
if format is None:
return output
elif format == "strip":
return output.strip()
elif format == "json":
return json.loads(output)
else:
log.warn("Unknown `format`", format=format, expected=[None, "json"])
return output
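# Usage sketch (assumes a `kubectl` binary on PATH; any command works):
#
#     pods = get("kubectl", "get", "pods", {"output": "json"}, format="json")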
@LOG.inject
@prepare_wrap
def run(
*cmd,
log=LOG,
check: bool = True,
input: Union[None, str, bytes, Path] = None,
**opts,
) -> CompletedProcess:
log.info(
"Running system command...",
cmd=fmt_cmd(cmd),
**opts,
)
# https://docs.python.org/3.8/library/subprocess.html#subprocess.run
if isinstance(input, Path):
with input.open("r", encoding="utf-8") as file:
return subprocess.run(
cmd,
check=check,
input=file.read(),
**opts,
)
else:
return subprocess.run(cmd, check=check, input=input, **opts)
@LOG.inject
def test(*args, **kwds) -> bool:
"""\
Run a command and return whether or not it succeeds (has
`subprocess.CompletedProcess.returncode` equal to `0`).
>>> test("true", shell=True)
True
>>> test("false", shell=True)
False
"""
return run(*args, check=False, **kwds).returncode == 0
@LOG.inject
@prepare_wrap
def replace(
*cmd,
log=LOG,
# Used, but defaulted in `prepare_cmd`, so needs to be accepted here
encoding: Optional[str] = None,
env: Optional[Mapping] = None,
cwd: Optional[Union[str, Path]] = None,
) -> NoReturn:
# https://docs.python.org/3.9/library/os.html#os.execl
for console in (OUT, ERR):
console.file.flush()
proc_name = basename(cmd[0])
log.debug(
"Replacing current process with system command...",
cmd=fmt_cmd(cmd),
env=env,
cwd=cwd,
)
if cwd is not None:
os.chdir(cwd)
if env is None:
if isabs(cmd[0]):
os.execv(cmd[0], cmd)
else:
os.execvp(proc_name, cmd)
else:
if isabs(cmd[0]):
os.execve(cmd[0], cmd, env)
else:
os.execvpe(proc_name, cmd, env)
@LOG.inject
def file_absent(path: Path, name: Optional[str] = None, log=LOG):
if name is None:
name = fmt(path)
if path.exists():
log.info(f"[holup]Removing {name}...[/holup]", path=path)
if path.is_dir():
rmtree(path)
else:
os.remove(path)
else:
log.info(f"[yeah]{name} already absent.[/yeah]", path=path)
@LOG.inject
def dir_present(path: Path, desc: Optional[str] = None, log=LOG):
if desc is None:
desc = fmt(path)
if path.exists():
if path.is_dir():
log.debug(
f"[yeah]{desc} directory already exists.[/yeah]", path=path
)
else:
raise RuntimeError(f"{path} exists and is NOT a directory")
else:
log.info(f"[holup]Creating {desc} directory...[/holup]", path=path)
os.makedirs(path)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2.171875 | 2 |
cmdb-compliance/biz/ds/ds_uncom_vpc_peering.py | zjj1002/aws-cloud-cmdb-system | 0 | 12798897 | <reponame>zjj1002/aws-cloud-cmdb-system<gh_stars>0
from libs.db_context import DBContext
from models.uncom_vpc_peering import UncomVpcPeering
from models.vpc_peering import VpcPeering, model_to_dict
from models.owner_list import OwnerList
from models.owner_list import model_to_dict as owner_model_to_list
# Get the account IDs of non-compliant VPC peerings
def get_uncom_id():
with DBContext('r') as session:
        # Collect the account IDs from all VPC peering records
vpc_peering_info = session.query(VpcPeering).all()
account_id = set()
for data in vpc_peering_info:
data_dict = model_to_dict(data)
account_id.add(data_dict["requester_owner_id"])
account_id.add(data_dict["accepter_owner_id"])
        # Collect the owner account IDs
owner_info = session.query(OwnerList).all()
owner_id = []
for data in owner_info:
data_dict = owner_model_to_list(data)
owner_id.append(data_dict["owner_id"])
        # Find the VPC peering account IDs that are not in the owner list
uncom_id = []
for a_id in account_id:
if a_id not in owner_id:
uncom_id.append(a_id)
return uncom_id
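# Note: the membership loop above amounts to a set difference; an equivalent
# one-liner would be: uncom_id = list(account_id - set(owner_id))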
def get_uncom_vpc_peering():
"""获取到不合规的vpc peering数据列表"""
uncom_id_list = get_uncom_id()
with DBContext('r') as session:
uncom_vpc_peering_info_list = set()
for uncom_id in uncom_id_list:
uncom_vpc_peering_info_request = session.query(VpcPeering).filter(VpcPeering.requester_owner_id == uncom_id).all()
for uncom_vpc_peering_request in uncom_vpc_peering_info_request:
uncom_vpc_peering_info_list.add(uncom_vpc_peering_request)
uncom_vpc_peering_info_accepter = session.query(VpcPeering).filter(VpcPeering.accepter_owner_id == uncom_id).all()
for uncom_vpc_peering_accepter in uncom_vpc_peering_info_accepter:
uncom_vpc_peering_info_list.add(uncom_vpc_peering_accepter)
uncom_vpc_peering = []
for data in uncom_vpc_peering_info_list:
data_dict = model_to_dict(data)
uncom_vpc_peering.append(data_dict)
return uncom_vpc_peering
def uncom_vpc_peering_sync_cmdb():
"""把uncom_vpc_peering数据同步到数据库"""
uncom_vpc_peering_list = get_uncom_vpc_peering()
with DBContext('w') as session:
        session.query(UncomVpcPeering).delete(synchronize_session=False)  # clear all existing records
for uncom_vpc_peering in uncom_vpc_peering_list:
vpc_peering_connection_id = uncom_vpc_peering["vpc_peering_connection_id"]
requester_cidr_block = uncom_vpc_peering["requester_cidr_block"]
requester_owner_id = uncom_vpc_peering["requester_owner_id"]
requester_vpc_id = uncom_vpc_peering["requester_vpc_id"]
requester_region = uncom_vpc_peering["requester_region"]
accepter_cidr_block = uncom_vpc_peering["accepter_cidr_block"]
accepter_owner_id = uncom_vpc_peering["accepter_owner_id"]
accepter_vpc_id = uncom_vpc_peering["accepter_vpc_id"]
accepter_region = uncom_vpc_peering["accepter_region"]
new_uncom_vpc_peering = UncomVpcPeering(
vpc_peering_connection_id=vpc_peering_connection_id, requester_cidr_block=requester_cidr_block,
requester_owner_id=requester_owner_id, requester_vpc_id=requester_vpc_id,
requester_region=requester_region, accepter_cidr_block=accepter_cidr_block,
accepter_owner_id=accepter_owner_id, accepter_vpc_id=accepter_vpc_id, accepter_region=accepter_region)
session.add(new_uncom_vpc_peering)
session.commit()
if __name__ == '__main__':
pass | 2.296875 | 2 |
2021/day02.py | iKevinY/advent | 11 | 12798898 | <reponame>iKevinY/advent
import fileinput
pos = 0
aim = 0
part_1_depth = 0
part_2_depth = 0
for line in fileinput.input():
ins, num = line.split()
num = int(num)
if ins == 'forward':
pos += num
part_2_depth += (aim * num)
elif ins == 'down':
part_1_depth += num
aim += num
elif ins == "up":
part_1_depth -= num
aim -= num
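# Part 1 treats down/up as direct depth changes; part 2 reinterprets them as
# aim changes, so depth only accrues on 'forward' moves (aim * num).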
print "Part 1:", pos * part_1_depth
print "Part 2:", pos * part_2_depth
| 3.671875 | 4 |
PythonClients/service_add_command_raw.py | naporium/Cisco_Web_Services-master | 0 | 12798899 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from json import dumps, loads
# for python 2
# from httplib import HTTPConnection
# for python 3
from http.client import HTTPConnection
# connect with REST server
connection = HTTPConnection('127.0.0.1', 80)
connection.connect()
data = {"ip": "192.168.2.254",
"list_of_commands_to_send": "show version"
}
# Get the method response
connection.request(
'POST',
'/add_command_raw',
dumps(data),
{'Content-Type': 'application/json'},
)
print("Waiting for Server response:")
response = connection.getresponse()
code = response.getcode()
headers = response.getheaders()
result = loads(response.read())
print(result)
print("code: ", code)
print("headers: ", headers)
print(dir(result))
# close the connection
connection.close() | 2.8125 | 3 |
cryptography/generate_hash_512.py | dgengtek/scripts | 0 | 12798900 | <reponame>dgengtek/scripts
#!/usr/bin/env python3
# generate password mac for dovecot
import sys
import os
import getopt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
import base64
import binascii
def usage():
h="usage:\t"+sys.argv[0]
h+=" [-s salt] password"
print(h)
sys.exit(2)
password = ""
salt = ""
options = "s:p:h"
opts, args = getopt.getopt(sys.argv[1:], options)
if len(args) == 1:
password=args[0]
else:
usage()
raw_options=options.replace(":","")
option_keys=[o for o,v in opts]
if "-h" in option_keys:
usage()
for opt,value in opts:
opt=opt.replace("-","").strip()
if "s" in opt:
salt=value
password=password.encode("utf8")
if salt:
salt=binascii.a2b_hex(salt)
else:
salt=os.urandom(16)
digest=hashes.Hash(hashes.SHA512(),backend=default_backend())
digest.update(password)
digest.update(salt)
hash_raw=digest.finalize()
hash_and_salt=hash_raw+salt
hash_base64=binascii.b2a_base64(hash_and_salt)
dovecot="{SSHA512}"+hash_base64.decode("utf-8")
print(dovecot)
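# Example run (a sketch; the output varies because the salt is random when -s
# is not supplied):
#
#   $ python generate_hash_512.py mypassword
#   {SSHA512}<base64 of sha512(password + salt) + salt>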
| 2.421875 | 2 |
docs/fa/docs_src/set_commands/set_commands.py | AliRn76/rubika-bot | 1 | 12798901 | <gh_stars>1-10
import requests
data = {
"bot_commands": [
{
"command": "command1",
"description": "description1"
},
{
"command": "command2",
"description": "description2"
},
]
}
url = f'https://messengerg2b1.iranlms.ir/v3/{token}/setCommands'
response = requests.post(url, data=data)
print(response.text)
| 2.4375 | 2 |
services/dts/src/oci_cli_dts/nfs_dataset_client_proxy.py | honzajavorek/oci-cli | 0 | 12798902 | # coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
"""
NOTE: This class should always comply to the API definition of NfsDatasetClient present in
services/dts/src/oci_cli_dts/physical_appliance_control_plane/client/nfs_dataset_client.py
"""
from oci_cli import cli_util
from services.dts.src.oci_cli_dts.appliance_config_manager import ApplianceConfigManager
from services.dts.src.oci_cli_dts.appliance_constants import APPLIANCE_CONFIGS_BASE_DIR, APPLIANCE_AUTH_USER, \
APPLIANCE_CERT_FILE_NAME
from services.dts.src.oci_cli_dts.physical_appliance_control_plane.client.nfs_dataset_client import NfsDatasetClient
class NfsDatasetClientProxy:
def __init__(self, ctx, appliance_profile):
config_manager = ApplianceConfigManager(APPLIANCE_CONFIGS_BASE_DIR)
appliance_config = config_manager.get_config(appliance_profile)
self.auth_value = "{}:{}".format(APPLIANCE_AUTH_USER, appliance_config.get_access_token())
self.serial_id = appliance_config.get_appliance_serial_id()
config = cli_util.build_config(ctx.obj)
host_name = appliance_config.get_appliance_url()
self_signed_cert = "{}/{}".format(config_manager.get_config_dir(appliance_profile), APPLIANCE_CERT_FILE_NAME)
self.nfs_dataset_client = NfsDatasetClient(
config=config, service_endpoint=host_name, self_signed_cert=self_signed_cert)
def activate_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.activate_nfs_dataset(dataset_name, **kwargs)
def create_nfs_dataset(self, details, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.create_nfs_dataset(details, **kwargs)
def deactivate_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.deactivate_nfs_dataset(dataset_name, **kwargs)
def delete_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.delete_nfs_dataset(dataset_name, **kwargs)
def get_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.get_nfs_dataset(dataset_name, **kwargs)
def get_nfs_dataset_seal_manifest(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.get_nfs_dataset_seal_manifest(dataset_name, **kwargs)
def get_nfs_dataset_seal_status(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.get_nfs_dataset_seal_status(dataset_name, **kwargs)
def initiate_seal_on_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.initiate_seal_on_nfs_dataset(dataset_name, **kwargs)
def list_nfs_datasets(self, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.list_nfs_datasets(**kwargs)
def reopen_nfs_dataset(self, dataset_name, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.reopen_nfs_dataset(dataset_name, **kwargs)
def update_nfs_dataset(self, dataset_name, body, **kwargs):
kwargs['auth_value'] = self.auth_value
kwargs['serial_id'] = self.serial_id
return self.nfs_dataset_client.update_nfs_dataset(dataset_name, body, **kwargs)
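    # Design note: every wrapper above injects the same two kwargs. A more
    # compact (hypothetical, untested) alternative would forward calls
    # generically:
    #
    #   def __getattr__(self, name):
    #       def _call(*args, **kwargs):
    #           kwargs.update(auth_value=self.auth_value, serial_id=self.serial_id)
    #           return getattr(self.nfs_dataset_client, name)(*args, **kwargs)
    #       return _call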
| 1.882813 | 2 |
source/ui/ui_message.py | alexander-l-stone/RogueSpace | 0 | 12798903 | <gh_stars>0
class UIMessage:
def __init__(self, parent, x, y, message, color):
self.x = parent.x + x
self.y = parent.y + y
self.message = message
self.color = color
self.visible = True
self.priority = 2
def draw(self, root_console, tick_count) -> None:
if not self.visible:
return
root_console.print(self.x, self.y, self.message, fg=self.color)
| 2.53125 | 3 |
worker_template.py | rampeer/Spreaduler | 15 | 12798904 | from spreaduler import ParamsSheet
import os

from train_attention import train
from options import get_parser
class YourParamsSheet(ParamsSheet):
"""
Your model Params Sheet class
"""
params_sheet_id = '...'
client_credentials = {
"type": "service_account",
"project_id": "....",
"private_key_id": "....",
"private_key": """-----BEGIN PRIVATE KEY-----
........
-----END PRIVATE KEY-----""",
"client_email": "<EMAIL>",
"client_id": "....",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/"
"yourworker%40yourproject.iam.gserviceaccount.com"
}
def __init__(self, parser, server_name):
writable_metrics_and_types = {
'your model precision': float
}
super(YourParamsSheet, self).__init__(
parser,
writable_column_types=writable_metrics_and_types,
experiment_id_column='exp_hash',
server_name=server_name)
if __name__ == '__main__':
server_name = os.environ.get('SERVERNAME', None)
params = YourParamsSheet(get_parser(), server_name)
params.exec_loop(train)
| 2.25 | 2 |
friday_5pm_helper/replicon_services/__init__.py | kyhau/friday-5pm-helper | 3 | 12798905 | <reponame>kyhau/friday-5pm-helper
# Define constants
#
HEADERS = {'content-type': 'application/json'}
SWIMLANE_FINDER_URL = 'https://global.replicon.com/DiscoveryService1.svc/GetTenantEndpointDetails'
| 0.957031 | 1 |
test.py | proto-n/listdiff-py | 0 | 12798906 | <filename>test.py
import listdiff
import numpy as np
import pandas as pd
reclist = pd.DataFrame({
'pid': [1,1,1,2,2,2,3,3,3],
'song_id': [4,5,6,4,5,6,4,5,12],
'pos': [1,2,3,1,2,3,3,2,1]
})
gt = pd.DataFrame({
'pid': [1,1,1,2,2,2,2],
'song_id': [1,5,9,4,5,12,9],
'pos': [1,2,3,4,3,2,1]
})
complement = pd.DataFrame({
'pid': [1,1,1,2,2,2,2,3,3,3,-1],
'song_id': [10,11,12,10,11,12,13,10,11,12,1],
'pos': [1,2,3,1,2,3,4,3,2,1,1]
})
res = listdiff.listdiff(
reclist[["pid","song_id","pos"]].sample(frac=1).values,
gt[["pid","song_id","pos"]].sample(frac=1).values,
complement=complement[["pid","song_id","pos"]].sample(frac=1).values,
extra_pids=np.array([3,4]),
cut_to_size=4
)
print(res)
assert np.all(res[res[:,0]==1] == np.array([
[1,4,1],
[1,6,2],
[1,10,3],
[1,11,4]
]))
assert np.all(res[res[:,0]==2] == np.array([
[2,6,1],
[2,10,2],
[2,11,3],
[2,13,4]
]))
assert np.all(res[res[:,0]==3] == np.array([
[3,12,1],
[3,5,2],
[3,4,3],
[3,11,4]
]))
assert np.all(res[res[:,0]==4] == np.array([
[4,1,1],
])) | 2.59375 | 3 |
c3bottles/views/api.py | lfuelling/c3bottles | 0 | 12798907 | <gh_stars>0
import json
from datetime import datetime
from flask import request, Response, Blueprint, jsonify
from flask_login import current_user
from c3bottles import app, db
from c3bottles.model.drop_point import DropPoint
from c3bottles.model.report import Report
from c3bottles.model.visit import Visit
bp = Blueprint("api", __name__)
@bp.route("/api", methods=("POST", "GET"))
def process():
if request.values.get("action") == "report":
return report()
elif request.values.get("action") == "visit":
return visit()
elif request.values.get("action") == "dp_json":
return dp_json()
return Response(
json.dumps(
"Invalid or missing API action.",
indent=4 if app.debug else None
),
mimetype="application/json",
status=400
)
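# Usage sketch: a client reports a drop point by POSTing form data, e.g.
#   POST /api with action=report&number=42&state=<state>
# returns the updated drop point as JSON (or a 400 error payload as above).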
@bp.route("/api/all_dp.json", methods=("POST", "GET"))
def all_dp():
return dp_json()
@bp.route("/api/map_source.json")
def map_source():
map_source = app.config.get('MAP_SOURCE', {})
return jsonify({
"attribution": map_source.get('attribution', ''),
"tileserver": map_source.get('tileserver', ''),
"tileserver_subdomains": map_source.get("tileserver_subdomains", []),
"bounds": map_source.get("bounds", None),
"initial_view": map_source.get("initial_view", None),
"level_config": map_source.get("level_config", None),
"min_zoom": map_source.get("min_zoom", 0),
"max_zoom": map_source.get("max_zoom", 0),
"simple_crs": map_source.get("simple_crs", False),
"hack_257px": map_source.get("hack_257px", False),
"tms": map_source.get("tms", False),
"no_wrap": map_source.get("no_wrap", False)
})
def report():
if not current_user.can_report:
return Response(
json.dumps(
[{"msg": "Not logged in or insufficient privileges."}],
indent=4 if app.debug else None
),
mimetype="application/json",
status=401
)
number = request.values.get("number")
try:
Report(
dp=DropPoint.query.get(number),
state=request.values.get("state")
)
except ValueError as e:
return Response(
json.dumps(e.args, indent=4 if app.debug else None),
mimetype="application/json",
status=400
)
else:
db.session.commit()
return Response(
DropPoint.get_dp_json(number),
mimetype="application/json"
)
def visit():
if not current_user.can_visit:
return Response(
json.dumps(
[{"msg": "Not logged in or insufficient privileges."}],
indent=4 if app.debug else None
),
mimetype="application/json",
status=401
)
number = request.values.get("number")
try:
Visit(
dp=DropPoint.query.get(number),
action=request.values.get("maintenance")
)
except ValueError as e:
return Response(
json.dumps(e.args, indent=4 if app.debug else None),
mimetype="application/json",
status=400
)
else:
db.session.commit()
return Response(
DropPoint.get_dp_json(number),
mimetype="application/json"
)
def dp_json():
ts = request.values.get("ts")
if ts:
try:
dps = DropPoint.get_dps_json(
time=datetime.fromtimestamp(float(ts))
)
except ValueError as e:
return Response(
json.dumps(e.args, indent=4 if app.debug else None),
mimetype="application/json",
status=400
)
else:
dps = DropPoint.get_dps_json()
return Response(
dps,
mimetype="application/json"
)
| 1.976563 | 2 |
solutions/codeforces/617A.py | forxhunter/ComputingIntro | 1 | 12798908 | '''
greedy algorithm
An elephant decided to visit his friend. It turned out that the elephant's house is located at point 0 and his friend's house is located at point x(x > 0) of the coordinate line. In one step the elephant can move 1, 2, 3, 4 or 5 positions forward. Determine, what is the minimum number of steps he need to make in order to get to his friend's house.
Input
The first line of the input contains an integer x (1≤x≤1000000) — The coordinate of the friend's house.
Output
Print the minimum number of steps that elephant needs to make to get from point 0 to point x.
'''
x = int(input())
possible_moves = [5, 4, 3, 2, 1]
count = 0
for move in possible_moves:
if x < move:
continue
count += (x//move)
x = x % move
if x == 0:
break
print(count)
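# Worked example: x = 12 -> two steps of 5 (12 // 5 == 2, remainder 2), then
# one step of 2, giving 3 steps; for this move set that equals ceil(x / 5).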
| 4.09375 | 4 |
third-party/corenlp/third-party/stanza/stanza/text/dataset.py | arunchaganty/odd-nails | 5 | 12798909 | <gh_stars>1-10
"""
Dataset module for managing text datasets.
"""
__author__ = 'victor'
from collections import OrderedDict
import random
import numpy as np
class InvalidFieldsException(Exception):
pass
class Dataset(object):
"""
Generic Dataset object that encapsulates a list of instances.
The dataset stores the instances in an ordered dictionary of fields.
Each field maps to a list, the ith element of the list for field 'foo' corresponds to the attribute 'foo' for the ith instance in the dataset.
The dataset object supports indexing, iterating, slicing (eg. for iterating over batches), shuffling,
conversion to/from CONLL format, among others.
Example:
.. code-block:: python
d = Dataset({'Name': ['Alice', 'Bob', 'Carol', 'David', 'Ellen'], 'SSN': [1, 23, 45, 56, 7890]})
print(d) # Dataset(Name, SSN)
print(d[2]) # OrderedDict([('SSN', 45), ('Name', 'Carol')])
print(d[1:3]) # OrderedDict([('SSN', [23, 45]), ('Name', ['Bob', 'Carol'])])
for e in d:
print(e) # OrderedDict([('SSN', 1), ('Name', 'Alice')]) ...
"""
def __init__(self, fields):
"""
:param fields: An ordered dictionary in which a key is the name of an attribute and a value is a list of the values of the instances in the dataset.
:return: A Dataset object
"""
self.fields = OrderedDict(fields)
length = None
length_field = None
for name, d in fields.items():
if length is None:
length = len(d)
length_field = name
else:
if len(d) != length:
raise InvalidFieldsException('field {} has length {} but field {} has length {}'.format(length_field, length, name, len(d)))
def __len__(self):
"""
:return: The number of instances in the dataset.
"""
if len(self.fields) == 0:
return 0
return len(self.fields.values()[0])
def __repr__(self):
return "{}({})".format(self.__class__.__name__, ', '.join(self.fields.keys()))
@classmethod
def load_conll(cls, fname):
"""
The CONLL file must have a tab delimited header, for example::
# description tags
Alice
Hello t1
my t2
name t3
is t4
alice t5
Bob
I'm t1
bob t2
Here, the fields are `description` and `tags`. The first instance has the label `Alice` and the
description `['Hello', 'my', 'name', 'is', 'alice']` and the tags `['t1', 't2', 't3', 't4', 't5']`.
The second instance has the label `Bob` and the description `["I'm", 'bob']` and the tags `['t1', 't2']`.
:param fname: The CONLL formatted file from which to load the dataset
:return: loaded Dataset instance
"""
def process_cache(cache, fields):
cache = [l.split() for l in cache if l]
if not cache:
return None
fields['label'].append(cache[0][0])
instance = {k: [] for k in fields if k != 'label'}
for l in cache[1:]:
for i, k in enumerate(fields):
if k != 'label':
instance[k].append(None if l[i] == '-' else l[i])
for k, v in instance.items():
fields[k].append(v)
cache = []
with open(fname) as f:
header = f.next().strip().split('\t')
header[0] = header[0].lstrip('# ')
fields = OrderedDict([(head, []) for head in header])
fields['label'] = []
for line in f:
line = line.strip()
if line:
cache.append(line)
else:
# met empty line, process cache
process_cache(cache, fields)
cache = []
if cache:
process_cache(cache, fields)
return cls(fields)
def write_conll(self, fname):
"""
Serializes the dataset in CONLL format to fname
"""
if 'label' not in self.fields:
raise InvalidFieldsException("dataset is not in CONLL format: missing label field")
def instance_to_conll(inst):
tab = [v for k, v in inst.items() if k != 'label']
return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)]))
with open(fname, 'wb') as f:
f.write('# {}'.format('\t'.join([k for k in self.fields if k != 'label'])))
for i, d in enumerate(self):
f.write('\n{}'.format(instance_to_conll(d)))
if i != len(self) - 1:
f.write('\n')
def convert(self, converters, in_place=False):
"""
Applies transformations to the dataset.
:param converters: A dictionary specifying the function to apply to each field. If a field is missing from the dictionary, then it will not be transformed.
:param in_place: Whether to perform the transformation in place or create a new dataset instance
:return: the transformed dataset instance
"""
dataset = self if in_place else self.__class__(OrderedDict([(name, data[:]) for name, data in self.fields.items()]))
for name, convert in converters.items():
if name not in self.fields.keys():
raise InvalidFieldsException('Converter specified for non-existent field {}'.format(name))
for i, d in enumerate(dataset.fields[name]):
dataset.fields[name][i] = convert(d)
return dataset
def shuffle(self):
"""
Re-indexes the dataset in random order
:return: the shuffled dataset instance
"""
order = range(len(self))
random.shuffle(order)
for name, data in self.fields.items():
reindexed = []
for _, i in enumerate(order):
reindexed.append(data[i])
self.fields[name] = reindexed
return self
def __getitem__(self, item):
"""
:param item: An integer index or a slice (eg. 2, 1:, 1:5)
:return: an ordered dictionary of the instance(s) at index/indices `item`.
"""
return OrderedDict([(name, data[item]) for name, data in self.fields.items()])
def __setitem__(self, key, value):
"""
:param key: An integer index or a slice (eg. 2, 1:, 1:5)
:param value: Sets the instances at index/indices `key` to the instances(s) `value`
"""
for name, data in self.fields.items():
if name not in value:
raise InvalidFieldsException('field {} is missing in input data: {}'.format(name, value))
data[key] = value[name]
def __iter__(self):
"""
:return: A iterator over the instances in the dataset
"""
for i in xrange(len(self)):
yield self[i]
def copy(self, keep_fields=None):
"""
:param keep_fields: if specified, then only the given fields will be kept
:return: A deep copy of the dataset (each instance is copied).
"""
        keep_fields = keep_fields or self.fields.keys()
return self.__class__(OrderedDict([(name, data[:]) for name, data in self.fields.items() if name in keep_fields]))
@classmethod
def pad(cls, sequences, padding, pad_len=None):
"""
Pads a list of sequences such that they form a matrix.
:param sequences: a list of sequences of varying lengths.
:param padding: the value of padded cells.
:param pad_len: the length of the maximum padded sequence.
"""
max_len = max([len(s) for s in sequences])
pad_len = pad_len or max_len
assert pad_len >= max_len, 'pad_len {} must be greater or equal to the longest sequence {}'.format(pad_len, max_len)
for i, s in enumerate(sequences):
sequences[i] = [padding] * (pad_len - len(s)) + s
return np.array(sequences)
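    # Example (a sketch): Dataset.pad([[1, 2], [3]], padding=0) left-pads the
    # shorter sequence, giving np.array([[1, 2], [0, 3]]).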
| 3.4375 | 3 |
SETTINGS.py | nsxsnx/py-tgbot-twitter-timeline | 1 | 12798910 | # Twitter AUTH:
APP_KEY = 'APP_KEY_HERE'
APP_SECRET = 'APP_SECRET_HERE'
OAUTH_TOKEN = 'TOKEN_HERE'
OAUTH_TOKEN_SECRET = 'TOKEN_SECRET_HERE'
# Telegram options:
TELEGRAM_CHANNEL = 'CHANNEL_NAME_HERE'
TELEGRAM_TOKEN = 'TOKEN_HERE'
# Misc:
TWITTER_USER_NAME = 'USER_NAME_HERE'
MSG = '<b>{NAME}</b>:\n{TEXT}\n\n<a href="{URL}">Source</a>'
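# Usage sketch: MSG.format(NAME=user_name, TEXT=tweet_text, URL=tweet_url)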
# Technical stuff:
TWEET_BASE_URL = 'https://twitter.com/i/web/status/'
STATE_FILE = 'state.p'
SLEEP = 3
TG_LINK = 'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id=@{CHANNEL}&text={MESSAGE}&parse_mode=html'
UNSUPPORTED_TAGS = ['<span class="twython-tweet-suffix">', '<span class="twython-tweet-prefix">', '</span>', 'class="twython-url"', 'class="twython-media"', 'class="twython-mention"', 'class="twython-hashtag"', 'class="twython-symbol"', ]
| 2.03125 | 2 |
ada_aug/train.py | jamestszhim/adaptive_augment | 3 | 12798911 | import os
import sys
import time
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
from adaptive_augmentor import AdaAug
from networks import get_model
from networks.projection import Projection
from dataset import get_num_class, get_dataloaders, get_label_name, get_dataset_dimension
from config import get_warmup_config
from warmup_scheduler import GradualWarmupScheduler
parser = argparse.ArgumentParser("ada_aug")
parser.add_argument('--dataroot', type=str, default='./', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='cifar10', help='name of dataset')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--num_workers', type=int, default=0, help="num_workers")
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.0001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--use_cuda', type=bool, default=True, help="use cuda default True")
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--use_parallel', action='store_true', default=False, help="use data parallel default False")
parser.add_argument('--model_name', type=str, default='wresnet40_2', help="model name")
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--epochs', type=int, default=600, help='number of training epochs')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--search_dataset', type=str, default='./', help='search dataset name')
parser.add_argument('--gf_model_name', type=str, default='./', help='gf_model name')
parser.add_argument('--gf_model_path', type=str, default='./', help='gf_model path')
parser.add_argument('--h_model_path', type=str, default='./', help='h_model path')
parser.add_argument('--k_ops', type=int, default=1, help="number of augmentation applied during training")
parser.add_argument('--delta', type=float, default=0.3, help="degree of perturbation in magnitude")
parser.add_argument('--temperature', type=float, default=1.0, help="temperature")
parser.add_argument('--n_proj_layer', type=int, default=0, help="number of additional hidden layer in augmentation policy projection")
parser.add_argument('--n_proj_hidden', type=int, default=128, help="number of hidden units in augmentation policy projection layers")
parser.add_argument('--restore_path', type=str, default='./', help='restore model path')
parser.add_argument('--restore', action='store_true', default=False, help='restore model default False')
args = parser.parse_args()
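# Example invocation (a sketch; dataset/model names must match those
# registered in dataset.py / networks, and the *_model_path directories must
# hold trained weights from the search phase):
#
#   python train.py --dataset cifar10 --model_name wresnet40_2 \
#       --gf_model_name wresnet40_2 --gf_model_path ./search \
#       --h_model_path ./search --k_ops 2 --epochs 200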
debug = True if args.save == "debug" else False
args.save = '{}-{}'.format(time.strftime("%Y%m%d-%H%M%S"), args.save)
if debug:
args.save = os.path.join('debug', args.save)
else:
args.save = os.path.join('eval', args.dataset, args.save)
utils.create_exp_dir(args.save)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
torch.cuda.set_device(args.gpu)
utils.reproducibility(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
# dataset settings
n_class = get_num_class(args.dataset)
class2label = get_label_name(args.dataset, args.dataroot)
train_queue, valid_queue, _, test_queue = get_dataloaders(
args.dataset, args.batch_size, args.num_workers,
args.dataroot, args.cutout, args.cutout_length,
split=args.train_portion, split_idx=0, target_lb=-1,
search=True)
logging.info(f'Dataset: {args.dataset}')
logging.info(f' |total: {len(train_queue.dataset)}')
logging.info(f' |train: {len(train_queue)*args.batch_size}')
logging.info(f' |valid: {len(valid_queue)*args.batch_size}')
# task model settings
task_model = get_model(model_name=args.model_name,
num_class=n_class,
use_cuda=True, data_parallel=False)
logging.info("param size = %fMB", utils.count_parameters_in_MB(task_model))
# task optimization settings
optimizer = torch.optim.SGD(
task_model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
m, e = get_warmup_config(args.dataset)
scheduler = GradualWarmupScheduler(
optimizer,
multiplier=m,
total_epoch=e,
after_scheduler=scheduler)
logging.info(f'Optimizer: SGD, scheduler: CosineAnnealing, warmup: {m}/{e}')
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
# restore setting
if args.restore:
trained_epoch = utils.restore_ckpt(task_model, optimizer, scheduler, args.restore_path, location=args.gpu) + 1
n_epoch = args.epochs - trained_epoch
logging.info(f'Restoring model from {args.restore_path}, starting from epoch {trained_epoch}')
else:
trained_epoch = 0
n_epoch = args.epochs
# load trained adaaug sub models
search_n_class = get_num_class(args.search_dataset)
gf_model = get_model(model_name=args.gf_model_name,
num_class=search_n_class,
use_cuda=True, data_parallel=False)
h_model = Projection(in_features=gf_model.fc.in_features,
n_layers=args.n_proj_layer,
n_hidden=args.n_proj_hidden).cuda()
utils.load_model(gf_model, f'{args.gf_model_path}/gf_weights.pt', location=args.gpu)
utils.load_model(h_model, f'{args.h_model_path}/h_weights.pt', location=args.gpu)
for param in gf_model.parameters():
param.requires_grad = False
for param in h_model.parameters():
param.requires_grad = False
after_transforms = train_queue.dataset.after_transforms
adaaug_config = {'sampling': 'prob',
'k_ops': args.k_ops,
'delta': args.delta,
'temp': args.temperature,
'search_d': get_dataset_dimension(args.search_dataset),
'target_d': get_dataset_dimension(args.dataset)}
adaaug = AdaAug(after_transforms=after_transforms,
n_class=search_n_class,
gf_model=gf_model,
h_model=h_model,
save_dir=args.save,
config=adaaug_config)
# start training
for i_epoch in range(n_epoch):
epoch = trained_epoch + i_epoch
lr = scheduler.get_last_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
train_acc, train_obj = train(
train_queue, task_model, criterion, optimizer, epoch, args.grad_clip, adaaug)
logging.info('train_acc %f', train_acc)
valid_acc, valid_obj, _, _ = infer(valid_queue, task_model, criterion)
logging.info('valid_acc %f', valid_acc)
scheduler.step()
if epoch % args.report_freq == 0:
test_acc, test_obj, test_acc5, _ = infer(test_queue, task_model, criterion)
logging.info('test_acc %f %f', test_acc, test_acc5)
utils.save_ckpt(task_model, optimizer, scheduler, epoch,
os.path.join(args.save, 'weights.pt'))
adaaug.save_history(class2label)
figure = adaaug.plot_history()
test_acc, test_obj, test_acc5, _ = infer(test_queue, task_model, criterion)
logging.info('test_acc %f %f', test_acc, test_acc5)
logging.info(f'save to {args.save}')
def train(train_queue, model, criterion, optimizer, epoch, grad_clip, adaaug):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
for step, (input, target) in enumerate(train_queue):
target = target.cuda(non_blocking=True)
# get augmented training data from adaaug
aug_images = adaaug(input, mode='exploit')
model.train()
optimizer.zero_grad()
logits = model(aug_images)
loss = criterion(logits, target)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.detach().item(), n)
top1.update(prec1.detach().item(), n)
top5.update(prec5.detach().item(), n)
global_step = step + epoch * len(train_queue)
if global_step % args.report_freq == 0:
logging.info('train %03d %e %f %f', global_step, objs.avg, top1.avg, top5.avg)
# log the policy
if step == 0:
adaaug.add_history(input, target)
return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
with torch.no_grad():
for input, target in valid_queue:
input = input.cuda()
target = target.cuda(non_blocking=True)
logits = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.detach().item(), n)
top1.update(prec1.detach().item(), n)
top5.update(prec5.detach().item(), n)
return top1.avg, objs.avg, top5.avg, objs.avg
if __name__ == '__main__':
main()
| 1.976563 | 2 |
Hana.py | dungpoke/dungpoke | 0 | 12798912 | ### Hi there 👋
<!--
**dungpoke/dungpoke** is a ✨ _special_ ✨ repository because its `README.md` (this file) appears on your GitHub profile.
Here are some ideas to get you started:
- 🔭 I’m currently working on ...
- 🌱 I’m currently learning ...
- 👯 I’m looking to collaborate on ...
- 🤔 I’m looking for help with ...
- 💬 Ask me about ...
- 📫 How to reach me: ...
- 😄 Pronouns: ...
- ⚡ Fun fact: ...
-->
| 1.1875 | 1 |
test.py | dongxzhang/TensorflowCookbookLearning | 1 | 12798913 | import tensorflow as tf
sess = tf.Session()
# Create a variable named v inside the variable scope foo
with tf.variable_scope("foo"):
    # Create v with a constant initializer of 1.0
v= tf.get_variable('v1',[1],initializer = tf.constant_initializer(1.0))
# Because a variable was already created in the foo scope, the code below would raise an error
#with tf.variable_scope("foo"):
# v= tf.get_variable('v',[1])
# When creating the context manager, set reuse=True so that tf.get_variable directly fetches the already-declared variable.
# The call tf.variable_scope("foo") must name the already-defined foo scope, not an unnamed scope tf.variable_scope("") or any other scope.
with tf.variable_scope("foo",reuse =tf.AUTO_REUSE):
v1= tf.get_variable('v1',[1], initializer = tf.constant_initializer(5.0))
    print(v1==v) # Prints True: v1 and v are the same variable
init = tf.initialize_all_variables()
sess.run(init)
print(sess.run(v1))
print(sess.run(v))
with tf.variable_scope("foo1",reuse = False):
v1= tf.get_variable('v1',[1], initializer = tf.constant_initializer(5.0))
    print(v1==v) # Prints False: this v1 is a new variable created in scope foo1
init = tf.initialize_all_variables()
sess.run(init)
print(sess.run(v1))
print(sess.run(v))
print(v1.name)  # e.g. foo1/v1:0
'''
#There are two main ways to obtain variables. In practice, variables created by tf.get_variable must be paired with tf.variable_scope, otherwise running the script raises an error
#v = tf.get_variable('v222',shape= [1],initializer = tf.constant_initializer(10.0))
#Defining a variable directly (tf.Variable) does not raise and can be called repeatedly
#vc = tf.Variable(tf.constant(1.0,shape = [1]),name = 'v')
#print(vc)
#The with syntax below pairs tf.get_variable with tf.variable_scope; when reuse=True, v must have been defined beforehand
with tf.variable_scope('zdx',reuse = True):
v = tf.get_variable('v222',shape= [1],initializer = tf.constant_initializer(100.0))
print(v)
v1 = tf.get_variable('v222',shape= [1],initializer = tf.constant_initializer(2.0))
print(v1==v)
init = tf.initialize_all_variables()
sess.run(init)
print(sess.run(v1))
print(sess.run(v))
''' | 3.234375 | 3 |
test/utils/test_utils_audio.py | FabianGroeger96/deep-embedded-music | 10 | 12798914 | <reponame>FabianGroeger96/deep-embedded-music
import tensorflow as tf
from src.utils.utils_audio import AudioUtils
class TestUtilsAudio(tf.test.TestCase):
def setUp(self):
self.audio_file_path = "/tf/test_environment/audio/DevNode1_ex1_1.wav"
def test_audio_loading_mono(self):
expected_shape = (16000 * 10,)
audio = AudioUtils.load_audio_from_file(self.audio_file_path,
sample_rate=16000,
sample_size=10,
stereo_channels=4,
to_mono=True)
self.assertEqual(expected_shape, audio.shape)
def test_audio_loading_multi_channel(self):
expected_shape = (16000 * 10, 4)
audio = AudioUtils.load_audio_from_file(self.audio_file_path,
sample_rate=16000,
sample_size=10,
stereo_channels=4,
to_mono=False)
self.assertEqual(expected_shape, audio.shape)
if __name__ == '__main__':
tf.test.main()
| 2.703125 | 3 |
setup.py | philipwfowler/jitter | 1 | 12798915 | <gh_stars>1-10
from setuptools import setup
setup(
install_requires=[
"numpy >= 1.13"
],
name='jitter',
scripts=['bin/jitter.py'],
version='0.1.0',
url='https://github.com/philipwfowler/jitter',
author='<NAME>',
packages=['jitter'],
license='MIT',
long_description=open('README.md').read(),
)
| 1.109375 | 1 |
day06/day6_lib.py | el-hult/adventofcode2019 | 0 | 12798916 | from collections import namedtuple
from typing import Dict, List, Callable
Node = namedtuple('Node', 'name parent children data')
def make_tree_from_adj_list(adj_list):
root = 'COM'
nodes: Dict['str', Node] = {root: Node(root, None, [], {})}
for parent, child in adj_list:
node = Node(child, parent, [], {})
nodes[child] = node
    # N.B. `nodes` is modified during iteration, so cast to a list and slice to get a fixed snapshot
for node in list(nodes.values())[:]:
if not (node.parent in nodes.keys()) and node.name != root:
parent_node = Node(node.parent, root, [], {})
nodes[node.parent] = parent_node
for node in nodes.values():
if node.name != root:
nodes[node.parent].children.append(node)
return nodes[root]
def compute_descendants(tree_root, f_descendants='n_descendants'):
topo_sorted_nodes = all_descendants_BFS(tree_root)
reverse_topo_sort = reversed(topo_sorted_nodes)
for n in reverse_topo_sort:
if len(n.children) == 0:
n.data[f_descendants] = 0
else:
n.data[f_descendants] = len(n.children) + sum(nn.data[f_descendants] for nn in n.children)
def all_descendants_BFS(tree_root: Node) -> List[Node]:
"""All descendents of a node, in Breadth First Search order"""
topo_sorted_nodes = [tree_root]
for n in topo_sorted_nodes:
topo_sorted_nodes += n.children
return topo_sorted_nodes
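# Example (a sketch): for adj_list = [('COM', 'B'), ('B', 'C')],
# make_tree_from_adj_list returns the COM node, and all_descendants_BFS on it
# yields the nodes named COM, B, C in breadth-first order.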
def find_DFS(predicate: Callable[[Node], bool], node: Node) -> List[Node]:
"""Returns the path in the tree from the root node to the first element that fulfils the predicate"""
def find_DFS_(predicate,node) -> List[Node]:
if predicate(node):
return [node]
elif len(node.children) == 0:
return []
else:
for c in node.children:
dfs1 = find_DFS_(predicate,c)
if len(dfs1) > 0:
return [node] + dfs1
return []
path_found = find_DFS_(predicate,node)
if len(path_found) > 0:
return path_found
else:
raise ValueError("There is no element in the tree that fulfils the predicate.")
def calculate_hops(root: Node) -> int:
nodes = all_descendants_BFS(root)
bottom_up = reversed(nodes)
for node in bottom_up:
try:
p1 = find_DFS(lambda n: n.name == 'YOU', node)
p2 = find_DFS(lambda n: n.name == 'SAN', node)
hops_to_santa = len(p1) + len(p2) - 4 #remove both endpoints of both paths
return hops_to_santa
except ValueError:
pass
raise ValueError("There is no common object that one can orbit hop through to get to Santa!") | 3.6875 | 4 |
Courses/Semester01/DSADAssignment/DSADAssignment01_UnitTest.py | KalpeshChavan12/BITS-DSE | 0 | 12798917
import unittest
import random
import DSADAssignment01V4
def add_childs_to_leaf(root, num):
if root is None:
return
if(len(root.children) == 0 ):
for i in range(num):
root.append_child(DSADAssignment01V4.TreeNode("child{0}".format(TestStringMethods.count)))
TestStringMethods.count += 1
else:
for i in root.children:
add_childs_to_leaf(i, num)
class TestStringMethods(unittest.TestCase):
count = 0
def setUp(self):
TestStringMethods.count = 0
self.root = DSADAssignment01V4.TreeNode("Root")
add_childs_to_leaf(self.root, 10)
add_childs_to_leaf(self.root, 10)
add_childs_to_leaf(self.root, 10)
add_childs_to_leaf(self.root, 10)
add_childs_to_leaf(self.root, 10)
add_childs_to_leaf(self.root, 10)
print("Total Number of nodes added", TestStringMethods.count)
def test_find_rand_node_exist(self):
# random element search
for i in range(1, 20):
key = "child{0}".format(random.randrange(TestStringMethods.count - 1))
print("Finding node=",key)
n, parent = self.root.find_node_and_parent(key)
self.assertTrue(n is not None)
self.assertTrue(parent is not None)
# Find root Node
n, parent = self.root.find_node_and_parent("Root")
self.assertTrue(n is not None)
self.assertTrue(parent is None)
# Node not exist
n, parent = self.root.find_node_and_parent("NodeNotExist")
self.assertTrue(n is None)
self.assertTrue(parent is None)
# delete
for i in range(1, 20):
key = "child{0}".format(random.randrange(TestStringMethods.count - 1))
print("Deleting node=",key)
n, parent = self.root.find_node_and_parent(key)
self.assertTrue(n is not None)
self.assertTrue(parent is not None)
parent.delete_child(n)
# verify deleted
n, parent = self.root.find_node_and_parent(key)
self.assertTrue(n is None)
self.assertTrue(parent is None)
if __name__ == '__main__':
unittest.main() | 3.140625 | 3 |
MillerArrays/millerArray2Dictionary.py | MooersLab/jupyterlabcctbxsnips | 0 | 12798918 | from iotbx import mtz
mtz_obj = mtz.object(file_name="3nd4.mtz")
# Only works with mtz.object.
# Does not work if mtz is read in with iotbx.file_reader.
miller_arrays_dict = mtz_obj.as_miller_arrays_dict()
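# Hedged follow-up: iterate the mapping without assuming its exact key
# structure (miller.array.info() is part of the cctbx API):
#   for key, marray in miller_arrays_dict.items():
#       print(key, marray.info())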
| 1.789063 | 2 |
pytorch_toolkit/nncf/nncf/quantization/layers.py | aalborov/openvino_training_extensions | 1 | 12798919 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from collections import namedtuple
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torch import distributed
from .initializers import MIN_MAX_INITIALIZERS
from .quantize_functions import symmetric_quantize, asymmetric_quantize
from ..layer_utils import COMPRESSION_MODULES
from ..registry import Registry
from ..utils import get_per_channel_scale_shape
logger = logging.getLogger(__name__)
QUANTIZATION_MODULES = Registry('quantization_modules')
BINARIZATION_MODULES = Registry('binarization_modules')
class QuantizationMode:
SYMMETRIC = "symmetric"
ASYMMETRIC = "asymmetric"
class BinarizationMode:
XNOR = "xnor"
DOREFA = "dorefa"
QuantizationParams = namedtuple(
'QuantizationParams', ['bits', 'mode', 'signed', 'signed_scope', 'per_channel']
)
QuantizationParams.__new__.__defaults__ = (8, QuantizationMode.SYMMETRIC, False, [], False)
class QuantizerConfig:
def __init__(self, params: QuantizationParams, input_shape=None, is_weights=False, per_channel=False,
within_signed_scope=False):
self.params = params
self.is_weights = is_weights
self.within_signed_scope = within_signed_scope
self.per_channel = per_channel
self.input_shape = input_shape
class BaseQuantizer(nn.Module):
def __init__(self, config: QuantizerConfig):
super().__init__()
self.config = config
self.init_stage = False
self.initialized = False
self.state_dict_name = None
class LoadStateListener:
"""
Check whether a quantization module are going to be updated by new values from state_dict or checkpoint.
"""
def __init__(self, module):
# pylint: disable=protected-access
self.hook = module._register_load_state_dict_pre_hook(partial(self.hook_fn, module=module))
def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,
module):
if module.state_dict_name:
for module_key in module.state_dict().keys():
candidate = module.state_dict_name + '.' + module_key
if candidate in state_dict:
module.initialized = True
def close(self):
self.hook.remove()
self.load_listener = LoadStateListener(self)
def forward(self, x):
if self.init_stage:
return x
return self.quantize(x)
def quantize(self, x):
raise NotImplementedError
@COMPRESSION_MODULES.register()
@QUANTIZATION_MODULES.register(QuantizationMode.SYMMETRIC)
class SymmetricQuantizer(BaseQuantizer):
def __init__(self, config):
super().__init__(config)
self.input_shape = config.input_shape
self.per_channel = config.per_channel
self.is_weights = config.is_weights
self.within_signed_scope = config.within_signed_scope
params = config.params
self.num_bits = params.bits
self.signed_tensor = nn.Parameter(torch.IntTensor([params.signed]), requires_grad=False)
self.collect_scale_statistics = False
scale_shape = 1
if self.per_channel:
scale_shape = get_per_channel_scale_shape(self.input_shape, self.is_weights)
self.scale = nn.Parameter(torch.ones(scale_shape), requires_grad=True)
self.init_stage = False
self.eps = 1e-16
self.level_high = self.level_low = 0
self.levels = 2 ** self.num_bits
if self.is_weights:
self.levels -= 1
def set_level_ranges(self):
if self.signed:
self.level_high = 2 ** (self.num_bits - 1) - 1
self.level_low = -(self.level_high + 1)
if self.is_weights:
self.level_low += 1
else:
self.level_high = 2 ** self.num_bits - 1
self.level_low = 0
@property
def signed(self):
return self.signed_tensor.item() == 1
@signed.setter
def signed(self, signed: bool):
self.signed_tensor.fill_(signed)
def quantize(self, x):
self.set_level_ranges()
return symmetric_quantize(x, self.levels, self.level_low, self.level_high, self.scale, self.eps)
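    # Illustrative construction only (shapes and parameter values are made up,
    # not taken from NNCF docs): build a per-tensor symmetric quantizer from the
    # config objects defined in this module and run a tensor through it.
    #
    #   params = QuantizationParams(bits=8, mode=QuantizationMode.SYMMETRIC)
    #   qconfig = QuantizerConfig(params, input_shape=(1, 3, 224, 224))
    #   quantizer = SymmetricQuantizer(qconfig)
    #   y = quantizer(torch.randn(1, 3, 224, 224))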
@MIN_MAX_INITIALIZERS.register('SymmetricQuantizer')
def _initializer(module, name, min_value, max_value, distributed_):
        if min_value.item() == np.inf or max_value.item() == -np.inf:
raise AttributeError('Statistics is not collected for {}'.format(name))
sign = min_value.item() < 0 or module.within_signed_scope
if sign != module.signed:
logger.warning("signed set incorrectly")
module.signed = int(sign)
if abs(max_value) > 0.1:
module.scale.data.fill_(max_value.item())
if distributed_:
distributed.broadcast(module.scale, 0)
distributed.broadcast(module.signed_tensor, 0)
logger.debug("Statistics: min={:.2f} max={:.2f}".format(min_value.item(), max_value.item()))
logger.info(
"Set sign: {} and scale: {:04.2f} for {}".format(module.signed, module.scale.item(), name))
@COMPRESSION_MODULES.register()
@QUANTIZATION_MODULES.register(QuantizationMode.ASYMMETRIC)
class AsymmetricQuantizer(BaseQuantizer):
def __init__(self, config):
super().__init__(config)
self.is_weights = config.is_weights
self.input_shape = config.input_shape
self.per_channel = config.per_channel
params = config.params
self.bits = params.bits
scale_shape = 1
if self.per_channel:
scale_shape = get_per_channel_scale_shape(self.input_shape, self.is_weights)
self.input_low = nn.Parameter(torch.zeros(scale_shape), requires_grad=True)
self.input_range = nn.Parameter(torch.ones(scale_shape), requires_grad=True)
self.eps = 1e-16
@property
def signed(self):
return True
@property
def level_high(self):
return 2 ** self.bits - 1
@property
def level_low(self):
return 0
@property
def levels(self):
return 2 ** self.bits
def quantize(self, x):
return asymmetric_quantize(x, self.levels, self.level_low, self.level_high, self.input_low, self.input_range,
self.eps)
@MIN_MAX_INITIALIZERS.register('AsymmetricQuantizer')
def _initializer(module, name, min_value, max_value, distributed_):
if min_value.item() == np.inf or max_value.item() == -np.inf:
raise AttributeError('Statistics is not collected for {}'.format(name))
module.input_low.data.fill_(min_value.item())
range_ = (max_value - min_value).item()
if range_ > 0.01:
module.input_range.data.fill_(range_)
if distributed_:
distributed.broadcast(module.input_low, 0)
distributed.broadcast(module.input_range, 0)
logger.debug("Statistics: min={:.2f} max={:.2f}".format(min_value.item(), max_value.item()))
logger.info("Set input_low: {:04.2f} and input_range: {:04.2f} for {}"
.format(module.input_low.item(), module.input_range.item(), name))
| 1.726563 | 2 |
answers/VanshBaijal/Day3/Question2.py | arc03/30-DaysOfCode-March-2021 | 22 | 12798920 | n=int(input("Enter the number:"))
s = {}  # digits seen so far, keyed by position
a = 0   # number of digits processed
c = 0   # count of duplicated digits found
while n != 0:
    r = n % 10          # current (rightmost) digit
    n = int(n / 10)
    s[a] = r
    for i in range(a):  # compare against all earlier digits
        if r == s[i]:
            c += 1
            break
    a += 1
if c == 0:
    print("It is a unique number")
else:
    print("It is not a unique number")
| 3.546875 | 4 |
gradio_infer.py | test-dan-run/SpeakerProfiling | 0 | 12798921
import gradio as gr
import numpy as np
import librosa
import torch
from NISP.lightning_model import LightningModel
from config import TestNISPConfig as cfg
INFERENCE_COUNT = 0
# load model checkpoint
model = LightningModel.load_from_checkpoint(cfg.model_checkpoint, csv_path=cfg.csv_path)
model.eval()
def predict_height(audio):
global INFERENCE_COUNT
INFERENCE_COUNT += 1
# resample audio to required format (16kHz Sample Rate, Mono)
input_sr, arr = audio
arr = arr.astype(np.float32, order='C') / 32768.0
arr = librosa.to_mono(arr.T)
arr = librosa.resample(arr, input_sr, cfg.sample_rate)
# convert to torch tensor
tensor = torch.Tensor([arr])
sample_length = cfg.slice_seconds * cfg.sample_rate
win_length = cfg.slice_window * cfg.sample_rate
if tensor.shape[-1] < sample_length:
tensor = torch.nn.functional.pad(tensor, (0, sample_length - tensor.size(1)), 'constant')
slices = tensor.unsqueeze(dim=0)
else:
        # Split input audio into overlapping slices of slice_seconds seconds each
slices = tensor.unfold(1, sample_length, win_length).transpose(0,1)
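        # e.g. a 20 s mono clip at 16 kHz (320000 samples) with 10 s slices and
        # a 5 s hop yields (320000 - 160000) // 80000 + 1 = 3 overlapping slices
        # (numbers here are illustrative, not the configured values).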
# predict
h_preds, a_preds, g_preds = [], [], []
with torch.no_grad():
for slice in slices:
h_pred, a_pred, g_pred = model(slice)
h_preds.append((h_pred.view(-1) * model.h_std + model.h_mean).item())
a_preds.append((a_pred.view(-1) * model.a_std + model.a_mean).item())
g_preds.append(g_pred.view(-1).item())
height = round(sum(h_preds)/len(h_preds),2)
age = int(sum(a_preds)/len(a_preds))
gender = 'Female' if sum(g_preds)/len(g_preds) > 0.5 else 'Male'
print('Inference was run. Current inference count:', INFERENCE_COUNT)
return 'You\'re {}, your height is {}, and you are {} years old.'.format(gender, height, age)
iface = gr.Interface(
fn=predict_height,
inputs='mic',
outputs='text',
description='Predicts your height, age, gender based on your voice. \n Ideally, a clip of more than 5 seconds would be preferred. Any less, and your clip will be zero-padded to 5 seconds.'
).launch(share=False) | 2.09375 | 2 |
raspberry_car_PCA/motor.py | otaniemen-lukio/projects | 1 | 12798922 | import curses, sys, os
#Servo controller (PCA9685) connected over I2C
import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)
from time import sleep
#ESC brushless motor states: direction, on/off
toggleState = 400
throttle = 450
delta = 20
print("toggleState1")
pwm.set_pwm(2,0,toggleState)
sleep(0.2)
for i in range(1,6):
pwm_value = throttle -i*delta
if pwm_value < toggleState:
pwm.set_pwm(2,0,toggleState)
sleep(0.2)
pwm.set_pwm(2,0, pwm_value)
sleep(0.4)
print(pwm_value)
pwm.set_pwm(2,0,toggleState)
| 2.765625 | 3 |
tests/inputs/if-branching/outputs/15-builtin-functions-join-store.py | helq/pytropos | 4 | 12798923 | import pytropos.internals.values as pv
from pytropos.internals.values.builtin_values import *
from pytropos.internals.values.python_values.builtin_mutvalues import *
from pytropos.internals.values.python_values.wrappers import *
from pytropos.internals.values.python_values.python_values import PythonValue, PT
exitcode = 1
r = List([pv.int(21)], size=(1, 1))
store = {
'_': PythonValue(PT.Top),
'f': r.get_attrs()['append'],
'r': PythonValue(r),
}
| 2.140625 | 2 |
setup.py | guoli-lyu/document-scanner | 2 | 12798924 | import re
import setuptools
def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ''
with open(fname, 'r') as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError('Cannot find version information')
return version
__version__ = find_version('doc_scanner/__init__.py')
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="doc_scanner",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="A document scanner based on openCV3 and scikit-image",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Guoli-Lyu/document-scanner",
packages=setuptools.find_packages(),
classifiers=(
'Development Status :: 4 - Beta',
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
),
test_suite='tests',
project_urls={
'Bug Reports': 'https://github.com/Guoli-Lyu/document-scanner/issues',
},
install_requires=[
'numpy',
'scikit-image',
'opencv-python',
'pandas',
],
)
| 2.09375 | 2 |
paraVerComoFuncionaAlgumasCoisas/01-coisas-E-Estudos/0diritories0to9/09keyboard.py | jonasht/pythonEstudos | 0 | 12798925
import keyboard
print('\npressing A ')
qtd_apertos = 0
while True:
    if keyboard.is_pressed('a'):
        qtd_apertos += 1
        print('A was pressed ', qtd_apertos)
    if keyboard.is_pressed('s'):
        print('\nend of program')
break
| 3.75 | 4 |
eor_updater.py | segfaultx/ffxiv_eor_member_updater | 1 | 12798926 | #!/usr/bin/python3
import os.path
import openpyxl
import requests
import json
import argparse
BASE_URL_XIV_API_CHARACTER: str = "https://xivapi.com/character/"
GERMAN_TO_ENGLISH_CLASS_DICT: dict = {}
SUB_30_MAPPING_DICT: dict = {}
CONFIG_LOCATION = os.getcwd()
DEBUG_ENABLED = False
def main(filepath):
"""main method, used to process data and update the excel workbook"""
workbook: openpyxl.Workbook = openpyxl.load_workbook(filepath)
worksheet = workbook.active
class_range: tuple = generate_class_range(worksheet)
for i in range(worksheet.min_row + 1, worksheet.max_row):
current_row: tuple = worksheet[i]
if not current_row[0].value and not current_row[1].value:
break
current_character_name: str = f"{current_row[0].value} {current_row[1].value}"
current_character_info: dict = process_class_info(get_character_info(get_character_id(current_character_name)))
if not current_character_info:
print(f"Cant process data for character: {current_character_name}")
continue
update_character_info(current_character_info, worksheet, class_range, worksheet[worksheet.min_row], i)
workbook.save(filepath.replace(".xlsx", "_updated.xlsx"))
print("Finished!")
def update_character_info(current_character_info: dict, worksheet: openpyxl.workbook.workbook.Worksheet,
class_range: tuple, header_row: tuple, current_row: int):
"""method to update the character class information in the excel sheet"""
for i in range(class_range[0], class_range[1]):
# reduce i by one because column index is the actual index, while the header_row is a list,
# thus reducing the index by 1
mapped_class_name = GERMAN_TO_ENGLISH_CLASS_DICT.get(header_row[i - 1].value)
new_class_val = current_character_info.get(mapped_class_name, 0)
if DEBUG_ENABLED:
character_name = f"{worksheet.cell(current_row, 1).value} {worksheet.cell(current_row, 2).value}"
current_class = header_row[i - 1].value
print(f"Setting value {new_class_val} for class {current_class} for character {character_name}")
current_cell = worksheet.cell(row=current_row, column=i)
current_cell.value = new_class_val
def process_class_info(class_info: dict):
"""method to process the class info of every player, mapping it into a dictionary for easier usage"""
if class_info is None:
return None
data_to_process = class_info.get("Character", {}).get("ClassJobs", None)
if not data_to_process:
raise IOError
out: dict = {SUB_30_MAPPING_DICT.get(entry["UnlockedState"]["Name"], entry["UnlockedState"]["Name"]): entry["Level"]
for entry in data_to_process}
# special case -> arcanist branching into two main jobs
out["Summoner"] = out["Scholar"]
if DEBUG_ENABLED:
print("MAPPED CLASS VALUES:")
print(out)
return out
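# Illustrative shape of the mapping returned above (job names and levels are
# made up): {'Paladin': 80, 'Scholar': 74, 'Summoner': 74, ...} -- note the
# Summoner entry copied from Scholar for the shared arcanist base class.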
def generate_class_range(worksheet: openpyxl.workbook.workbook.Worksheet):
"""helper method, to create the excel ranges for the player classes"""
header_row: tuple = worksheet[worksheet.min_row]
end = 0
start = 0
start_set = False
for col in header_row:
if col.value is None:
break
if col.value in GERMAN_TO_ENGLISH_CLASS_DICT.keys() and not start_set:
start = end
start_set = True
end += 1
if DEBUG_ENABLED:
print("CLASS ROW RANGES:")
print(start, end)
return start + 1, end + 1
def do_http_get(request_url: str):
"""helper method to do http requests"""
resp: requests.Response = requests.get(request_url)
if resp.ok:
return resp.json()
else:
raise ConnectionError
def get_character_info(character_id: str):
"""helper method to receive character info via XIV API"""
if not character_id:
return None
current_request_url: str = f"{BASE_URL_XIV_API_CHARACTER}{character_id}"
resp_json: dict = do_http_get(current_request_url)
return resp_json
def get_character_id(character_name: str):
"""Help method to get the ID of an character via XIV API"""
current_request_url: str = f"{BASE_URL_XIV_API_CHARACTER}search?name={character_name}&server=Moogle"
resp_json: dict = do_http_get(current_request_url)
print(f"Processing data for: {character_name}")
return resp_json["Results"][0]["ID"] if resp_json["Results"] else None
def load_config(arguments: argparse.Namespace):
global GERMAN_TO_ENGLISH_CLASS_DICT, SUB_30_MAPPING_DICT
global CONFIG_LOCATION, DEBUG_ENABLED
if arguments.config:
CONFIG_LOCATION = arguments.config
if arguments.d:
DEBUG_ENABLED = arguments.d
with open(os.path.join(CONFIG_LOCATION, "eor_config.json")) as file:
config = json.load(file)
GERMAN_TO_ENGLISH_CLASS_DICT = config.get("class_config", None)
SUB_30_MAPPING_DICT = config.get("sub_30_class_config", None)
if not GERMAN_TO_ENGLISH_CLASS_DICT or not SUB_30_MAPPING_DICT:
raise IOError
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process the EoR Membership excel.")
parser.add_argument("--filename", metavar='[path to file]', type=str, help="the location of the file to process")
parser.add_argument("--config", type=str, required=False)
parser.add_argument("--d", required=False, action='store_true')
args = parser.parse_args()
load_config(args)
main(args.filename)
| 3.234375 | 3 |
quickbuild/endpoints/configurations.py | pbelskiy/quickbuild | 7 | 12798927 | import datetime
from functools import partial
from typing import List, Optional, Union
from quickbuild.helpers import ContentType, response2py
class Configurations:
def __init__(self, quickbuild):
self.quickbuild = quickbuild
def _get(self, params: dict) -> List[dict]:
return self.quickbuild._request(
'GET',
'configurations',
callback=response2py,
params=params,
)
def get(self) -> List[dict]:
"""
        Get all configurations in the system. For performance reasons, only
        brief information about each configuration is returned here, including
        `id`, `name`, `description`, `schedule`, `runMode`, `errorMessage`, and
        `parent id`. You may fetch the full XML representation by id if necessary.
Returns:
List[dict]: list of configurations.
"""
return self._get(dict(recursive=True))
def get_child(self, parent_id: int) -> List[dict]:
"""
Get a list of child configurations.
Args:
parent_id (int): parent configuration identifier.
Returns:
List[dict]: list of child configurations.
"""
return self._get(dict(parent_id=parent_id))
def get_descendent(self, parent_id: int) -> List[dict]:
"""
Get a list of descendent configurations.
Args:
parent_id (int): parent configuration identifier.
Returns:
List[dict]: list of descendent configurations.
"""
return self._get(dict(recursive=True, parent_id=parent_id))
def get_info(self,
configuration_id: int,
*,
content_type: Optional[ContentType] = None
) -> Union[dict, str]:
"""
Get full configuration info.
Args:
configuration_id (int):
Configuration identifier.
content_type (Optional[ContentType]):
                Desired content type; if not set, the default value of the
                client instance is used.
Returns:
Union[dict, str]: configuration content.
"""
return self.quickbuild._request(
'GET',
'configurations/{}'.format(configuration_id),
callback=partial(response2py, content_type=content_type),
content_type=content_type,
)
def get_path(self, configuration_id: int) -> str:
"""
Get configuration path.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration path.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/path'.format(configuration_id),
)
def get_id_by_path(self, path: str) -> int:
"""
Get configuration id by path.
Args:
path (str): configuration path.
Returns:
int: configuration identifier.
"""
return self.quickbuild.identifiers.get_configuration_id_by_path(path)
def get_name(self, configuration_id: int) -> str:
"""
Get configuration name.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration name.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/name'.format(configuration_id),
)
def get_description(self, configuration_id: int) -> str:
"""
Get configuration description.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration description.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/description'.format(configuration_id),
)
def get_error_message(self, configuration_id: int) -> str:
"""
Get configuration error message.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration error message.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/error_message'.format(configuration_id),
)
def get_run_mode(self, configuration_id: int) -> str:
"""
Get configuration run mode.
Args:
configuration_id (int): configuration identifier.
Returns:
str: configuration run mode.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/run_mode'.format(configuration_id),
)
def get_schedule(self, configuration_id: int) -> dict:
"""
Get configuration schedule.
Args:
configuration_id (int): configuration identifier.
Returns:
dict: configuration schedule.
Raises:
QBProcessingError: will be raised if schedule is inherited from
parent configuration.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/schedule'.format(configuration_id),
callback=response2py,
)
def get_average_duration(self,
configuration_id: int,
*,
from_date: Optional[datetime.date],
to_date: Optional[datetime.date]
) -> int:
"""
Get configuration average duration.
Args:
configuration_id (int): configuration identifier.
Returns:
int: milliseconds of average build duration.
"""
params = dict()
if from_date:
params['from_date'] = str(from_date)
if to_date:
params['to_date'] = str(to_date)
return self.quickbuild._request(
'GET',
'configurations/{}/average_duration'.format(configuration_id),
callback=response2py,
params=params,
)
def get_success_rate(self,
configuration_id: int,
*,
from_date: Optional[datetime.date],
to_date: Optional[datetime.date]
) -> int:
"""
Get configuration success rate.
Args:
configuration_id (int): configuration identifier.
Returns:
int: value in the range of 0~100, with 0 stands for 0%, and 100
stands for 100%.
"""
params = dict()
if from_date:
params['from_date'] = str(from_date)
if to_date:
params['to_date'] = str(to_date)
return self.quickbuild._request(
'GET',
'configurations/{}/success_rate'.format(configuration_id),
callback=response2py,
params=params,
)
def get_parent(self, configuration_id: int) -> int:
"""
Get parent configuration id.
Args:
configuration_id (int): configuration identifier.
Returns:
int: id of parent configuration.
Raises:
            QBProcessingError: raised if the configuration is a root
            configuration and has no parent.
"""
return self.quickbuild._request(
'GET',
'configurations/{}/parent'.format(configuration_id),
callback=response2py,
)
def update(self, configuration: str) -> int:
"""
Update a configuration using XML configuration.
Normally you do not need to create the XML from scratch: you may get
XML representation of the configuration using `get_info()` method with
content_type=ContentType.XML and modify certain parts of the XML.
Args:
configuration (str): XML document.
Returns:
int: configuration id being updated.
"""
return self.quickbuild._request(
'POST',
'configurations',
callback=response2py,
data=configuration
)
def create(self, configuration: str) -> int:
"""
Create a configuration using XML/JSON configuration.
Please note that:
        - The parent element denotes the id of the parent configuration. Normally
          you do not need to create the XML from scratch: you may retrieve the XML
          representation of a template configuration using the various configuration
          access methods or `get_info()` with content_type=ContentType.XML, remove
          the id element, modify certain parts, and pass it to the create() method.
- Secret elements (Elements with attribute "secret=encrypt" in XML
representation of an existing configuration, typically they are
repository passwords, secret variable values, etc.) should not contain
the "secret" attribute; otherwise QuickBuild will think that the password
          has already been encrypted. However, if you are creating a configuration
          by copying an existing one and want to keep the passwords, the "secret"
          attribute should be preserved.
Args:
configuration (str): XML/JSON document.
Returns:
int: configuration id of newly created configuration.
Raises:
QBError: XML validation error
"""
self.quickbuild._validate_for_id(configuration)
return self.update(configuration)
def delete(self, configuration_id: int) -> None:
"""
Delete configuration.
Args:
configuration_id (int): configuration id.
Returns:
None
"""
return self.quickbuild._request(
'DELETE',
'configurations/{}'.format(configuration_id),
callback=response2py,
)
def copy(self,
configuration_id: int,
parent_id: int,
name: str,
recursive: bool
) -> int:
"""
Copy configuration (available since version 4.0.72)
Args:
configuration_id (int):
Configuration id to be copied.
parent_id (int):
Configuration id of the parent to place newly copied configuration.
name (str):
Name of the newly copied configuration.
recursive (bool):
Specify parameter recursive=true to copy specified configuration
and all its descendant configurations recursively; otherwise,
only the configuration itself will be copied.
Returns:
int: configuration id of the newly copied configuration.
"""
params = dict(
parent_id=parent_id,
name=name,
recursive=recursive,
)
return self.quickbuild._request(
'GET',
'configurations/{}/copy'.format(configuration_id),
callback=response2py,
params=params,
)
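    # Hedged usage sketch (the client object construction is assumed and not
    # shown in this module; the ids and the name are illustrative):
    #
    #   new_id = qb.configurations.copy(
    #       configuration_id=12, parent_id=1, name='release-copy', recursive=True)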
| 2.34375 | 2 |
leetcode/easy/Arrays and Strings/TwoSum.py | cheshtaaagarrwal/DS-Algos | 0 | 12798928 | # Given an array of integers, return indices of the two numbers such that they add up to a specific target.
# You may assume that each input would have exactly one solution, and you may not use the same element twice.
# Example:
# Given nums = [2, 7, 11, 15], target = 9,
# Because nums[0] + nums[1] = 2 + 7 = 9,
# return [0, 1].
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
seen = {}
result = []
index = 0
for curr in nums:
remain = target - curr
if remain not in seen:
seen[curr] = index
else:
result.append(seen[remain])
result.append(index)
index += 1
return result
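# Quick sanity check (hypothetical driver; LeetCode supplies its own harness):
#   assert Solution().twoSum([2, 7, 11, 15], 9) == [0, 1]
# The single pass over nums with a dict lookup gives O(n) time and O(n) space.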
| 3.734375 | 4 |
simple_vatic/server/web_app.py | happyharrycn/vatic_fpv | 6 | 12798929
"""A simple web server for video annotation"""
# parsing args
import argparse
# encoding / decoding
import json
# time / logging
import time
#import logging
#import traceback
# flask
import flask
from flask_cors import CORS, cross_origin
import tornado.wsgi
import tornado.httpserver
# database
import sqlite3
# redirect stdout and stderr for logging
import sys
# sys.stdout = open('./web_app.log', 'a', 1)
# sys.stderr = open('./web_app.err', 'a', 1)
import random
import boto.mturk.connection
# Obtain the flask app object (and make it cors)
app = flask.Flask(__name__) # pylint: disable=invalid-name
CORS(app)
# Maximum time allowed for one task
MAX_DELAY = 120
# maximum difference between correct start_time/end_time and verification attempt's start_time/end_time in seconds
TRIM_DIFFERENCE_MAX = 1.0
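# Worked example of the tolerance above: a clip verified as [12.0 s, 18.5 s]
# accepts attempts whose start lies in [11.0, 13.0] and whose end lies in
# [17.5, 19.5] (times here are illustrative).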
def dict_factory(cursor, row):
"""Helper function to convert sql item into a dict"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def print_log_info(str_info):
"""Helper function for logging info"""
prefix = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
print "{:s} {:s}".format(prefix, str_info)
def collect_db_stats():
"""
Collect DB stats
"""
ant_tasks = app.annotation_tasks
db_cursor = ant_tasks.cursor()
# show us some stats
try:
db_cursor.execute('''SELECT count(*) FROM video_db WHERE named=1''')
num_clips_named = db_cursor.fetchone()['count(*)']
db_cursor.execute('''SELECT count(*) FROM video_db WHERE trimmed=1''')
num_clips_trimmed = db_cursor.fetchone()['count(*)']
db_cursor.execute('''SELECT count(*) FROM video_db
WHERE trim_locked=1 OR name_locked=1''')
num_clips_locked = db_cursor.fetchone()['count(*)']
db_cursor.execute('''SELECT count(*) FROM video_db
WHERE red_flag>=1''')
num_clips_flaged = db_cursor.fetchone()['count(*)']
print_log_info("All Stats: Named {:d}, Trimmed {:d}, flagged {:d}, Locked {:d}".format(
num_clips_named, num_clips_trimmed, num_clips_flaged, num_clips_locked))
except sqlite3.Error as e:
print_log_info(str(e))
return
def approve_assignments():
"""
Periodic callback decides whether assignments pending approval
can be automatically approved and then marks them accordingly
"""
# TODO verify correct verification labels here
# TODO make Mturk login details command line arguments
sandbox_host = 'mechanicalturk.sandbox.amazonaws.com'
real_host = 'mechanicalturk.amazonaws.com'
host = (sandbox_host if app.sandbox else real_host)
mturk = boto.mturk.connection.MTurkConnection(
aws_access_key_id=app.aws_access_key_id,
aws_secret_access_key=app.aws_secret_access_key,
host=host,
debug=1 # debug = 2 prints out all requests.
)
mturk_cur = app.mturk_db_connection.cursor()
db_cursor = app.annotation_tasks.cursor()
try:
# TODO make pending approval a separate table if we think that would be time-efficient
mturk_cur.execute("SELECT assignment_id, hit_id, task FROM hits WHERE status='pending_approval'")
except sqlite3.Error as e:
print_log_info(str(e))
return
query_result = mturk_cur.fetchall()
# We need to loop through every assignment/hit set pending approval
for result in query_result:
assignment_id = str(result["assignment_id"])
hit_id = str(result["hit_id"])
task = str(result["task"])
all_verifications_correct = True
print assignment_id
try:
if task == "name":
mturk_cur.execute("SELECT id, action_noun, action_verb FROM name_verification_attempts WHERE hit_id=?", (hit_id,))
action_query_result = mturk_cur.fetchall()
for attempt_action_set in action_query_result:
db_cursor.execute("SELECT action_noun, action_verb FROM video_db WHERE id=?",
(attempt_action_set['id'],))
verified_action_set = db_cursor.fetchone()
if attempt_action_set['action_verb'] != verified_action_set['action_verb']:
print_log_info("Verification Attempt failed! Attempt had verb "
+ str(attempt_action_set['action_verb'])
+ " but the verified had verb "
+ str(verified_action_set['action_verb']))
all_verifications_correct = False
break
if attempt_action_set['action_noun'] != verified_action_set['action_noun']:
print_log_info("Verification Attempt failed! Attempt had noun "
+ str(attempt_action_set['action_noun'])
+ " but the verified had noun "
+ str(verified_action_set['action_noun']))
all_verifications_correct = False
break
else: # ie. elif task == "trim":
print "trim thing"
mturk_cur.execute("SELECT id, start_time, end_time FROM trim_verification_attempts WHERE hit_id=?", (hit_id,))
times_query_result = mturk_cur.fetchall()
for attempt_times_set in times_query_result:
db_cursor.execute("SELECT start_time, end_time FROM video_db WHERE id=?",
(attempt_times_set['id'],))
verified_times_set = db_cursor.fetchone()
if abs(attempt_times_set['start_time'] - verified_times_set['start_time']) > TRIM_DIFFERENCE_MAX:
print_log_info("Verification Attempt failed! Attempt had start time "
+ str(attempt_times_set['start_time'])
+ " but the verified had start time "
+ str(verified_times_set['start_time']))
all_verifications_correct = False
break
if abs(attempt_times_set['end_time'] - verified_times_set['end_time']) > TRIM_DIFFERENCE_MAX:
print_log_info("Verification Attempt failed! Attempt had end time "
+ str(attempt_times_set['end_time'])
+ " but the verified had end time "
+ str(verified_times_set['end_time']))
all_verifications_correct = False
break
except sqlite3.Error as e:
print_log_info(str(e))
continue
if all_verifications_correct:
# TODO Find out if this needs to be a transaction
print_log_info("Approving assignment " + assignment_id)
try:
response = mturk.approve_assignment(assignment_id)
except boto.mturk.connection.MTurkRequestError as e:
print_log_info("MTurk verification rejected. Typically, this means the client's completion "
+ "has not propagated through Amazon's servers.")
print_log_info(str(e))
query_result = mturk_cur.fetchone()
continue
print_log_info(assignment_id + " approved. Amazon response: " + str(response))
try:
mturk_cur.execute('''UPDATE hits SET status='approved' WHERE hit_id=?''', (hit_id,))
app.mturk_db_connection.commit()
except sqlite3.Error as e:
print_log_info(str(e))
else:
try:
mturk_cur.execute('''UPDATE hits SET status='pending_manual_approval' WHERE hit_id=?''', (hit_id,))
app.mturk_db_connection.commit()
except sqlite3.Error as e:
print_log_info(str(e))
return
def expire_locked_items():
"""
Expires a locked item based on its time stamp
"""
ant_tasks = app.annotation_tasks
db_cursor = ant_tasks.cursor()
# Task: name
db_cursor.execute('''SELECT * FROM video_db WHERE name_locked=1 AND named=0''')
locked_items = db_cursor.fetchall()
for item in locked_items:
delay = time.time() - item['name_lock_time']
if delay > MAX_DELAY:
print_log_info("Expiring task {:d} (Name)".format(item['id']))
try:
db_cursor.execute('''UPDATE video_db SET name_locked=0, name_lock_time=?
WHERE id=?''', (0.0, item['id']))
ant_tasks.commit()
except sqlite3.Error as e:
print_log_info(str(e))
# Task: trim
db_cursor.execute('''SELECT * FROM video_db WHERE trim_locked=1 AND trimmed=0''')
locked_items = db_cursor.fetchall()
for item in locked_items:
delay = time.time() - item['trim_lock_time']
if delay > MAX_DELAY:
print_log_info("Expiring task {:d} (Trim)".format(item['id']))
try:
db_cursor.execute('''UPDATE video_db SET trim_locked=0, trim_lock_time=?
WHERE id=?''', (0.0, item['id']))
ant_tasks.commit()
except sqlite3.Error as e:
print_log_info(str(e))
return
def load_annotation_tasks(video_db):
"""
Wrapper for loading annotations
"""
# id integer primary key,
# url text,
# named integer,
# name_locked integer,
# name_lock_time real,
# named_by_user text,
# occluded integer,
# trimmed integer,
# trim_locked integer,
# trim_lock_time real,
# trimmed_by_user text,
# video_src text
# src_start_time integer,
# src_end_time integer,
# pad_start_frame integer,
# pad_end_frame integer,
# start_time real,
# end_time real,
# action_verb text,
# action_noun text,
# red_flag integer
# Instantiate a connection to db
annotation_tasks = sqlite3.connect(video_db)
annotation_tasks.row_factory = dict_factory
# returns the database
return annotation_tasks
def decide_if_needs_verification(json_res, mturk_db_connection):
"""
Makes the decision as to whether this request is going to be a verification video or not.
Let
a = the verification videos left
b = total number of videos left
The chance of getting a verification videos is a/b
This gives a uniform distribution of chance of getting a verification video across all requests.
Called by get_task().
:param json_res: JSON given by frontend's submit button; must have hitId key
:type json_res: dict
:param mturk_db_connection: connection to database containing mturk-related data
:type mturk_db_connection: sqlite3.Connection
:return boolean representing whether verification video will be returned
"""
print json_res
mturk_cur = mturk_db_connection.cursor()
try:
mturk_cur.execute('''SELECT verifications_total, labels_total,
verifications_completed, labels_completed FROM hits
WHERE hit_id=?''', (json_res['hitId'],))
except sqlite3.Error as e:
print_log_info(str(e))
query_result = mturk_cur.fetchone()
print_log_info(json_res['hitId'])
verifications_total, labels_total, verifications_completed, labels_completed = \
query_result["verifications_total"], query_result["labels_total"], \
query_result["verifications_completed"], query_result["labels_completed"]
chance_of_verification_video = (float(max(verifications_total - verifications_completed, 0))
/ max(verifications_total + labels_total
- verifications_completed - labels_completed, 1))
return chance_of_verification_video > random.random()
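# Worked example of the a/b rule above (numbers are illustrative): with
# verifications_total=5, labels_total=20, verifications_completed=3 and
# labels_completed=10, the chance of serving a verification clip is
# (5 - 3) / (5 + 20 - 3 - 10) = 2/12, i.e. roughly 0.17 for this request.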
def get_verification_task(annotation_tasks, annotation_type):
"""
Wrapper for querying database for a verification task.
:param annotation_tasks: connection to database containing mturk-related data
:type annotation_tasks: sqlite3.Connection
:param annotation_type: client-defined string for the type of the annotations we're doing
:type annotation_type: string
:return dict from querying database
"""
db_cursor = annotation_tasks.cursor()
if annotation_type == 'name' or annotation_type == 'name_preview':
try:
# from https://stackoverflow.com/questions/4114940/select-random-rows-in-sqlite
db_cursor.execute('''SELECT * FROM video_db WHERE id IN
(SELECT id FROM named_verification_videos
ORDER BY RANDOM() LIMIT 1)''')
except sqlite3.Error as e:
print_log_info(str(e))
else:
db_cursor.execute('''SELECT * FROM video_db WHERE id IN
(SELECT id FROM trimmed_verification_videos
ORDER BY RANDOM() LIMIT 1)''')
return db_cursor.fetchone()
def task_completed(json_res, mturk_db_connection):
"""
Tells whether an mturk task has been completed
:param json_res: JSON given by frontend's submit button; must have hitId key
:type json_res: dict
:param mturk_db_connection: connection to database containing mturk-related data
:type mturk_db_connection: sqlite3.Connection
:return: boolean representing if task referred to in json_res' hitId has been completed
"""
mturk_cur = mturk_db_connection.cursor()
try:
mturk_cur.execute('''SELECT verifications_total, labels_total,
verifications_completed, labels_completed FROM hits
WHERE hit_id=?''', (json_res['hitId'],))
except sqlite3.Error as e:
print_log_info(str(e))
query_result = mturk_cur.fetchone()
verifications_total, labels_total, verifications_completed, labels_completed = \
query_result["verifications_total"], query_result["labels_total"], \
query_result["verifications_completed"], query_result["labels_completed"]
return verifications_total - verifications_completed <= 0 and labels_total - labels_completed <= 0
def get_next_available_task(annotation_tasks, annotation_type):
"""
Wrapper for querying database for a new labelling task.
Called by get_task().
:param annotation_tasks: connection to database containing mturk-related data
:type annotation_tasks: sqlite3.Connection
:param annotation_type: client-defined string for the type of the annotations we're doing
:type annotation_type: string
:return dict from querying database
"""
# get db cursor
db_cursor = annotation_tasks.cursor()
# Get the next task
if annotation_type == 'name':
try:
db_cursor.execute('''SELECT * FROM video_db WHERE named=0
AND name_locked=0
AND id not in
(SELECT id from named_verification_videos)
''') # LIMIT 1 maybe?
except sqlite3.Error as e:
print_log_info(str(e))
else: # So annotation_type == 'trim'
try:
db_cursor.execute('''SELECT * FROM video_db WHERE named=1
AND red_flag=0
AND trimmed=0
AND trim_locked=0
AND id not in
(SELECT id from trimmed_verification_videos)
''') # LIMIT 1 maybe?
except sqlite3.Error as e:
print_log_info(str(e))
item = db_cursor.fetchone()
# No task available
if item is None:
return None
# Otherwise return a task.
else:
task = item
cur_time = time.time()
# update the lock
if annotation_type == 'name':
try:
db_cursor.execute('''UPDATE video_db SET name_locked=1, name_lock_time=?
WHERE id=?''', (cur_time, task['id']))
except sqlite3.Error as e:
print_log_info(str(e))
else: # So annotation_type == 'trim'
try:
db_cursor.execute('''UPDATE video_db SET trim_locked=1, trim_lock_time=?
WHERE id=?''', (cur_time, task['id']))
except sqlite3.Error as e:
print_log_info(str(e))
annotation_tasks.commit()
return task
def update_task(mturk_db_connection, annotation_tasks, json_res, is_mturk):
"""
Updates the data for a labelling task plus relevant mturk variables if it's an mturk task.
:param mturk_db_connection: connection to database containing mturk-related data
:type mturk_db_connection: sqlite3.Connection
:param annotation_tasks: connection to database containing mturk-related data
:type annotation_tasks: sqlite3.Connection
:param json_res: JSON given by frontend's submit button; must have hitId key
:type json_res: dict
:param is_mturk: indicates if
:return dict from querying database
"""
# get db cursor
db_cursor = annotation_tasks.cursor()
mturk_cur = mturk_db_connection.cursor()
# get annotation_type and video id
ant_type = json_res['annotation_type']
# Update naming task
if ant_type == 'name':
try:
# Decide if video we are updating is a verification video
db_cursor.execute('''SELECT * FROM named_verification_videos where id=?''',
                              (json_res['id'],)) # TODO: check whether this is the right query
is_verification = not (db_cursor.fetchone() is None)
# Apply new label if it isn't a verification video
if not is_verification:
update_item = (int(json_res['occluded']),
json_res['nouns'], json_res['verb'],
json_res['user_name'], int(json_res['red_flag'])*1,
int(json_res['id']))
db_cursor.execute('''UPDATE video_db
SET named=1, name_locked=0, occluded=?,
action_noun=?, action_verb=?, named_by_user=?, red_flag=?
WHERE id=?''', update_item)
# Update MTurk database to reflect this change
if is_mturk and is_verification:
mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
verifications_completed = verifications_completed + 1
WHERE hit_id=?''', (json_res['assignmentId'],
json_res['workerId'], json_res['hitId']))
mturk_cur.execute('''INSERT INTO name_verification_attempts(
hit_id, assignment_id, worker_id,
id, action_noun, action_verb)
VALUES (?,?,?,?,?,?)''', (json_res['hitId'],
json_res['assignmentId'], json_res['workerId'],
json_res['id'], json_res['nouns'], json_res['verb']))
mturk_db_connection.commit()
elif is_mturk and not is_verification:
print(json_res['assignmentId'],
json_res['workerId'], json_res['hitId'])
mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
labels_completed = labels_completed + 1
WHERE hit_id=?''', (json_res['assignmentId'],
json_res['workerId'], json_res['hitId']))
mturk_db_connection.commit()
annotation_tasks.commit()
except sqlite3.Error as e:
print_log_info(str(e))
return False
else: # ie. it's a trimming task
try:
# Decide if video we are updating is a verification video
db_cursor.execute('''SELECT * FROM trimmed_verification_videos where id=?''',
                              (json_res['id'],)) # TODO: check whether this is the right query
is_verification = not (db_cursor.fetchone() is None)
# Apply new label if it isn't a verification video
if not is_verification:
update_item = (float(json_res['start_time']),
float(json_res['end_time']),
json_res['user_name'], int(json_res['red_flag'])*2,
int(json_res['id']))
db_cursor.execute('''UPDATE video_db
SET trimmed=1, trim_locked=0,
start_time=?, end_time=?, trimmed_by_user=?, red_flag=?
WHERE id=?''', update_item)
# Update MTurk database to reflect this change
if is_mturk and is_verification:
mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
verifications_completed = verifications_completed + 1
WHERE hit_id=?''', (json_res['assignmentId'],
json_res['workerId'], json_res['hitId']))
mturk_cur.execute('''INSERT INTO trim_verification_attempts(
hit_id, assignment_id, worker_id,
id, start_time, end_time)
VALUES (?,?,?,?,?,?)''', (json_res['hitId'],
json_res['assignmentId'], json_res['workerId'],
json_res['id'], float(json_res['start_time']),
float(json_res['end_time'])))
mturk_db_connection.commit()
elif is_mturk and not is_verification:
print(json_res['assignmentId'],
json_res['workerId'], json_res['hitId'])
mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
labels_completed = labels_completed + 1
WHERE hit_id=?''', (json_res['assignmentId'],
json_res['workerId'], json_res['hitId']))
mturk_db_connection.commit()
# TODO update mturk stuff
annotation_tasks.commit()
except sqlite3.Error as e:
print_log_info(str(e))
return False
# color print the red flag
if json_res['red_flag']:
print_log_info('\033[93m' + "Task ID ({:d}) Type ({:s}) has been RED_FLAGGED!".format(
json_res['id'], ant_type) + '\033[0m')
# return
return True
@app.errorhandler(404)
def not_found(error):
"""
Default error handler for 404
"""
return flask.make_response(json.dumps({'error': str(error)}), 404)
@app.route('/get_task', methods=['POST'])
def get_task():
"""
Get a task from the server
A request is a json file with the following fields:
- "annotation_type" which can have the values...
- name
- name_preview
- trim
- trim_preview
- "user_name"
If it is a request from an MTurk iFrame, it also has the following:
- "workerId"
- "hitId"
"""
# Dict holds the results to return to client
ret = {}
# Make sure the content type is json
try:
request_type = flask.request.headers.get('Content-Type')
if request_type != 'application/json':
raise ValueError('request type must be JSON')
request_data = flask.request.get_data()
except ValueError as err:
ret['code'] = -1
ret['error_msg'] = str(err)
return json.dumps(ret)
except:
ret['code'] = -2
ret['error_msg'] = 'unknown parameter error'
return json.dumps(ret)
# Decode json from request data into a dict, and make sure all required data is present
try:
json_file = json.JSONDecoder().decode(request_data)
print_log_info("Task request: {:s}".format(json_file))
is_mturk = "assignmentId" in json_file and "workerId" in json_file and \
"hitId" in json_file
if 'annotation_type' not in json_file:
raise ValueError('annotation_type missing in request')
else:
# more sanity check
ant_type = json_file['annotation_type']
if not ((ant_type == 'name') or (ant_type == 'trim')
or (ant_type == 'name_preview') or (ant_type == 'trim_preview')):
raise ValueError('unknown annotation_type')
except ValueError as err:
ret['code'] = -3
ret['error_msg'] = str(err)
return json.dumps(ret)
# Decide if we need a verification task
if ant_type == 'name_preview' or ant_type == 'trim_preview':
needs_verification_task = True
elif ((ant_type == 'name'
or ant_type == 'trim')
and is_mturk):
needs_verification_task = \
decide_if_needs_verification(json_file, app.mturk_db_connection)
else:
needs_verification_task = False
# Get a verification task or next available task, and return to user
try:
if needs_verification_task:
task = get_verification_task(app.annotation_tasks, ant_type)
else:
task = get_next_available_task(app.annotation_tasks, ant_type)
if not task:
raise ValueError('can not get a valid task. please re-try.')
else:
ret = task
except ValueError as err:
ret['code'] = -1
ret['error_msg'] = str(err)
return json.dumps(ret)
return json.dumps(ret)
@app.route('/return_task', methods=['POST'])
def return_task():
"""
Processes the JSON sent from the client to submit a label
JSON has the following fields:
- id, which is the video ID
- annotation_type, which can be "name" or "trim"
- user_name
If the request is coming from an mturk iFrame, it should have:
- assignmentId
- workerId
- hitId
If annotation_type is "name", it should have the following:
- verb, a string representing the word selected from the dropdown menu
- occluded, a boolean from the checkbox in the page
- nouns, a string filled out by the user for the objects being handled
TODO figure out the trim stuff
"""
# Dict holds the results to return to client
ret = {}
try:
# make sure the content type is json
request_type = flask.request.headers.get('Content-Type')
if request_type != 'application/json':
raise ValueError('request type must be JSON')
request_data = flask.request.get_data()
except ValueError as err:
ret['code'] = -1
ret['error_msg'] = str(err)
return json.dumps(ret)
except:
ret['code'] = -2
ret['error_msg'] = 'unknown parameter error'
return json.dumps(ret)
# decode json from request data into a dict
try:
json_file = json.JSONDecoder().decode(request_data)
print_log_info("Task returned: {:s}".format(json_file))
if 'annotation_type' not in json_file:
raise ValueError('annotation_type missing in request')
if 'id' not in json_file:
raise ValueError('id missing in request')
else:
# more sanity check
ant_type = json_file['annotation_type']
if not ((ant_type == 'name') or (ant_type == 'trim')):
raise ValueError('unknown annotation_type')
except ValueError as err:
ret['code'] = -3
ret['error_msg'] = str(err)
return json.dumps(ret)
is_mturk = "assignmentId" in json_file and "workerId" in json_file and \
"hitId" in json_file
# Get next available task
try:
flag = update_task(app.mturk_db_connection, app.annotation_tasks, json_file, is_mturk)
if not flag:
raise ValueError('can not update the task. Please re-try.')
else:
ret['code'] = 0
ret['error_msg'] = 'success'
except ValueError as err:
ret['code'] = -3
ret['error_msg'] = str(err)
return json.dumps(ret)
more_to_complete = not is_mturk or \
not task_completed(json_file, app.mturk_db_connection)
if not more_to_complete:
try:
mturk_db_connection = app.mturk_db_connection
mturk_cur = mturk_db_connection.cursor()
mturk_cur.execute('''UPDATE hits SET status='pending_approval' WHERE assignment_id=?''',
(json_file["assignmentId"],))
mturk_db_connection.commit()
except sqlite3.Error as err:
ret['code'] = -3
ret['error_msg'] = str(err)
return json.dumps(ret)
ret['more_to_complete'] = more_to_complete
return json.dumps(ret)
@app.route('/hello')
def hello():
return 'hello world'
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Setup a web server for video annotation')
parser.add_argument('--port', dest='port',
help='which port to serve content on',
default=5050, type=int)
parser.add_argument('--video_db', dest='video_db',
help='SQLite3 database with normal videos',
default='video_db.db', type=str)
parser.add_argument('--mturk_db', dest='mturk_db',
help='SQLite3 database with logs for mturk',
default='mturk_db.db', type=str)
parser.add_argument('--sandbox', dest='sandbox',
help='If this is a sandbox HIT (otherwise is a real one)',
default=False, action='store_true')
parser.add_argument('--aws_key_id', dest='aws_access_key_id',
help='AWS Access Key ID',
default='', type=str)
parser.add_argument('--aws_key', dest='aws_secret_access_key',
help='AWS Secret Access Key',
default='', type=str)
parser.add_argument('--certfile', dest='certfile',
help='SSL certfile location',
default='', type=str)
parser.add_argument('--keyfile', dest='keyfile',
help='SSL keyfile location',
default='', type=str)
args = parser.parse_args()
return args
def start_from_terminal():
"""
entry of the main function
"""
# parse params
args = parse_args()
# load annotation tasks
app.annotation_tasks = load_annotation_tasks(args.video_db)
app.mturk_db_connection = load_annotation_tasks(args.mturk_db)
# Set global variables
app.aws_access_key_id = args.aws_access_key_id
app.aws_secret_access_key = args.aws_secret_access_key
app.sandbox = args.sandbox
# start server without cert if none provided
if args.certfile == '' and args.keyfile == '':
server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app))
else:
server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app), ssl_options={
"certfile": args.certfile,
"keyfile": args.keyfile,
})
server.bind(args.port)
# setup exist function
def save_db():
app.annotation_tasks.close()
app.mturk_db_connection.close()
import atexit
atexit.register(save_db)
# set up one server
server.start(1)
print_log_info("Tornado server starting on port {}".format(args.port))
# show stats every time we launch the service
collect_db_stats()
approve_assignments()
tornado.ioloop.PeriodicCallback(expire_locked_items, 20*1000).start()
tornado.ioloop.PeriodicCallback(collect_db_stats, 3600*1000).start()
tornado.ioloop.PeriodicCallback(approve_assignments, 20*1000).start()
tornado.ioloop.IOLoop.current().start()
if __name__ == '__main__':
start_from_terminal() | 2.5625 | 3 |
test/test_normalizer.py | simonpf/qrnn | 0 | 12798930
import numpy as np
from quantnn.normalizer import Normalizer, MinMaxNormalizer
def test_normalizer_2d():
"""
Checks that all feature indices that are not excluded have zero
mean and unit std. dev.
"""
x = np.random.normal(size=(100000, 10)) + np.arange(10).reshape(1, -1)
normalizer = Normalizer(x,
exclude_indices=range(1, 10, 2))
x_normed = normalizer(x)
# Included indices should have zero mean and std. dev. 1.0.
assert np.all(np.isclose(x_normed[:, ::2].mean(axis=0),
0.0,
atol=1e-1))
assert np.all(np.isclose(x_normed[:, ::2].std(axis=0),
1.0,
1e-1))
# Excluded indices
assert np.all(np.isclose(x_normed[:, 1::2].mean(axis=0),
np.arange(10)[1::2].reshape(1, -1),
1e-2))
assert np.all(np.isclose(x_normed[:, 1::2].std(axis=0),
1.0,
1e-2))
# Channels without variation should be set to -1.0
x = np.zeros((100, 10))
normalizer = Normalizer(x)
x_normed = normalizer(x)
assert np.all(np.isclose(x_normed, -1.0))
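# Minimal usage sketch of the API exercised by these tests (method names as
# used above; the training/new data split is illustrative):
#
#   norm = Normalizer(x_train, exclude_indices=[0])
#   x_ready = norm(x_new)           # normalize new data
#   x_back = norm.invert(x_ready)   # approximate inverse transform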
def test_min_max_normalizer_2d():
"""
    Checks that all feature indices that are not excluded are scaled into the
    range [-1, 1], and that NaN and constant columns map to sentinel values.
"""
x = np.random.normal(size=(100000, 11)) + np.arange(11).reshape(1, -1)
normalizer = MinMaxNormalizer(x, exclude_indices=range(1, 10, 2))
x[:, 10] = np.nan
x_normed = normalizer(x)
# Included indices should have minimum value -0.9 and
# maximum value 1.0.
assert np.all(np.isclose(x_normed[:, :10:2].min(axis=0),
-1.0))
assert np.all(np.isclose(x_normed[:, :10:2].max(axis=0),
1.0))
    # nan values should be mapped to -1.5 (below the [-1, 1] output range).
assert np.all(np.isclose(x_normed[:, -1], -1.5))
# Channels without variation should be set to -1.0
x = np.zeros((100, 10))
normalizer = MinMaxNormalizer(x)
x_normed = normalizer(x)
assert np.all(np.isclose(x_normed, -1.0))
def test_invert():
"""
Ensure that the inverse function of the Normalizer works as expected.
"""
x = np.random.normal(size=(100000, 10)) + np.arange(10).reshape(1, -1)
normalizer = Normalizer(x, exclude_indices=[0, 1, 2])
x_normed = normalizer(x)
x = normalizer.invert(x_normed)
assert np.all(np.isclose(np.mean(x, axis=0),
np.arange(10, dtype=np.float32),
atol=1e-2))
def test_save_and_load(tmp_path):
"""
    Ensure that a saved and re-loaded normalizer yields the same results as the original.
"""
x = np.random.normal(size=(100000, 10)) + np.arange(10).reshape(1, -1)
normalizer = Normalizer(x,
exclude_indices=range(1, 10, 2))
normalizer.save(tmp_path / "normalizer.pckl")
loaded = Normalizer.load(tmp_path / "normalizer.pckl")
x_normed = normalizer(x)
x_normed_loaded = loaded(x)
assert np.all(np.isclose(x_normed,
x_normed_loaded))
def test_load_sftp(tmp_path):
"""
    Ensure that a saved and re-loaded normalizer yields the same results as the original.
"""
x = np.random.normal(size=(100000, 10)) + np.arange(10).reshape(1, -1)
normalizer = Normalizer(x,
exclude_indices=range(1, 10, 2))
normalizer.save(tmp_path / "normalizer.pckl")
loaded = Normalizer.load(tmp_path / "normalizer.pckl")
x_normed = normalizer(x)
x_normed_loaded = loaded(x)
assert np.all(np.isclose(x_normed,
x_normed_loaded,
rtol=1e-3))
| 2.578125 | 3 |
src/cool_chip/utils/io.py | leoank/cool_chip | 0 | 12798931 | from os import path
from pathlib import Path
def curr_file_path() -> Path:
"""Get cuurent file path."""
return Path(__file__).absolute()
def out_folder_path() -> Path:
"""Get output folder path."""
return curr_file_path().parents[3].joinpath("out").absolute()
def out_geom_path() -> Path:
    """Get output geometry folder path."""
    # os.path.abspath returns a str; wrap it back into a Path to honor the annotation.
    return Path(path.abspath(out_folder_path().joinpath("geometry")))
| 2.453125 | 2 |
oppertions/09.py | mallimuondu/python-practice | 0 | 12798932 | i = 1
while i < 6:
print(i)
if (i == 3):
break
i += 1 | 3.546875 | 4 |
pyarubaoss/anycli.py | HPENetworking/pyarubaoss | 8 | 12798933 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, json, base64
def post_cli(auth, command):
url_cli = "http://" + auth.ipaddr + "/rest/" + auth.version + "/cli"
command_dict = {"cmd": command}
try:
post_command = requests.post(url_cli, headers=auth.cookie, data=json.dumps(command_dict))
cli_response = post_command.json()['result_base64_encoded']
decoded_response = base64.b64decode(cli_response).decode('utf-8')
return decoded_response
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " post_cli: An Error has occurred"
| 2.65625 | 3 |
src/Archive_info_generate_charts.py | adeckert23/soccer-proj | 0 | 12798934 | #Saved information for generating plots:
#-------------------------------------------------------------------------------
# #Figure
# fig.clear()
# fig = plt.figure(figsize=(10, 10))
#Name to appear on each axis for offensive categories
# titles = ['Rating', 'Goals', 'Assists', 'SpG', 'Drb', 'KeyP','PS%',
# 'Crosses', 'Fouled', 'mis_cont','Tackles', 'Inter']
#-------------------------------------------------------------------------------
#Bund Playmakers:
# Sancho = bundesliga_chart_df.loc[1924].values
# Gnabry = bundesliga_chart_df.loc[1716].values
# Brandt = bundesliga_chart_df.loc[1654].values
# Muller = bundesliga_chart_df.loc[1719].values
# Hazard = bundesliga_chart_df.loc[1959].values
# Kostic = bundesliga_chart_df.loc[1677].values
# labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,19,6),2),
# np.around(np.linspace(0,13,6),2), np.around(np.linspace(0,4.3,6),2),
# np.around(np.linspace(0,3.2,6),2), np.around(np.linspace(0,3,6),2),
# np.around(np.linspace(0,100,6),2), np.around(np.linspace(0,2.7,6),2),
# np.around(np.linspace(0,2.8,6),2), np.around(np.linspace(0,5.8,6),2),
# np.around(np.linspace(0,4,6),2), np.around(np.linspace(0,3,6),2)]
# radar = Radar1(fig, titles, labels)
# radar.plot(Sancho[1:], '-', lw=3, color='#FFFF00', alpha=0.4, label=Sancho[0])
# radar.plot(Gnabry[1:], '-', lw=3, color='r', alpha=0.4, label=Gnabry[0])
# radar.plot(Brandt[1:], '-', lw=3, color='k', alpha=0.4, label=Brandt[0])
# radar.plot(Muller[1:], '-', lw=3, color='m', alpha=0.4, label=Muller[0])
# radar.plot(Hazard[1:], '-', lw=3, color= '#0000FF', alpha=0.4, label=Hazard[0])
# radar.plot(Kostic[1:], '-', lw=3, color='#008080', alpha=0.4, label=Kostic[0])
# radar.ax.legend()
# fig.suptitle('Bundesliga Playmakers', fontsize=16)
# fig.savefig('Bund_Playmakers.png')
#-------------------------------------------------------------------------------
#Premier League Playmakers
# Hazard = Prem_chart_df.loc[285].values
# Eriksen = Prem_chart_df.loc[390].values
# Sane = Prem_chart_df.loc[140].values
# Sterling = Prem_chart_df.loc[144].values
# Salah = Prem_chart_df.loc[429].values
# labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,19,6),2),
# np.around(np.linspace(0,11,6),2), np.around(np.linspace(0,3.9,6),2),
# np.around(np.linspace(0,3.2,6),2), np.around(np.linspace(0,3,6),2),
# np.around(np.linspace(0,100,6),2), np.around(np.linspace(0,2.3,6),2),
# np.around(np.linspace(0,3,6),2), np.around(np.linspace(0,8.8,6),2),
# np.around(np.linspace(0,4.3,6),2), np.around(np.linspace(0,2.9,6),2)]
# radar = Radar1(fig, titles, labels)
# radar.plot(Hazard[1:], '-', lw=3, color='#FFFF00', alpha=0.4, label=Hazard[0])
# radar.plot(Eriksen[1:], '-', lw=3, color='#000080', alpha=0.4, label=Eriksen[0])
# radar.plot(Sane[1:], '-', lw=3, color='m', alpha=0.4, label=Sane[0])
# radar.plot(Sterling[1:], '-', lw=3, color='#00FFFF', alpha=0.4, label=Sterling[0])
# radar.plot(Salah[1:], '-', lw=3, color= 'r', alpha=0.4, label=Salah[0])
# radar.plot(Pogba[1:], '-', lw=3, color='k', alpha=0.4, label=Pogba[0])
# radar.ax.legend()
# fig.suptitle('Premier League Playmakers', fontsize=16)
# fig.savefig('Prem_Playmakers.png')
#-------------------------------------------------------------------------------
#FIFA PLAYER OF THE YEAR COMPARISON FOLLOW UP
#Messi vs Ronaldo
# Modric = df_chart_df.loc[1504].values
# Ronaldo= df_chart_df.loc[729].values
# Salah = df_chart_df.loc[429].values
# Mbappe = df_chart_df.loc[2343].values
# Messi = df_chart_df.loc[1241].values
# labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,32,6),2),
# np.around(np.linspace(0,13,6),2), np.around(np.linspace(0,6.1,6),2),
# np.around(np.linspace(0,4.8,6),2), np.around(np.linspace(0,3.2,6),2),
# np.around(np.linspace(0,100,6),2), np.around(np.linspace(0,3,6),2),
# np.around(np.linspace(0,3.2,6),2), np.around(np.linspace(0,8.8,6),2),
# np.around(np.linspace(0,6.5,6),2), np.around(np.linspace(0,3.2,6),2)]
# radar = Radar1(fig, titles, labels)
# radar.plot(Modric[1:], '-', lw=3, color='#000000', alpha=0.4, label=Modric[0])
# radar.plot(Ronaldo[1:], '-', lw=3, color='#800000', alpha=0.4, label=Ronaldo[0])
# radar.plot(Salah[1:], '-', lw=3, color='#FF0000', alpha=0.4, label=Salah[0])
# radar.plot(Mbappe[1:], '-', lw=3, color='#0000FF', alpha=0.4, label=Mbappe[0])
# radar.plot(Messi[1:], '-', lw=3, color='#00FFFF', alpha=0.4, label=Messi[0])
#
# radar.ax.legend()
# fig.suptitle('FIFA Player of the Year follow up', fontsize=16)
# fig.savefig('Fifa_POY.png')
#-------------------------------------------------------------------------------
#Serie A STRIKERS storyline
# Ronaldo= serie_a_chart.loc[729].values
# Quag = serie_a_chart.loc[1004].values
# Icardi = serie_a_chart.loc[664].values
# PiatekG = serie_a_chart.loc[608].values
# PiatekM = serie_a_chart.loc[961].values
# labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,21,6),2),
# np.around(np.linspace(0,9,6),2), np.around(np.linspace(0,6.1,6),2),
# np.around(np.linspace(0,2.9,6),2), np.around(np.linspace(0,3.2,6),2),
# np.around(np.linspace(0,100,6),2), np.around(np.linspace(0,2.9,6),2),
# np.around(np.linspace(0,3.2,6),2), np.around(np.linspace(0,6.3,6),2),
# np.around(np.linspace(0,4.2,6),2), np.around(np.linspace(0,3.2,6),2)]
# radar = Radar1(fig, titles, labels)
# radar.plot(Ronaldo[1:], '-', lw=3, color='#FF00FF', alpha=0.4, label=Ronaldo[0])
# radar.plot(Quag[1:], '-', lw=3, color='b', alpha=0.4, label=Quag[0])
# radar.plot(Icardi[1:], '-', lw=3, color='k', alpha=0.4, label=Icardi[0])
# radar.plot(PiatekG[1:], '-', lw=3, color='#00FFFF', alpha=0.4, label=PiatekG[0])
# radar.plot(PiatekM[1:], '-', lw=3, color='r', alpha=0.4, label=PiatekM[0])
#-------------------------------------------------------------------------------
#Piatek on Genoa vs Piatek on Milan
# PiatekG = serie_a_chart.loc[608].values
# PiatekM = serie_a_chart.loc[961].values
#
#labels/linspace from serie_a above
# radar = Radar1(fig, titles, labels)
# radar.plot(PiatekG[1:], '-', lw=3, color='#0000FF', alpha=0.4, label='<NAME>')
# radar.plot(PiatekM[1:], '-', lw=3, color='r', alpha=0.4, label='<NAME>')
#
# radar.ax.legend()
# fig.suptitle('Piatek Before and After the Transfer', fontsize=16)
# fig.savefig('Piatek.png')
#-------------------------------------------------------------------------------
#Identifying young defenders to scout further.
#Under 20 years old, compared to possibly best young CB in the world Varane
# Ndicka = young_center_backs_chart.loc[1676].values
# Zag = young_center_backs_chart.loc[1922].values
# Konate = young_center_backs_chart.loc[1806].values
# Muki = young_center_backs_chart.loc[2267].values
# Bastoni = young_center_backs_chart.loc[779].values
# Varane = young_center_backs_chart.loc[1510].values
# titles = ['Rating', 'AvgP','PS%', 'mis_cont','AerialsWon', 'Tackles', 'Inter',
# 'Fouls', 'Clear', 'Blocks']
#
# labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,91,6),2),
# np.around(np.linspace(0,100,6),2), np.around(np.linspace(0,8.8,6),2),
# np.around(np.linspace(0,7.8,6),2), np.around(np.linspace(0,4.4,6),2),
# np.around(np.linspace(0,3,6),2), np.around(np.linspace(0,2.7,6),2),
# np.around(np.linspace(0,7.4,6),2), np.around(np.linspace(0,1.6,6),2)]
#
# radar = Radar1(fig, titles, labels)
# radar.plot(Varane[1:], '-', lw=3, color='#00FFFF', alpha=0.4, label=Varane[0])
# radar.plot(Ndicka[1:], '-', lw=3, color='r', alpha=0.4, label=Ndicka[0])
# radar.plot(Zag[1:], '-', lw=3, color='#FFFF00', alpha=0.4, label=Zag[0])
# radar.plot(Konate[1:], '-', lw=3, color='#FF00FF', alpha=0.4, label=Konate[0])
# radar.plot(Muki[1:], '-', lw=3, color='b', alpha=0.4, label=Muki[0])
# radar.plot(Bastoni[1:], '-', lw=3, color='g', alpha=0.4, label=Bastoni[0])
#
# radar.ax.legend()
# fig.suptitle('Young Defenders', fontsize=16)
# fig.savefig('Young_Defenders.png')
#-------------------------------------------------------------------------------
# Robertson = chart_prep.loc[419].values
# Shaw = chart_prep.loc[186].values
# Alonso = chart_prep.loc[291].values
# Rose = chart_prep.loc[391].values
# Kolasinac = chart_prep.loc[514].values
# Mendy = chart_prep.loc[128].values
#
# #Figure
# fig.clear()
#
# fig = plt.figure(figsize=(10, 10))
# #Name to appear
# titles =['Rating', 'Assists', 'Drb','PS%', 'Crosses', 'mis_cont', 'Tackles', 'Inter', 'Fouls', 'Clear', 'Blocks']
# #Numerical labels to be displayed along each axis
# labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,11,6),2),
# np.around(np.linspace(0,3.2,6),2), np.around(np.linspace(0,100,6),2),
# np.around(np.linspace(0,2.3,6),2), np.around(np.linspace(0,8.8,6),2),
# np.around(np.linspace(0,4.3,6),2), np.around(np.linspace(0,2.9,6),2),
# np.around(np.linspace(0,2.3,6),2), np.around(np.linspace(0,5,6),2),
# np.around(np.linspace(0,1,6),2)]
#
#
# radar = Radar1(fig, titles, labels)
# radar.plot(Robertson[1:], '-', lw=5, color='r', alpha=0.4, label=Robertson[0])
# radar.plot(Shaw[1:], '-', lw=5, color='k', alpha=0.4, label=Shaw[0])
# radar.plot(Alonso[1:], '-', lw=5, color='b', alpha=0.4, label=Alonso[0])
# radar.plot(Rose[1:], '-', lw=5, color='m', alpha=0.4, label=Rose[0])
# radar.plot(Kolasinac[1:], '-', lw=5, color= 'g', alpha=0.4, label=Kolasinac[0])
# radar.plot(Mendy[1:], '-', lw=5, color= 'c', alpha=0.4, label=Mendy[0])
#
# radar.ax.legend()
# fig.suptitle('Premier League LB', fontsize=22)
# fig.savefig('Prem_LB.png')
#-------------------------------------------------------------------------------
# Bale = chart_prep.loc[1496].values
# Benzema = chart_prep.loc[1500].values
# Asensio = chart_prep.loc[1506].values
#
# #Figure
# fig.clear()
#
# fig = plt.figure(figsize=(10, 10))
# #Name to appear
# titles =['Rating', 'Goals', 'Assists', 'SpG', 'Drb', 'KeyP','PS%',
# 'Crosses', 'Fouled', 'mis_cont']
# #Numerical labels to be displayed along each axis
# labels = [np.around(np.linspace(0,10,6),2), np.around(np.linspace(0,32,6),2),
# np.around(np.linspace(0,12,6),2), np.around(np.linspace(0,5.2,6),2),
# np.around(np.linspace(0,4.1,6),2), np.around(np.linspace(0,2.9,6),2),
# np.around(np.linspace(0,100,6),2), np.around(np.linspace(0,3,6),2),
# np.around(np.linspace(0,3.2,6),2), np.around(np.linspace(0,7,6),2)]
#
#
# radar = Radar1(fig, titles, labels)
# radar.plot(Bale[1:], '-', lw=5, color='r', alpha=0.4, label=Bale[0])
# radar.plot(Benzema[1:], '-', lw=5, color='b', alpha=0.4, label=Benzema[0])
# radar.plot(Asensio[1:], '-', lw=5, color='g', alpha=0.4, label=Asensio[0])
#
# radar.ax.legend()
# fig.suptitle('Ronaldos Replacement', fontsize=22)
# fig.savefig('Madrid_front_three.png')
#-------------------------------------------------------------------------------
| 2.28125 | 2 |
fixture/group.py | Budanovvv/pythin_trainig | 1 | 12798935 | from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def create(self, group):
wd = self.app.wd
self.go_to_group_page()
wd.find_element_by_name("new").click()
self.fill_form_group(group)
# Submit group creation
wd.find_element_by_name("submit").click()
self.back_to_group_page()
self.group_cache = None
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_all_groups(self):
wd = self.app.wd
self.go_to_group_page()
for i in range(len(wd.find_elements_by_name("selected[]"))):
wd.find_elements_by_name("selected[]")[i].click()
def test_delete_all_groups(self):
self.select_all_groups()
self.delete_groups()
def delete_group_by_index(self, index):
self.go_to_group_page()
self.select_group_by_index(index)
self.delete_groups()
self.group_cache = None
def delete_groups(self):
wd = self.app.wd
wd.find_element_by_name("delete").click()
self.back_to_group_page()
def delete_first_group(self):
self.delete_group_by_index(0)
def update_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.go_to_group_page()
self.select_group_by_index(index)
wd.find_element_by_name("edit").click()
self.fill_form_group(new_group_data)
# Submit group update
wd.find_element_by_name("update").click()
self.back_to_group_page()
self.group_cache = None
    def update_first_group(self, new_group_data):
        self.update_group_by_index(0, new_group_data)
def select_first_group(self):
self.select_group_by_index(0)
def go_to_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and
len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def back_to_group_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def change_group_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_form_group(self, group):
self.change_group_value("group_name", group.name)
self.change_group_value("group_header", group.header)
self.change_group_value("group_footer", group.footer)
def count(self):
wd = self.app.wd
self.go_to_group_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.go_to_group_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
group_id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=group_id))
return list(self.group_cache)
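
# Usage sketch (hypothetical: `app` is the fixture's application object that
# owns the Selenium WebDriver as `app.wd`, and Group is assumed to accept the
# keyword fields used by fill_form_group above):
#
#     helper = GroupHelper(app)
#     helper.create(Group(name="qa", header="h", footer="f"))
#     assert helper.count() > 0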
| 2.578125 | 3 |
examples/idioms/programs/024.0664-assign-to-string-the-japanese-word-.py | laowantong/paroxython | 31 | 12798936 | """Assign to string the japanese word ネコ.
Declare a new string _s and initialize it with the literal value "ネコ" (which means "cat" in japanese)
Source: programming-idioms.org
"""
# Implementation author: cym13
# Created on 2015-11-30T12:37:27.133314Z
# Last modified on 2015-11-30T12:37:27.133314Z
# Version 1
s = "ネコ"
| 2.78125 | 3 |
workflow/urls.py | ChalkLab/SciFlow | 1 | 12798937 | <reponame>ChalkLab/SciFlow<filename>workflow/urls.py
""" urls for the workflow app """
from django.urls import path
from workflow import views
urlpatterns = [
path('logs', views.logs, name='logs'),
path('logs/<lid>', views.viewlog, name='viewlog'),
]
| 1.851563 | 2 |
005/5.py | dkaisers/Project-Euler | 0 | 12798938 | import math
def sieve(n):
primes = list(range(2, n+1))
i = 0
while i < len(primes):
no = primes[i]
m = 2
while (no * m) <= max(primes):
if primes.count(no * m) > 0:
primes.remove(no * m)
m+=1
i+=1
return primes
def maxPower(n, limit):
i = 1
while math.pow(n, i + 1) <= limit:
i += 1
return i
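
# The code below computes lcm(1..limit): for each prime p <= limit it takes
# p**maxPower(p, limit), the largest power of p not exceeding limit, and
# multiplies these together (Project Euler problem 5).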
limit = int(input('Limit: '))
primes = sieve(limit)
s = 1
for x in primes:
print(math.pow(x, maxPower(x, limit)))
s *= math.pow(x, maxPower(x, limit))
print(s)
| 3.90625 | 4 |
saleor/graphql/payment/resolvers.py | valcome-analytics/saleor | 0 | 12798939 | import stripe
from ...payment.gateways.stripe.plugin import StripeGatewayPlugin
from ...plugins import manager
from ... import settings
from ...payment import gateway as payment_gateway, models
from ...payment.utils import fetch_customer_id
from ..utils.filters import filter_by_query_param
PAYMENT_SEARCH_FIELDS = ["id"]
def resolve_client_token(user, gateway: str):
customer_id = fetch_customer_id(user, gateway)
return payment_gateway.get_client_token(gateway, customer_id)
def resolve_payments(info, query):
queryset = models.Payment.objects.all().distinct()
return filter_by_query_param(queryset, query, PAYMENT_SEARCH_FIELDS)
def resolve_payment_meta(payment_intent_id):
stripe_plugin = manager.get_plugins_manager().get_plugin("mirumee.payments.stripe")
if isinstance(stripe_plugin, StripeGatewayPlugin):
return stripe_plugin.get_payment_meta(payment_intent_id)
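
# Sketch of how a schema field might delegate here (illustrative wiring only,
# not Saleor's actual GraphQL schema):
#
#     token = resolve_client_token(info.context.user, "mirumee.payments.stripe")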
| 1.851563 | 2 |
sorting.py | joaovitorle/AnotacoesCoursera2 | 0 | 12798940 | print('='*15,'#1','='*15)
#Sort method - sorts the list in place, from smallest to largest value
L1 = [1, 7, 4, -2, 3]
L2 = ["Cherry", "Apple", "Blueberry"]
L1.sort()
print(L1)
L2.sort()
print(L2)
print('='*15,'#2','='*15)
#Sorted - returns a new sorted list and leaves the original unchanged
L2= ["Cherry", "Apple", "Blueberry"]
L3= sorted(L2)
print(L3)
print(sorted(L2))
print(L2)
print("--------")
L2.sort()
print(L2)
print(L2.sort())
print('='*15,'#3','='*15)
#Optional reverse parameter
L2 = ["Cherry", "Apple", "Blueberry"]
print(sorted(L2,reverse = True))
print('='*15,'#4','='*15)
#Optional key parameters
L1 = [1, 7, 4, -2, 3]
def absolute(x):
if x >= 0:
return x
else:
return -x
print(absolute(3))
print(absolute(-119))
for y in L1:
print(absolute(y))
print('='*15,'#5','='*15)
#Optional key parameters
L1 = [1, 7, 4, -2, 3]
def absolute(x):
if x>=0:
return x
else:
return -x
L2 = sorted(L1, key=absolute)  # "key" tells sorted to order the items by whatever the given function returns for each of them
print(L2)
#or in reverse order
print(sorted(L1, reverse = True, key = absolute))
print('='*15,'#6','='*15)
#Optional key parameters
L1 = [1, 7, 4, -2, 3]
def absolute(x):
print('--- figuring out what to write on the post-it note for ' +str(x))
if x >=0:
return x
else:
return -x
print("About to call sorted")
L2= sorted(L1, key=absolute)
print("Finish execution of sorted")
print(L2)
print('='*15,'#7','='*15)
#Sorting a dictionary
L = ['E', 'F', 'B', 'A', 'D', 'I', 'I', 'C', 'B', 'A', 'D', 'D', 'E', 'D']
d = {}
for x in L:
if x in d:
        d[x] = d[x] + 1  # x is already a key in d; d[x] holds its count, so add 1 for this repeat sighting
else:
        d[x] = 1  # first time we see x: start its count at 1
for x in sorted(d.keys(), key=lambda k: d[k]):  # iterate the keys of d in order of their counts
    # sorted() receives the keys of d (E, F, B, ...) plus a key function.
    # The lambda takes a key k and returns d[k], its count, so the keys come
    # back ordered from the smallest count to the largest.
print(f'{x} appears {d[x]} times')
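# Equivalent and often clearer: sort (key, count) pairs directly.
# for x, ct in sorted(d.items(), key=lambda kv: kv[1]):
#     print(f'{x} appears {ct} times')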
print('='*15,'#8','='*15)
#Breaking Ties: Second Sorting
tups = [("A", 3, 2),
("C", 1, 4),
("B", 3, 1),
("A", 2, 4),
("C", 1, 2)]
for tup in sorted(tups):
print(tup)
print('='*15,'#9','='*15)
#Breaking Ties: Second Sorting
fruits = ['peach', 'kiwi', 'apple', 'blueberry', 'papaya', 'mango', 'pear']
new_order = sorted(fruits, key = lambda fruit_name: (len(fruit_name), fruit_name))
for fruit in new_order:
print (fruit)
print('='*15,'#10','='*15)
#When to use a Lambda Expression - lambdas suit short, simple keys; when things get complicated, use a named function
states = {"Minnesota": ['St.Paul', 'Minneapolis', 'Saint Cloud', 'Stillwater'],
"Michigan":['Ann Arbor', 'Traverse City', 'Lansing', 'Kalamazoo'],
"Washington":["Seatle", "Tacoma", "Olympia", "Vancouver"]}
def s_cities_count(cities_list):
#return a count of how many cities begin with "S"
ct = 0
for city in cities_list:
if city[0] == 'S':
ct = ct +1
return ct
def s_cities_count_for_state(state):
cities_list = states[state]
return s_cities_count(cities_list)
print(sorted(states,key=s_cities_count_for_state)) | 4.28125 | 4 |
scripts/vista_pallet.py | agrc/vista | 0 | 12798941 | <reponame>agrc/vista
#!/usr/bin/env python
# * coding: utf8 *
'''
vista_pallet.py
A module that contains a forklift pallet definition for the vista project.
'''
from forklift.models import Pallet
from os.path import join
class VistaPallet(Pallet):
def __init__(self):
super(VistaPallet, self).__init__()
self.arcgis_services = [('Vista', 'MapServer')]
self.sgid = join(self.garage, 'SGID.sde')
self.political = join(self.staging_rack, 'political.gdb')
self.copy_data = [self.political]
def build(self, config):
self.add_crates(['VistaBallotAreas',
'VistaBallotAreas_Proposed'],
{'source_workspace': self.sgid,
'destination_workspace': self.political})
| 1.953125 | 2 |
python_exercises/22TrocaDeCartas.py | Matheus-IT/lang-python-related | 0 | 12798942 | from os import system
def ler_qtd(n, msg):
n = int(input(msg))
while (n < 1) or (n > 10000):
n = int(input(f' - Entrada invalida!{msg}'))
return n
def preencher_set_cartas(cartas, qtd, p):
""" set de cartas, qtd de cartas, p de pessoa """
from time import sleep
    print()  # blank line
for cont in range(qtd):
carta = int(input(f' - Digite a {cont+1} carta de {p}: '))
while (carta < 1) or (carta > 100000):
carta = int(input(f' - \033[1;31mEntrada invalida!\033[m Digite a {cont+1} carta de {p}: '))
cartas.append(carta)
print(' - OK!')
        sleep(1)  # wait 1 s
def retirar_repetidos(lista):
l = list()
for cont in range(len(lista)):
if lista[cont] not in l:
l.append(lista[cont])
return l
def qtd_trocas(cartasA, cartasB):
inter_a = list()
inter_b = list()
for i in range(len(cartasA)):
for j in range(len(cartasB)):
if cartasA[i] == cartasB[j]:
break
elif (j == len(cartasB)-1):
inter_a.append(cartasA[i])
    inter_a = retirar_repetidos(inter_a)  # cards only Alice has, with duplicates removed
for i in range(len(cartasB)):
for j in range(len(cartasA)):
if cartasB[i] == cartasA[j]:
break
elif (j == len(cartasA)-1):
inter_b.append(cartasB[i])
    inter_b = retirar_repetidos(inter_b)  # cards only Beatriz has, with duplicates removed
menor = inter_a if len(inter_a) < len(inter_b) else inter_b
return len(menor)
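
# Equivalent set-based version (same result, shown for comparison):
#
#     def qtd_trocas(cartasA, cartasB):
#         only_a = set(cartasA) - set(cartasB)
#         only_b = set(cartasB) - set(cartasA)
#         return min(len(only_a), len(only_b))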
# Main program
qa = 0
a = list()  # Alice's cards
qb = 0
b = list()  # Beatriz's cards
system('cls')
print('{:=^50}'.format(' TROCA DE CARTAS POKEMON '))
qa = ler_qtd(qa, ' Quantas cartas Alice possui? ')
qb = ler_qtd(qb, ' Quantas cartas Beatriz possui? ')
preencher_set_cartas(a, qa, 'Alice')
preencher_set_cartas(b, qb, 'Beatriz')
print(sorted(a))
print(sorted(b))
maximo_trocas = qtd_trocas(a, b)
print(f' - Maximo de trocas e igual a {maximo_trocas}')
| 3.390625 | 3 |
wpcv/utils/data_aug/det_aug/pil_aug.py | Peiiii/wpcv | 0 | 12798943 | import numpy as np
import random
import numbers
import cv2
from PIL import Image
import wpcv
from wpcv.utils.ops import pil_ops, polygon_ops
from wpcv.utils.data_aug.base import Compose, Zip
from wpcv.utils.data_aug import img_aug
class ToPILImage(object):
def __init__(self):
self.to = img_aug.ToPILImage()
def __call__(self, img, *args):
if len(args):
return (self.to(img), *args)
else:
return self.to(img)
class BboxesToPoints(object):
def __call__(self, img, bboxes):
points = np.array(bboxes).reshape((-1, 2, 2))
return img, points
class PointsToBboxes(object):
def __call__(self, img, points):
bboxes = np.array(points).reshape((-1, 4))
return img, bboxes
class Reshape(object):
def __init__(self, shape):
self.target_shape = shape
def __call__(self, x):
return np.array(x).reshape(self.target_shape)
class Limitsize(object):
def __init__(self, maxsize):
limit = maxsize
if isinstance(limit, (tuple, list, set,)):
mw, mh = limit
else:
mw = mh = limit
self.size = (mw, mh)
def __call__(self, img, points):
mw, mh = self.size
w, h = img.size
rw = w / mw
rh = h / mh
r = max(rw, rh)
if r > 1:
nw, nh = int(w / r), int(h / r)
img = pil_ops.resize(img, (nw, nh))
points = polygon_ops.scale(points, 1 / r)
return img, points
class Scale(object):
def __init__(self, scales):
if isinstance(scales, (tuple, list)):
scaleX, scaleY = scales
else:
scaleX = scaleY = scales
self.scaleX, self.scaleY = scaleX, scaleY
def __call__(self, img, points):
scaleX, scaleY = self.scaleX, self.scaleY
img = pil_ops.scale(img, (scaleX, scaleY))
points = polygon_ops.scale(points, (scaleX, scaleY))
return img, points
class Resize(object):
def __init__(self, size, keep_ratio=False, fillcolor='black'):
self.size = size
self.keep_ratio = keep_ratio
self.fillcolor = fillcolor
def __call__(self, img, points):
w, h = img.size
tw, th = self.size
if not self.keep_ratio:
scaleX, scaleY = tw / w, th / h
img = pil_ops.resize(img, self.size)
points = polygon_ops.scale(points, (scaleX, scaleY))
else:
            if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
img = pil_ops.resize_keep_ratio(img, self.size, fillcolor=fillcolor)
rx = w / tw
ry = h / th
r = max(rx, ry)
nw = w / r
nh = h / r
dw = (tw - nw) // 2
dh = (th - nh) // 2
points = polygon_ops.scale(points, 1 / r)
points = polygon_ops.translate(points, (dw, dh))
return img, points
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, points):
imw, imh = img.size
if random.random() < self.p:
img = pil_ops.hflip(img)
points = [polygon_ops.hflip(pnts, imw) for pnts in points]
return img, points
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, points):
imw, imh = img.size
if random.random() < self.p:
img = pil_ops.vflip(img)
points = [polygon_ops.vflip(pnts, imh) for pnts in points]
return img, points
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomTranslate(object):
def __init__(self, max_offset=None, fillcolor='black'):
if max_offset is not None and len(max_offset) == 2:
mx, my = max_offset
max_offset = [-mx, -my, mx, my]
self.max_offset = max_offset
self.fillcolor = fillcolor
def __call__(self, img, points):
        if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
rang = polygon_ops.get_translate_range(points, img.size)
if self.max_offset:
def limit_box(box, limits=None):
if limits is None: return box
if len(limits) == 2:
ml, mt = 0, 0
mr, mb = limits
else:
assert len(limits) == 4
ml, mt, mr, mb = limits
l, t, r, b = box
l = max(ml, l)
t = max(mt, t)
r = min(mr, r)
b = min(mb, b)
if l > r:
return None
                if t > b:
                    return None
return [l, t, r, b]
rang = limit_box(rang, self.max_offset)
if rang is None:
return img, points
ofx = random.randint(rang[0], rang[2])
ofy = random.randint(rang[1], rang[3])
img = pil_ops.translate(img, offset=(ofx, ofy), fillcolor=fillcolor)
points = [polygon_ops.translate(pnts, (ofx, ofy)) for pnts in points]
return img, points
class RandomRotate(object):
def __init__(self, degree, expand=True, fillcolor='black'):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
self.expand = expand
self.fillcolor = fillcolor
def __call__(self, img, points):
        if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.rotate(img, degree, expand=self.expand, fillcolor=fillcolor)
points = [polygon_ops.rotate(pnts, degree, (w // 2, h // 2), img_size=(w, h), expand=self.expand) for pnts in
points]
return img, points
class RandomShearX(object):
def __init__(self, degree):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
def __call__(self, img, points):
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.shear_x(img, degree)
points = [polygon_ops.shear_x(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
class RandomShearY(object):
def __init__(self, degree):
self.degree = degree if not isinstance(degree, numbers.Number) else [-degree, degree]
def __call__(self, img, points):
degree = random.random() * (self.degree[1] - self.degree[0]) + self.degree[0]
w, h = img.size
img = pil_ops.shear_y(img, degree)
points = [polygon_ops.shear_y(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
class RandomShear(object):
    def __init__(self, xdegree, ydegree=None, fillcolor='black'):
def get_param(param, defualt=None):
if param is None: return defualt
return param if not isinstance(param, numbers.Number) else [-param, param]
self.xdegree = get_param(xdegree)
self.ydegree = get_param(ydegree)
self.fillcolor = fillcolor
def __call__(self, img, points):
if self.xdegree:
            if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.xdegree[1] - self.xdegree[0]) + self.xdegree[0]
w, h = img.size
img = pil_ops.shear_x(img, degree, fillcolor=fillcolor)
points = [polygon_ops.shear_x(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
if self.ydegree:
            if self.fillcolor == 'random':
fillcolor = tuple(np.random.choice(range(256), size=3))
else:
fillcolor = self.fillcolor
degree = random.random() * (self.ydegree[1] - self.ydegree[0]) + self.ydegree[0]
w, h = img.size
img = pil_ops.shear_y(img, degree, fillcolor=fillcolor)
points = [polygon_ops.shear_y(pnts, degree, img_size=(w, h), expand=True) for pnts in points]
return img, points
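
# Usage sketch (hypothetical pipeline; Compose is imported above, but its
# exact chaining behavior is assumed here):
#
#     transform = Compose([
#         BboxesToPoints(),
#         RandomHorizontalFlip(p=0.5),
#         RandomRotate(10),
#         PointsToBboxes(),
#     ])
#     img, bboxes = transform(img, bboxes)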
# class RandomPerspective:
| 2.40625 | 2 |
kvtest.py | termistotel/kvTest | 0 | 12798944 | <gh_stars>0
import kivy
kivy.require('1.10.0') # replace with your current kivy version !
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.behaviors.codenavigation import CodeNavigationBehavior
from kivy.properties import ObjectProperty
from kivy.lang.builder import Builder
from kivy.lang.parser import ParserException
def kompajliraj(inp, outp, error):  # "kompajliraj" is Croatian for "compile"
try:
tmp = Builder.load_string(inp.text)
if not tmp:
error.text = "Nema nista napisano"
except AttributeError:
tmp = None
error.text = "Error: Krivi KV kod"
except ParserException:
tmp = None
error.text = "Error: ParserException"
outp.clear_widgets()
if tmp:
outp.add_widget(tmp)
error.text = "Nema greske"
class ErrorOut(Label):
pass
class ReloadButton(Button):
def on_press(self):
kompajliraj(self.textRead,self.textWrite,self.errorWrite)
class KVinputText(CodeNavigationBehavior,TextInput):
pass
class KVoutput(BoxLayout):
pass
class MainBox(BoxLayout):
pass
class KvtestApp(App):
def build(self):
mainbox = MainBox(orientation="vertical")
return mainbox
if __name__ == '__main__':
KvtestApp().run()
| 2.484375 | 2 |
netapp/santricity/models/symbol/ib_ioc_profile.py | NetApp/santricity-webapi-pythonsdk | 5 | 12798945 | # coding: utf-8
"""
IbIocProfile.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class IbIocProfile(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
IbIocProfile - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'ioc_guid': 'str', # (required parameter)
'vendor_id': 'str', # (required parameter)
'io_device_id': 'int', # (required parameter)
'device_version': 'int', # (required parameter)
'subsystem_vendor_id': 'str', # (required parameter)
'subsystem_id': 'int', # (required parameter)
'io_class': 'int', # (required parameter)
'io_subclass': 'int', # (required parameter)
'protocol': 'int', # (required parameter)
'protocol_version': 'int', # (required parameter)
'send_message_queue_depth': 'int', # (required parameter)
'rdma_read_queue_depth': 'int', # (required parameter)
'send_message_size': 'int', # (required parameter)
'rdma_transfer_size': 'int', # (required parameter)
'controller_ops_capability_mask': 'int', # (required parameter)
'service_entries': 'int', # (required parameter)
'id_string': 'str'
}
self.attribute_map = {
'ioc_guid': 'iocGuid', # (required parameter)
'vendor_id': 'vendorId', # (required parameter)
'io_device_id': 'ioDeviceId', # (required parameter)
'device_version': 'deviceVersion', # (required parameter)
'subsystem_vendor_id': 'subsystemVendorId', # (required parameter)
'subsystem_id': 'subsystemId', # (required parameter)
'io_class': 'ioClass', # (required parameter)
'io_subclass': 'ioSubclass', # (required parameter)
'protocol': 'protocol', # (required parameter)
'protocol_version': 'protocolVersion', # (required parameter)
'send_message_queue_depth': 'sendMessageQueueDepth', # (required parameter)
'rdma_read_queue_depth': 'rdmaReadQueueDepth', # (required parameter)
'send_message_size': 'sendMessageSize', # (required parameter)
'rdma_transfer_size': 'rdmaTransferSize', # (required parameter)
'controller_ops_capability_mask': 'controllerOpsCapabilityMask', # (required parameter)
'service_entries': 'serviceEntries', # (required parameter)
'id_string': 'idString'
}
self._ioc_guid = None
self._vendor_id = None
self._io_device_id = None
self._device_version = None
self._subsystem_vendor_id = None
self._subsystem_id = None
self._io_class = None
self._io_subclass = None
self._protocol = None
self._protocol_version = None
self._send_message_queue_depth = None
self._rdma_read_queue_depth = None
self._send_message_size = None
self._rdma_transfer_size = None
self._controller_ops_capability_mask = None
self._service_entries = None
self._id_string = None
@property
def ioc_guid(self):
"""
Gets the ioc_guid of this IbIocProfile.
The EUI-64 GUID used to uniquely identify the I/O controller.
:return: The ioc_guid of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._ioc_guid
@ioc_guid.setter
def ioc_guid(self, ioc_guid):
"""
Sets the ioc_guid of this IbIocProfile.
The EUI-64 GUID used to uniquely identify the I/O controller.
:param ioc_guid: The ioc_guid of this IbIocProfile.
:type: str
"""
self._ioc_guid = ioc_guid
@property
def vendor_id(self):
"""
Gets the vendor_id of this IbIocProfile.
The I/O controller vendor ID in IEEE format.
:return: The vendor_id of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._vendor_id
@vendor_id.setter
def vendor_id(self, vendor_id):
"""
Sets the vendor_id of this IbIocProfile.
The I/O controller vendor ID in IEEE format.
:param vendor_id: The vendor_id of this IbIocProfile.
:type: str
"""
self._vendor_id = vendor_id
@property
def io_device_id(self):
"""
Gets the io_device_id of this IbIocProfile.
A number assigned by vendor to identify the type of I/O controller
:return: The io_device_id of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._io_device_id
@io_device_id.setter
def io_device_id(self, io_device_id):
"""
Sets the io_device_id of this IbIocProfile.
A number assigned by vendor to identify the type of I/O controller
:param io_device_id: The io_device_id of this IbIocProfile.
:type: int
"""
self._io_device_id = io_device_id
@property
def device_version(self):
"""
Gets the device_version of this IbIocProfile.
A number assigned by the vendor to identify the device version.
:return: The device_version of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._device_version
@device_version.setter
def device_version(self, device_version):
"""
Sets the device_version of this IbIocProfile.
A number assigned by the vendor to identify the device version.
:param device_version: The device_version of this IbIocProfile.
:type: int
"""
self._device_version = device_version
@property
def subsystem_vendor_id(self):
"""
Gets the subsystem_vendor_id of this IbIocProfile.
The ID of the enclosure vendor in IEEE format, or else all zeros if there is no vendor ID.
:return: The subsystem_vendor_id of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._subsystem_vendor_id
@subsystem_vendor_id.setter
def subsystem_vendor_id(self, subsystem_vendor_id):
"""
Sets the subsystem_vendor_id of this IbIocProfile.
The ID of the enclosure vendor in IEEE format, or else all zeros if there is no vendor ID.
:param subsystem_vendor_id: The subsystem_vendor_id of this IbIocProfile.
:type: str
"""
self._subsystem_vendor_id = subsystem_vendor_id
@property
def subsystem_id(self):
"""
Gets the subsystem_id of this IbIocProfile.
A number identifying the subsystem where the I/O controller resides.
:return: The subsystem_id of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._subsystem_id
@subsystem_id.setter
def subsystem_id(self, subsystem_id):
"""
Sets the subsystem_id of this IbIocProfile.
A number identifying the subsystem where the I/O controller resides.
:param subsystem_id: The subsystem_id of this IbIocProfile.
:type: int
"""
self._subsystem_id = subsystem_id
@property
def io_class(self):
"""
Gets the io_class of this IbIocProfile.
The I/O class of the controller. 0x0000 -0xFFFE is reserved for I/O classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:return: The io_class of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._io_class
@io_class.setter
def io_class(self, io_class):
"""
Sets the io_class of this IbIocProfile.
The I/O class of the controller. 0x0000 -0xFFFE is reserved for I/O classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:param io_class: The io_class of this IbIocProfile.
:type: int
"""
self._io_class = io_class
@property
def io_subclass(self):
"""
Gets the io_subclass of this IbIocProfile.
The I/O sub-class of the controller. 0x0000 -0xFFFE is reserved for I/O sub-classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:return: The io_subclass of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._io_subclass
@io_subclass.setter
def io_subclass(self, io_subclass):
"""
Sets the io_subclass of this IbIocProfile.
The I/O sub-class of the controller. 0x0000 -0xFFFE is reserved for I/O sub-classes encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:param io_subclass: The io_subclass of this IbIocProfile.
:type: int
"""
self._io_subclass = io_subclass
@property
def protocol(self):
"""
Gets the protocol of this IbIocProfile.
The I/O protocol of the controller. 0x0000 -0xFFFE is reserved for I/O protocols encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:return: The protocol of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""
Sets the protocol of this IbIocProfile.
The I/O protocol of the controller. 0x0000 -0xFFFE is reserved for I/O protocols encompassed by the InfiniBand architecture. 0xFFFF is vendor-specific.
:param protocol: The protocol of this IbIocProfile.
:type: int
"""
self._protocol = protocol
@property
def protocol_version(self):
"""
Gets the protocol_version of this IbIocProfile.
The protocol version (protocol-specific).
:return: The protocol_version of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._protocol_version
@protocol_version.setter
def protocol_version(self, protocol_version):
"""
Sets the protocol_version of this IbIocProfile.
The protocol version (protocol-specific).
:param protocol_version: The protocol_version of this IbIocProfile.
:type: int
"""
self._protocol_version = protocol_version
@property
def send_message_queue_depth(self):
"""
Gets the send_message_queue_depth of this IbIocProfile.
The maximum depth of the Send Message Queue.
:return: The send_message_queue_depth of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._send_message_queue_depth
@send_message_queue_depth.setter
def send_message_queue_depth(self, send_message_queue_depth):
"""
Sets the send_message_queue_depth of this IbIocProfile.
The maximum depth of the Send Message Queue.
:param send_message_queue_depth: The send_message_queue_depth of this IbIocProfile.
:type: int
"""
self._send_message_queue_depth = send_message_queue_depth
@property
def rdma_read_queue_depth(self):
"""
Gets the rdma_read_queue_depth of this IbIocProfile.
The maximum depth of the per-channel RDMA Read Queue
:return: The rdma_read_queue_depth of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._rdma_read_queue_depth
@rdma_read_queue_depth.setter
def rdma_read_queue_depth(self, rdma_read_queue_depth):
"""
Sets the rdma_read_queue_depth of this IbIocProfile.
The maximum depth of the per-channel RDMA Read Queue
:param rdma_read_queue_depth: The rdma_read_queue_depth of this IbIocProfile.
:type: int
"""
self._rdma_read_queue_depth = rdma_read_queue_depth
@property
def send_message_size(self):
"""
Gets the send_message_size of this IbIocProfile.
The maximum size of Send Messages in bytes.
:return: The send_message_size of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._send_message_size
@send_message_size.setter
def send_message_size(self, send_message_size):
"""
Sets the send_message_size of this IbIocProfile.
The maximum size of Send Messages in bytes.
:param send_message_size: The send_message_size of this IbIocProfile.
:type: int
"""
self._send_message_size = send_message_size
@property
def rdma_transfer_size(self):
"""
Gets the rdma_transfer_size of this IbIocProfile.
The maximum size of outbound RDMA transfers initiated by the controller.
:return: The rdma_transfer_size of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._rdma_transfer_size
@rdma_transfer_size.setter
def rdma_transfer_size(self, rdma_transfer_size):
"""
Sets the rdma_transfer_size of this IbIocProfile.
The maximum size of outbound RDMA transfers initiated by the controller.
:param rdma_transfer_size: The rdma_transfer_size of this IbIocProfile.
:type: int
"""
self._rdma_transfer_size = rdma_transfer_size
@property
def controller_ops_capability_mask(self):
"""
Gets the controller_ops_capability_mask of this IbIocProfile.
Supported operation types of this controller.: Bit 0 on = Send Messages to IOCs Bit 1 on = Send Messages from IOCs Bit 2 on = RDMA Read Requests to IOCs Bit 3 on = RDMA Read Requests from IOCs Bit 4 on = RDMA Write Requests to IOCs Bit 5 on = RDMA Write Requests from IOCs Bit 6 on = Atomic operations to IOCs Bit 7 on = Atomic operations from IOCs
:return: The controller_ops_capability_mask of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._controller_ops_capability_mask
@controller_ops_capability_mask.setter
def controller_ops_capability_mask(self, controller_ops_capability_mask):
"""
Sets the controller_ops_capability_mask of this IbIocProfile.
Supported operation types of this controller.: Bit 0 on = Send Messages to IOCs Bit 1 on = Send Messages from IOCs Bit 2 on = RDMA Read Requests to IOCs Bit 3 on = RDMA Read Requests from IOCs Bit 4 on = RDMA Write Requests to IOCs Bit 5 on = RDMA Write Requests from IOCs Bit 6 on = Atomic operations to IOCs Bit 7 on = Atomic operations from IOCs
:param controller_ops_capability_mask: The controller_ops_capability_mask of this IbIocProfile.
:type: int
"""
self._controller_ops_capability_mask = controller_ops_capability_mask
@property
def service_entries(self):
"""
Gets the service_entries of this IbIocProfile.
The number of entries in the service entries table
:return: The service_entries of this IbIocProfile.
:rtype: int
:required/optional: required
"""
return self._service_entries
@service_entries.setter
def service_entries(self, service_entries):
"""
Sets the service_entries of this IbIocProfile.
The number of entries in the service entries table
:param service_entries: The service_entries of this IbIocProfile.
:type: int
"""
self._service_entries = service_entries
@property
def id_string(self):
"""
Gets the id_string of this IbIocProfile.
A UTF-8 encoded string for identifying the controller to user.
:return: The id_string of this IbIocProfile.
:rtype: str
:required/optional: required
"""
return self._id_string
@id_string.setter
def id_string(self, id_string):
"""
Sets the id_string of this IbIocProfile.
A UTF-8 encoded string for identifying the controller to user.
:param id_string: The id_string of this IbIocProfile.
:type: str
"""
self._id_string = id_string
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
if self is None:
return None
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if self is None or other is None:
return None
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
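
# Usage sketch: populate a profile and serialize it. Note that to_dict() keys
# use the python attribute names (e.g. 'ioc_guid'), not the JSON names:
#
#     p = IbIocProfile()
#     p.ioc_guid = '0002:c903:0000:0001'
#     print(p.to_dict()['ioc_guid'])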
| 1.382813 | 1 |
yandex_tracker_client/uriutils.py | TurboKach/yandex_tracker_client | 17 | 12798946 | <filename>yandex_tracker_client/uriutils.py<gh_stars>10-100
# coding: utf-8
import re
VARIABLE = re.compile(r'{([\w\d\-_\.]+)}')
class Matcher(object):
def __init__(self):
self._patterns = []
def add(self, uri, resource, priority=0):
parts = uri.strip('/').split('/')
pattern_parts = []
for part in parts:
is_variable = VARIABLE.search(part)
if is_variable:
pattern_part = r'(?P<{0}>[\w\d\-\_\.]+)'.format(
is_variable.group(1)
)
pattern_parts.append(pattern_part)
else:
pattern_parts.append(part)
pattern = re.compile('/'.join(pattern_parts))
self._patterns.append((
priority,
pattern,
resource
))
        # Sort by priority, highest first (a full re-sort is fine for our N < 20).
        self._patterns.sort(key=lambda it: it[0], reverse=True)
def match(self, uri):
path = uri.strip('/')
for _, pattern, value in self._patterns:
match = pattern.match(path)
if match:
return value
return None
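
# Usage sketch: register templated URIs, then resolve a concrete path.
#
#     m = Matcher()
#     m.add('/issues/{issue_id}', 'IssueResource')
#     assert m.match('/issues/TEST-42') == 'IssueResource'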
| 2.703125 | 3 |
pokepay/response/bulk_transaction.py | pokepay/pokepay-partner-python-sdk | 0 | 12798947 | # DO NOT EDIT: File is generated by code generator.
from pokepay_partner_python_sdk.pokepay.response.response import PokepayResponse
class BulkTransaction(PokepayResponse):
def __init__(self, response, response_body):
super().__init__(response, response_body)
self.id = response_body['id']
self.request_id = response_body['request_id']
self.name = response_body['name']
self.description = response_body['description']
self.status = response_body['status']
self.error = response_body['error']
self.error_lineno = response_body['error_lineno']
self.submitted_at = response_body['submitted_at']
self.updated_at = response_body['updated_at']
def id(self):
return self.id
def request_id(self):
return self.request_id
def name(self):
return self.name
def description(self):
return self.description
def status(self):
return self.status
def error(self):
return self.error
def error_lineno(self):
return self.error_lineno
def submitted_at(self):
return self.submitted_at
def updated_at(self):
return self.updated_at
| 2.171875 | 2 |
python/main.py | M507/CellTower | 3 | 12798948 | from flask import Flask, request
import sys, requests, json
from multiprocessing import Process
app = Flask(__name__)
@app.after_request
def add_headers(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
return response
@app.route('/', defaults={'path': ''}, methods=['POST'])
@app.route('/<path:path>', methods=['POST'])
def catch_all(path):
try:
# Get the body of the POST req
body = request.data
s = 'You want path: {} and data: {}'.format(path, body)
print("All: "+s, file=sys.stderr)
dataString = body.decode("utf-8")
        # Get the index of the first '&' and keep everything after it
        and_index = [pos for pos, char in enumerate(dataString) if char == '&'][0]
        data = dataString[(and_index + 1):]
print("striped: "+data, file=sys.stderr)
jsonData = json.loads(data)
        r = requests.post('http://10.10.1.140:9200/'+path, json={"username": jsonData['username'], "password": jsonData['password']})
print(r.status_code, file=sys.stderr)
return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}
except:
return json.dumps({'rip': True}), 404, {'ContentType': 'application/json'}
def http_app(ip, port):
    app.run(host=ip, port=port)
if __name__ == '__main__':
ip = '0.0.0.0'
httpPort = 5150
httpsPort = 5151
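    # The plain-HTTP listener runs in a daemon child process; the HTTPS server
    # runs in the main process and expects cert.pem / key.pem beside the script.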
Process(target=http_app, args=(ip, httpPort), daemon=True).start()
app.run(host=ip, port=httpsPort, ssl_context=('cert.pem', 'key.pem'))
| 2.515625 | 3 |
setup.py | jackz314/PyEEGLAB | 1 | 12798949 | <filename>setup.py
import setuptools
import os
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
version = None
with open(os.path.join('eeglabio', '_version.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip("'")
break
if version is None:
version = "0.0.1"
with open("requirements.txt") as f:
requires = f.read().splitlines()
GITHUB_URL = "https://github.com/jackz314/eeglabio"
setuptools.setup(
name="eeglabio",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="I/O support for EEGLAB files in Python",
license="BSD (3-clause)",
long_description=long_description,
long_description_content_type="text/markdown",
url=GITHUB_URL,
download_url=GITHUB_URL,
project_urls={
"Source": GITHUB_URL,
"Tracker": GITHUB_URL + '/issues',
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(exclude=("*tests",)),
python_requires=">=3.6",
include_package_data=True,
install_requires=requires,
keywords="EEG MEG MNE EEGLAB",
)
| 1.632813 | 2 |
scripts/steepestDescentDemo.py | nappaillav/pyprobml | 0 | 12798950 | <reponame>nappaillav/pyprobml<filename>scripts/steepestDescentDemo.py
# Author: <NAME>
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import line_search
def aokiFn(x):
"""
    F(x, y) = 0.5 * (x^2 - y)^2 + 0.5 * (x - 1)^2, evaluated on a grid.
"""
    f = 0.5 * np.square(np.square(x[0]) - x[1]) + 0.5 * np.square(x[0] - 1)
return f
def aoki(x):
"""
F(x,y) = 0.5 x (x^2 - y)^2 + 0.5 x (x-1)^2
"""
f = 0.5 * np.square(np.square(x[0]) - x[1]) + 0.5 * np.square(x[0] - 1)
return f
def aoki_gd(x):
"""
    First-order derivative (gradient, nabla f) of the aoki function.
"""
g_x = 2 * np.dot((np.square(x[0]) - x[1]), x[0]) + x[0] - 1
g_y = -1 * (np.square(x[0]) - x[1])
return np.array((g_x, g_y))
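
# Sanity check: for f(x, y) = 0.5*(x^2 - y)^2 + 0.5*(x - 1)^2 the gradient is
#   df/dx = 2x*(x^2 - y) + (x - 1),   df/dy = -(x^2 - y),
# which matches aoki_gd above.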
def aoki_hess(x):
"""
    Second-order derivative: the Hessian matrix (nabla^2 f) of the aoki function.
"""
g_xx = 6 * np.square(x[0]) - 2*x[1] + 1
g_xy = -2 * x[0]
g_yy = 1
H = np.diag((2,2))
H[0][0] = g_xx
H[0][1] = g_xy
H[1][0] = g_xy
H[1][1] = g_yy
return H
def gradient_descent(x0, f, f_prime, hessian=None, adaptative=False):
"""
Steepest-Descent algorithm with option for line search
"""
x_i, y_i = x0
all_x_i = list()
all_y_i = list()
all_f_i = list()
for i in range(1, 100):
all_x_i.append(x_i)
all_y_i.append(y_i)
all_f_i.append(f([x_i, y_i]))
dx_i, dy_i = f_prime(np.asarray([x_i, y_i]))
if adaptative:
            # Compute a step size using a line_search to satisfy the Wolfe
            # conditions
step = line_search(f, f_prime,
np.r_[x_i, y_i], -np.r_[dx_i, dy_i],
np.r_[dx_i, dy_i], c2=.05)
step = step[0]
if step is None:
step = 0
else:
step = 1
x_i += - step*dx_i
y_i += - step*dy_i
if np.abs(all_f_i[-1]) < 1e-16:
break
return all_x_i, all_y_i, all_f_i
def main():
x1 = np.arange(0, 2, 0.1)
x2 = np.arange(-0.5, 3, 0.1)
x = np.meshgrid(x1, x2)
z = aokiFn(np.array(x))
plt.contour(x1, x2, z, 50)
    plt.plot(1, 1, 'go', markersize=10)  # matplotlib's marker-size kwarg is lowercase
    r = gradient_descent(np.array((0.0, 0.0)), aoki, aoki_gd, hessian=aoki_hess, adaptative=True)
plt.scatter(r[0][:10], r[1][:10])
plt.plot(r[0][:10], r[1][:10])
plt.title('exact line search')
plt.savefig("steepestDescentDemo.png", dpi = 300)
plt.show()
if __name__ == "__main__":
main()
| 2.90625 | 3 |