max_stars_repo_path (string, 4 to 197 chars) | max_stars_repo_name (string, 6 to 120 chars) | max_stars_count (int64, 0 to 191k) | id (string, 1 to 8 chars) | content (string, 6 to 964k chars) | score (float64, -0.88 to 3.95) | int_score (int64, 0 to 4)
---|---|---|---|---|---|---
em.py | Fatman13/gta_swarm | 0 | 89604 | <gh_stars>0
def send_email(user, pwd, recipient, subject, body):
import smtplib
gmail_user = user
gmail_pwd = <PASSWORD>
FROM = user
TO = recipient if type(recipient) is list else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        server.starttls()
        server.login(gmail_user, gmail_pwd)
        server.sendmail(FROM, TO, message)
        server.close()
        print('successfully sent the mail')
    except Exception:
        print("failed to send mail")
send_email('<EMAIL>', '<PASSWORD>', '<EMAIL>', 'Test from python', 'TEST, TEST, TEST.')
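# Note: recipient may also be a list, e.g. send_email(user, pwd, [rcpt_a, rcpt_b], ...)
# (rcpt_a/rcpt_b are illustrative), and Gmail accounts typically need an
# app-specific password for plain SMTP logins like this.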
# # SMTP_SSL Example
# server_ssl = smtplib.SMTP_SSL("smtp.gmail.com", 465)
# server_ssl.ehlo() # optional, called by login()
# server_ssl.login('<EMAIL>', '<PASSWORD>')
# # ssl server doesn't support or need tls, so don't call server_ssl.starttls()
# server_ssl.sendmail(FROM, TO, message)
# #server_ssl.quit()
# server_ssl.close()
# print 'successfully sent the mail' | 1.898438 | 2 |
python/oneflow/nn/modules/in_top_k.py | wangyuyue/oneflow | 1 | 89732 | <reponame>wangyuyue/oneflow
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
class InTopk(Module):
def __init__(self, k) -> None:
super().__init__()
self._in_top_k = (
flow.builtin_op("in_top_k")
.Input("targets")
.Input("predictions")
.Output("out")
.Attr("k", k)
.Build()
)
def forward(self, targets, predictions):
assert (
targets.shape[0] == predictions.shape[0]
), "The num of targets must equal the num of predictions"
assert len(targets.shape) == 1, "The dimension of targets must be 1"
assert len(predictions.shape) == 2, "The dimension of predictions must be 2"
return self._in_top_k(targets, predictions)
def in_top_k_op(targets, predictions, k):
"""Says whether the targets are in the top K predictions.
Args:
targets (Tensor): the target tensor of type int32 or int64.
        predictions (Tensor): the predictions tensor of type float32.
k (int): Number of top elements to look at for computing precision.
Returns:
oneflow.Tensor: A Tensor of type bool. Computed Precision at k as a bool Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> targets1 = flow.Tensor(np.array([3, 1]), dtype=flow.int32)
>>> predictions1 = flow.Tensor(np.array([[0.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 0.0],]), dtype=flow.float32)
>>> out1 = flow.in_top_k(targets1, predictions1, k=1)
>>> out1
tensor([1, 0], dtype=oneflow.int8)
>>> out2 = flow.in_top_k(targets1, predictions1, k=2)
>>> out2
tensor([1, 1], dtype=oneflow.int8)
>>> targets2 = flow.Tensor(np.array([3, 1]), dtype=flow.int32, device=flow.device('cuda'))
>>> predictions2 = flow.Tensor(np.array([[0.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 0.0],]), dtype=flow.float32, device=flow.device('cuda'))
>>> out3 = flow.in_top_k(targets2, predictions2, k=1)
>>> out3
tensor([1, 0], device='cuda:0', dtype=oneflow.int8)
"""
return InTopk(k=k)(targets, predictions)[0]
@register_tensor_op("in_top_k")
def in_top_k_op_tensor(targets, predictions, k):
"""
in_top_k() -> Tensor
See :func:`oneflow.in_top_k`
"""
return InTopk(k=k)(targets, predictions)[0]
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 1.820313 | 2 |
vars_file.py | Pradam/robot-framework | 0 | 89860 | <reponame>Pradam/robot-framework
class PradamClass(object):
def __init__(self, name, age):
self._name = name
self._age = age
@property
def name(self):
return self._name
@name.setter
def name(self, val):
if val.lower() == 'pradam':
            print('Correct Name.')
else:
raise NameError('Not a Good Name')
self._name = val
POG = PradamClass('pradam', 78)
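# Usage sketch: assigning POG.name = 'Pradam' prints 'Correct Name.' and stores
# the value; any other assignment raises NameError('Not a Good Name').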
DICTIONARY = {1: 'one', 2: 'two', 3: 'three'} | 1.71875 | 2 |
Linux/etc/decript.py | Dave360-crypto/Oblivion | 339 | 89988 | #!/usr/bin/python
import os
import pathlib
from cryptography.fernet import Fernet
# Global variables/Variáveis globais.
path_atual_dc = str(pathlib.Path(__file__).parent.absolute())
path_dc_final = path_atual_dc.replace('/etc','')
def decript_file(arquivo, chave=None):
"""
Decrypt a file/Desencriptografa uma arquivo.
:param arquivo: Path file/Local do arquivo.
:param chave: Key/Chave
"""
    if chave is None:
with open(f'{path_dc_final}/etc/key_crypt.txt', 'r') as pegar_key:
key = pegar_key.read()
input_file = arquivo #+ '.encrypted'
output_file = arquivo
with open(input_file, 'rb') as f:
data = f.read()
fernet = Fernet(key)
decrypted = fernet.decrypt(data)
with open(output_file, 'wb') as f:
f.write(decrypted)
arquivo_f = str(arquivo)
arquivo_f = arquivo_f.replace('.encrypted', '')
os.rename(arquivo, arquivo_f)
else:
try:
key = str(chave)
input_file = arquivo
output_file = arquivo
with open(input_file, 'rb') as f:
data = f.read()
fernet = Fernet(key)
try:
decrypted = fernet.decrypt(data)
with open(output_file, 'wb') as f:
f.write(decrypted)
arquivo_f = str(arquivo)
arquivo_f = arquivo_f.replace('.encrypted', '')
os.rename(arquivo, arquivo_f)
except:
pass
except:
pass
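
# Minimal usage sketch (paths and key below are illustrative, not part of the project):
#   decript_file('/home/user/doc.txt.encrypted')             # key read from etc/key_crypt.txt
#   decript_file('/home/user/doc.txt.encrypted', chave=key)  # Fernet key supplied by the caller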
| 1.90625 | 2 |
LeetCode/Python/0747. Largest Number At Least Twice of Others.py | rayvantsahni/Competitive-Programming-Codes | 1 | 90116 | <filename>LeetCode/Python/0747. Largest Number At Least Twice of Others.py
class Solution:
def dominantIndex(self, nums: List[int]) -> int:
max_index = 0
for index, num in enumerate(nums):
if num > nums[max_index]:
max_index = index
for index, num in enumerate(nums):
if index == max_index:
continue
            if nums[max_index] < 2 * num:
return -1
return max_index
| 2.453125 | 2 |
tests/unit/app/api/test_validators.py | datphan/moviecrab | 0 | 90244 | # -*- coding: utf-8 -*-
"""tests for api.validators"""
from mock import MagicMock
from tests.unit import UnitTestCase
from app.api.validators import DummyForm, DummyField, Base, password
class DummyFormTestCase(UnitTestCase):
def test_class(self):
self.assertTrue(issubclass(DummyForm, dict))
class DummyFieldTestCase(UnitTestCase):
def test_init(self):
dummy_field = DummyField(None)
self.assertEqual(dummy_field.data, None)
self.assertEqual(dummy_field.errors, [])
self.assertEqual(dummy_field.raw_data, None)
dummy_field = DummyField('a', ('something wrong', ), 'a ')
self.assertEqual(dummy_field.data, 'a')
self.assertEqual(dummy_field.errors, ['something wrong'])
self.assertEqual(dummy_field.raw_data, 'a ')
def test_gettext(self):
dummy_field = DummyField('a')
self.assertEqual(dummy_field.gettext('hello'), 'hello')
self.assertEqual(dummy_field.gettext(None), None)
def test_ngettext(self):
dummy_field = DummyField('a')
self.assertEqual(dummy_field.ngettext('one', 'many', 1), 'one')
self.assertEqual(dummy_field.ngettext('one', 'many', 2), 'many')
class BaseTestCase(UnitTestCase):
def test_init(self):
mock_validator_class = MagicMock()
mock_validator_class.return_value = 'validator'
base = Base(mock_validator_class, 'a', message='hello')
mock_validator_class.assert_called_once_with('a', message='hello')
self.assertEqual(base.validator, 'validator')
def test_call(self):
mock_validator = MagicMock()
mock_validator_class = MagicMock(return_value=mock_validator)
value = Base(mock_validator_class)('test')
self.assertEqual(mock_validator.call_count, 1)
self.assertEqual(value, 'test')
class LengthTestCase(UnitTestCase):
def test_init(self):
from app.api.validators import Length, wtf_length
self.assertRaises(AssertionError, Length)
self.assertRaises(AssertionError, Length, min=3, max=2)
length_validator = Length(min=0, max=5, message='something wrong')
validator = length_validator.validator
self.assertTrue(isinstance(validator, wtf_length))
self.assertEqual(validator.min, 0)
self.assertEqual(validator.max, 5)
self.assertEqual(validator.message, 'something wrong')
class NumberRangeTestCase(UnitTestCase):
def test_init(self):
from app.api.validators import NumberRange, wtf_number_range
number_range_validator = NumberRange(min=0, max=5, message='something wrong')
validator = number_range_validator.validator
self.assertTrue(isinstance(validator, wtf_number_range))
self.assertEqual(validator.min, 0)
self.assertEqual(validator.max, 5)
self.assertEqual(validator.message, 'something wrong')
def test_call(self):
from app.api.validators import NumberRange
number_range_validator = NumberRange(min=0, max=5)
self.assertEqual(number_range_validator('0'), 0)
self.assertEqual(number_range_validator('5'), 5)
self.assertRaises(ValueError, number_range_validator, 6)
self.assertRaises(ValueError, number_range_validator, '-1')
class EmailTestCase(UnitTestCase):
def test_init(self):
from app.api.validators import Email, wtf_email
email_validator = Email(message='something wrong')
validator = email_validator.validator
self.assertTrue(isinstance(validator, wtf_email))
self.assertEqual(validator.message, 'something wrong')
class IPAddressTestCase(UnitTestCase):
def test_init(self):
from app.api.validators import IPAddress, wtf_ip_address
ip_address_validator = IPAddress(ipv4=False, ipv6=True, message='something wrong')
validator = ip_address_validator.validator
self.assertTrue(isinstance(validator, wtf_ip_address))
self.assertFalse(validator.ipv4)
self.assertTrue(validator.ipv6)
self.assertEqual(validator.message, 'something wrong')
class MacAddressTestCase(UnitTestCase):
def test_init(self):
from app.api.validators import MacAddress, wtf_mac_address
mac_address_validator = MacAddress(message='something wrong')
validator = mac_address_validator.validator
self.assertTrue(isinstance(validator, wtf_mac_address))
self.assertEqual(validator.message, 'something wrong')
class AnyOfTestCase(UnitTestCase):
def test_init(self):
from app.api.validators import AnyOf, wtf_any_of
formatter = lambda x: x
any_of_validator = AnyOf([1, 3, 5], message='something wrong', values_formatter=formatter)
validator = any_of_validator.validator
self.assertTrue(isinstance(validator, wtf_any_of))
self.assertEqual(validator.values, [1, 3, 5])
self.assertEqual(validator.message, 'something wrong')
self.assertEqual(validator.values_formatter, formatter)
class NoneOfTestCase(UnitTestCase):
def test_unit(self):
from app.api.validators import NoneOf, wtf_none_of
formatter = lambda x: x
none_of_validator = NoneOf([1, 3, 5], message='something wrong', values_formatter=formatter)
validator = none_of_validator.validator
self.assertTrue(isinstance(validator, wtf_none_of))
self.assertEqual(validator.values, [1, 3, 5])
self.assertEqual(validator.message, 'something wrong')
self.assertEqual(validator.values_formatter, formatter)
class ValidatorsTestCase(UnitTestCase):
def test_password(self):
too_short_msg = 'password too short, length must be greater than 6'
space_contained_msg = 'password must not contain space(s)'
password_check_list = (
('a', False, too_short_msg),
('abcde', False, too_short_msg),
(' ', False, space_contained_msg),
('a bcdefgh', False, space_contained_msg),
(' abcde', False, space_contained_msg),
('abcdef', True, None),
(' abcdef ', False, space_contained_msg),
)
for pwd, correct, msg in password_check_list:
if correct:
self.assertEqual(password(pwd), pwd)
else:
with self.assertRaises(ValueError) as ve:
password(pwd)
self.assertEqual(ve.exception.message, msg, 'with the case of `{}`'.format(pwd))
| 1.703125 | 2 |
interfaces/preferencias.py | Hyago0897/locadora_carros_grupo1_info3 | 2 | 90372 | <reponame>Hyago0897/locadora_carros_grupo1_info3
import tkinter as tk
import tkinter.ttk as ttk
class TelaPreferencias(tk.Frame):
def __init__(self, master, arq_preferencias):
tk.Frame.__init__(self, master)
self.arq = arq_preferencias
master.title("PREFERÊNCIAS")
self.container1 = tk.Frame(master)
self.container1.pack(fill="both", expand=1)
self.container2 = tk.Frame(self.container1)
self.container2.pack(fill="y", expand=1, padx=3, pady=3)
self.btnOk = tk.Button(self.container1, text="OK \N{CHECK MARK}")
self.btnOk.pack(side="bottom", anchor="e", padx=6, pady=6)
self.checkVar = tk.IntVar()
self.check_backup = tk.Checkbutton(self.container2,
text="Fazer backup",
variable=self.checkVar,
command=self.habilita_backup)
self.check_backup.grid(column=0, row=0, sticky="w")
self.lbdir = tk.Label(self.container2, text="Diretórios para backup")
self.lbdir.grid(column=0, row=1, padx=(20, 0), sticky="w")
self.btn_preferencias = tk.Button(self.container2, text="...")
self.btn_preferencias.grid(column=1, row=1, padx=1, pady=1)
self.lbinter = tk.Label(self.container2,
text="Intervalo para backup (min)")
self.lbinter.grid(column=0, row=2, padx=(20, 0), sticky="w")
self.tempo_backup = ttk.Spinbox(self.container2,
from_=1,
to=60,
increment=1,
width=2,
state="readonly")
self.tempo_backup.grid(column=1, row=2, sticky="w", padx=2, pady=2)
self.tempo_backup.set(1)
self.habilita_backup()
tk.Label(self.container2,
text="Importar arquivo de backup").grid(column=0,
row=3,
sticky="w",
padx=(8, 2),
pady=6)
self.btn_importar = tk.Button(self.container2, text="...")
self.btn_importar.grid(column=1, row=3, padx=2, pady=2, sticky="w")
def habilita_backup(self):
status = bool(self.checkVar.get())
if status:
self.btn_preferencias.configure(state="normal")
self.tempo_backup.configure(state="readonly")
self.lbdir.configure(state="normal")
self.lbinter.configure(state="normal")
else:
self.btn_preferencias.configure(state="disabled")
self.tempo_backup.configure(state="disabled")
self.lbdir.configure(state="disabled")
self.lbinter.configure(state="disabled")
if __name__ == "__main__":
tela = tk.Tk()
TelaPreferencias(tela, '')
tela.mainloop()
| 1.796875 | 2 |
2018/11/code.py | Akumatic/Advent-of-Code | 22 | 90500 | """ https://adventofcode.com/2018/day/11 """
def readFile():
    with open(f"{__file__[:-len('code.py')]}input.txt", "r") as f:
return int(f.read())
def getPowerlevel(x, y, serial):
rackId = x + 10
powerLevel = (rackId * y + serial) * rackId
return (int(powerLevel / 100) % 10) - 5
def createGrid(serial):
grid = []
for j in range(1, 301):
row = []
for i in range(1, 301):
row.append(getPowerlevel(i, j, serial))
grid.append(row)
return grid
def getBiggestField(grid, size):
maxSum = 0
maxCoords = (-1, -1)
for j in range(300 - size + 1):
for i in range(300 - size + 1):
curSum = 0
for n in range(size):
for m in range(size):
curSum += grid[j + m][i + n]
if curSum > maxSum:
maxSum = curSum
maxCoords = (i + 1, j + 1)
return maxCoords, maxSum
def getSat(grid):
# generates and returns summed-area table
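    # Each entry is the sum of grid values in the rectangle from (0, 0) to
    # (i, j) inclusive, built with the standard recurrence
    #   sat(i, j) = grid[j][i] + sat(i-1, j) + sat(i, j-1) - sat(i-1, j-1)
    # so getBiggestFieldSAT below can read any square's sum in O(1) via
    #   sat(i+s, j+s) + sat(i, j) - sat(i+s, j) - sat(i, j+s).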
sat = {}
size = len(grid)
for j in range(size):
for i in range(size):
value = grid[j][i] + sat.get(str((i - 1, j)), 0)
value += sat.get(str((i, j - 1)), 0) - sat.get(str((i - 1, j - 1)), 0)
sat[str((i, j))] = value
return sat
def getBiggestFieldSAT(sat, size):
maxSum = 0
maxCoords = (-1, -1)
for j in range(300 - size):
for i in range(300 - size):
ip, jp = i + size, j + size
curSum = sat[str((i, j))] + sat[str((ip, jp))] - sat[str((ip, j))] - sat[str((i, jp))]
if curSum > maxSum:
maxSum = curSum
maxCoords = (i + 2, j + 2)
return maxCoords, maxSum
def part1(value):
grid = createGrid(value)
return getBiggestField(grid, 3)[0]
def part2(value):
grid = createGrid(value)
sat = getSat(grid)
maxSum = 0
size = -1
maxCoords = (-1, -1)
for i in range(300):
curCoords, curSum = getBiggestFieldSAT(sat, i)
if curSum > maxSum:
maxSum = curSum
size = i
maxCoords = curCoords
return (maxCoords[0], maxCoords[1], size)
if __name__ == "__main__":
value = readFile()
print(f"Part 1: {part1(value)}")
print(f"Part 2: {part2(value)}") | 2.5625 | 3 |
mx_utils/gpu.py | hallmx/mx_utils | 0 | 90628 | # AUTOGENERATED! DO NOT EDIT! File to edit: 04_device.ipynb (unless otherwise specified).
__all__ = ['versions']
# Cell
import torch
import fastai  # both are required by versions() below


def versions():
    "Checks whether a GPU is available and, if so, displays device details with the CUDA, cuDNN, PyTorch and fastai versions"
print("GPU: ", torch.cuda.is_available())
if torch.cuda.is_available() == True:
print("Device = ", torch.device(torch.cuda.current_device()))
print("Cuda version - ", torch.version.cuda)
print("cuDNN version - ", torch.backends.cudnn.version())
print("PyTorch version - ", torch.__version__)
print("fastai version", fastai.__version__) | 1.914063 | 2 |
pycs/astro/wl/__init__.py | sfarrens/cosmostat | 3 | 90756 | # -*- coding: utf-8 -*-
"""WEAK LENSING ROUTINES
This module contains submodules for weak gravitational lensing.
"""
__all__ = ['lenspack', 'mass_mapping']
from . import *
| 0.585938 | 1 |
ParadoxTrading/Chart/BarSeries.py | gsamarakoon/ParadoxTrading | 95 | 90884 | import typing
from PyQt5.QtChart import QValueAxis, QChart, QBarSeries, QBarSet
from ParadoxTrading.Chart.SeriesAbstract import SeriesAbstract
class BarSeries(SeriesAbstract):
def __init__(
self, _name: str,
_x_list: typing.Sequence,
_y_list: typing.Sequence,
_color: typing.Any = None,
_show_value: bool = True,
):
super().__init__(_name, _x_list, _y_list, _color, _show_value)
self.type = SeriesAbstract.BAR
def addSeries(
self, _x2idx: typing.Dict, _idx2x: list, _chart: QChart,
_axis_x: QValueAxis, _axis_y: QValueAxis
):
bar_set = QBarSet(self.name)
tmp_dict = dict(zip(self.x_list, self.y_list))
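        # Align this series with the chart's shared x axis: emit the stored y
        # value where this series has a sample, and 0 for missing x ticks.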
for k in _idx2x:
if k in tmp_dict.keys():
bar_set.append(tmp_dict[k])
else:
bar_set.append(0)
if self.color is not None:
bar_set.setColor(self.color)
bar_series = QBarSeries()
bar_series.append(bar_set)
_chart.addSeries(bar_series)
_chart.setAxisX(_axis_x, bar_series)
_chart.setAxisY(_axis_y, bar_series)
if self.show_value:
self.createShow()
| 2.125 | 2 |
real_esrgan_gui/utils/logger.py | noeru-desu/Real-ESRGAN-GUI | 1 | 91012 | '''
Author : noeru_desu
Date : 2021-08-28 18:35:58
LastEditors : noeru_desu
LastEditTime : 2021-10-06 14:30:00
Description : logger
'''
from logging import StreamHandler, getLogger
from colorlog import ColoredFormatter
class Logger(object):
def __init__(self, logger_registered_name: str = 'logger', initial_level: int = 20, debug_format=False):
self.logger = getLogger(logger_registered_name)
self.logger.setLevel(initial_level)
# Console Handler
self.ch = StreamHandler()
self.ch.setFormatter(self.console_fmt(debug_format))
self.logger.addHandler(self.ch)
self.ch.setLevel(initial_level)
self.debug = self.logger.debug
self.info = self.logger.info
self.warning = self.logger.warning
self.error = self.logger.error
self.critical = self.logger.critical
self.exception = self.logger.exception
def console_fmt(self, debug_format):
if debug_format:
info_format = '[%(relativeCreated)d] [%(module)s] [%(lineno)s]'
datefmt = None
else:
info_format = '[%(asctime)s]'
datefmt = '%H:%M:%S'
return ColoredFormatter(
info_format + ' [%(log_color)s%(levelname)s%(reset)s] %(message_log_color)s%(message)s%(reset)s',
log_colors={
'DEBUG': 'blue',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bold_red',
},
secondary_log_colors={
'message': {
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bold_red'
}
},
datefmt=datefmt
)
def set_level(self, level: int):
self.logger.setLevel(level)
self.ch.setLevel(level)
def remove(self):
self.logger.removeHandler(self.ch)
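
# Minimal usage sketch (names and levels are illustrative):
#   logger = Logger('real_esrgan_gui', initial_level=10, debug_format=True)
#   logger.info('model loaded')
#   logger.set_level(30)  # from here on, show warnings and above only
#   logger.remove()       # detach the console handler when done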
| 1.851563 | 2 |
nsff_scripts/run_midas.py | frankhome61/nsff | 330 | 91140 | <filename>nsff_scripts/run_midas.py
"""
Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
import cv2
import numpy as np
from torchvision.transforms import Compose
from models.midas_net import MidasNet
from models.transforms import Resize, NormalizeImage, PrepareForNet
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
VIZ = True
def read_image(path):
"""Read image and output RGB image (0-1).
Args:
path (str): path to file
Returns:
array: RGB image (0-1)
"""
img = cv2.imread(path)
if img.ndim == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
return img
def _minify(basedir, factors=[], resolutions=[]):
'''
Minify the images to small resolution for training
'''
needtoload = False
for r in factors:
imgdir = os.path.join(basedir, 'images_{}'.format(r))
if not os.path.exists(imgdir):
needtoload = True
for r in resolutions:
imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
if not os.path.exists(imgdir):
needtoload = True
if not needtoload:
return
from shutil import copy
from subprocess import check_output
import glob
imgdir = os.path.join(basedir, 'images')
imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
imgdir_orig = imgdir
wd = os.getcwd()
for r in factors + resolutions:
if isinstance(r, int):
name = 'images_{}'.format(r)
resizearg = '{}%'.format(100./r)
else:
name = 'images_{}x{}'.format(r[1], r[0])
resizearg = '{}x{}'.format(r[1], r[0])
imgdir = os.path.join(basedir, name)
if os.path.exists(imgdir):
continue
print('Minifying', r, basedir)
os.makedirs(imgdir)
check_output('cp {}/* {}'.format(imgdir_orig, imgdir), shell=True)
ext = imgs[0].split('.')[-1]
print(ext)
# sys.exit()
img_path_list = glob.glob(os.path.join(imgdir, '*.%s'%ext))
for img_path in img_path_list:
save_path = img_path.replace('.jpg', '.png')
img = cv2.imread(img_path)
print(img.shape, r)
cv2.imwrite(save_path,
cv2.resize(img,
(r[1], r[0]),
interpolation=cv2.INTER_AREA))
if ext != 'png':
check_output('rm {}/*.{}'.format(imgdir, ext), shell=True)
print('Removed duplicates')
print('Done')
to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)
import imageio
def run(basedir,
input_path,
output_path,
model_path,
resize_height=288):
"""Run MonoDepthNN to compute depth maps.
Args:
input_path (str): path to input folder
output_path (str): path to output folder
model_path (str): path to saved model
"""
print("initialize")
img0 = [os.path.join(basedir, 'images', f) \
for f in sorted(os.listdir(os.path.join(basedir, 'images'))) \
if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]
sh = cv2.imread(img0).shape
height = resize_height
factor = sh[0] / float(height)
width = int(round(sh[1] / factor))
_minify(basedir, resolutions=[[height, width]])
# select device
device = torch.device("cuda")
print("device: %s" % device)
small_img_dir = input_path + '_*x' + str(resize_height) + '/'
print(small_img_dir)
small_img_path = sorted(glob.glob(glob.glob(small_img_dir)[0] + '/*.png'))[0]
small_img = cv2.imread(small_img_path)
print('small_img', small_img.shape)
# Portrait Orientation
if small_img.shape[0] > small_img.shape[1]:
input_h = 640
input_w = int(round( float(input_h) / small_img.shape[0] * small_img.shape[1]))
# Landscape Orientation
else:
input_w = 640
input_h = int(round( float(input_w) / small_img.shape[1] * small_img.shape[0]))
print('Monocular depth input_w %d input_h %d '%(input_w, input_h))
# load network
model = MidasNet(model_path, non_negative=True)
transform_1 = Compose(
[
Resize(
input_w,
input_h,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method="upper_bound",
image_interpolation_method=cv2.INTER_AREA,
),
NormalizeImage(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
PrepareForNet(),
]
)
model.to(device)
model.eval()
# get input
img_names = sorted(glob.glob(os.path.join(input_path, "*")))
num_images = len(img_names)
# create output folder
os.makedirs(output_path, exist_ok=True)
print("start processing")
for ind in range(len(img_names)):
img_name = img_names[ind]
print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))
# input
img = read_image(img_name)
img_input_1 = transform_1({"image": img})["image"]
# compute
with torch.no_grad():
sample_1 = torch.from_numpy(img_input_1).to(device).unsqueeze(0)
prediction = model.forward(sample_1)
prediction = (
torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=[small_img.shape[0],
small_img.shape[1]],
mode="nearest",
)
.squeeze()
.cpu()
.numpy()
)
# output
filename = os.path.join(
output_path, os.path.splitext(os.path.basename(img_name))[0]
)
if VIZ:
            if not os.path.exists('./midas_outputs'):
                os.makedirs('./midas_outputs')
plt.figure(figsize=(12, 6))
plt.subplot(1,2,1)
plt.imshow(img)
plt.subplot(1,2,2)
plt.imshow(prediction, cmap='jet')
            plt.savefig('./midas_outputs/%s'%(img_name.split('/')[-1]))
plt.close()
print(filename + '.npy')
np.save(filename + '.npy', prediction.astype(np.float32))
print("finished")
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str,
help='COLMAP Directory')
# parser.add_argument("--input_w", type=int, default=640,
# help='input image width for monocular depth network')
# parser.add_argument("--input_h", type=int, default=360,
# help='input image height for monocular depth network')
parser.add_argument("--resize_height", type=int, default=288,
help='resized image height for training \
(width will be resized based on original aspect ratio)')
args = parser.parse_args()
BASE_DIR = args.data_path
INPUT_PATH = BASE_DIR + "/images"
OUTPUT_PATH = BASE_DIR + "/disp"
MODEL_PATH = "model.pt"
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH)
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
# compute depth maps
run(BASE_DIR, INPUT_PATH,
OUTPUT_PATH, MODEL_PATH,
args.resize_height)
| 2.125 | 2 |
scripts/kitti_submission.py | xingruiy/RAFT-3D | 133 | 91268 | <reponame>xingruiy/RAFT-3D<filename>scripts/kitti_submission.py
import sys
sys.path.append('.')
from tqdm import tqdm
import os
import numpy as np
import cv2
import argparse
import torch
from lietorch import SE3
import raft3d.projective_ops as pops
from utils import show_image, normalize_image
from data_readers.kitti import KITTIEval
import torch.nn.functional as F
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from glob import glob
from data_readers.frame_utils import *
def display(img, tau, phi):
""" display se3 fields """
fig, (ax1, ax2, ax3) = plt.subplots(1,3)
ax1.imshow(img[:, :, ::-1] / 255.0)
tau_img = np.clip(tau, -0.1, 0.1)
tau_img = (tau_img + 0.1) / 0.2
phi_img = np.clip(phi, -0.1, 0.1)
phi_img = (phi_img + 0.1) / 0.2
ax2.imshow(tau_img)
ax3.imshow(phi_img)
plt.show()
def prepare_images_and_depths(image1, image2, depth1, depth2, depth_scale=1.0):
""" padding, normalization, and scaling """
ht, wd = image1.shape[-2:]
pad_h = (-ht) % 8
pad_w = (-wd) % 8
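    # (-x) % 8 is the smallest non-negative pad that makes a dimension a
    # multiple of 8, e.g. ht = 370 -> pad_h = 6.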
image1 = F.pad(image1, [0,pad_w,0,pad_h], mode='replicate')
image2 = F.pad(image2, [0,pad_w,0,pad_h], mode='replicate')
depth1 = F.pad(depth1[:,None], [0,pad_w,0,pad_h], mode='replicate')[:,0]
depth2 = F.pad(depth2[:,None], [0,pad_w,0,pad_h], mode='replicate')[:,0]
depth1 = (depth_scale * depth1).float()
depth2 = (depth_scale * depth2).float()
image1 = normalize_image(image1.float())
image2 = normalize_image(image2.float())
depth1 = depth1.float()
depth2 = depth2.float()
return image1, image2, depth1, depth2, (pad_w, pad_h)
@torch.no_grad()
def make_kitti_submission(model):
loader_args = {'batch_size': 1, 'shuffle': False, 'num_workers': 1, 'drop_last': False}
test_loader = DataLoader(KITTIEval(), **loader_args)
DEPTH_SCALE = .1
for i_batch, data_blob in enumerate(test_loader):
image1, image2, disp1, disp2, intrinsics = [item.cuda() for item in data_blob]
img1 = image1[0].permute(1,2,0).cpu().numpy()
depth1 = DEPTH_SCALE * (intrinsics[0,0] / disp1)
depth2 = DEPTH_SCALE * (intrinsics[0,0] / disp2)
ht, wd = image1.shape[2:]
image1, image2, depth1, depth2, _ = \
prepare_images_and_depths(image1, image2, depth1, depth2)
Ts = model(image1, image2, depth1, depth2, intrinsics, iters=16)
tau_phi = Ts.log()
        # uncomment to display motion field
# tau, phi = Ts.log().split([3,3], dim=-1)
# tau = tau[0].cpu().numpy()
# phi = phi[0].cpu().numpy()
# display(img1, tau, phi)
# compute optical flow
flow, _, _ = pops.induced_flow(Ts, depth1, intrinsics)
flow = flow[0, :ht, :wd, :2].cpu().numpy()
# compute disparity change
coords, _ = pops.projective_transform(Ts, depth1, intrinsics)
disp2 = intrinsics[0,0] * coords[:,:ht,:wd,2] * DEPTH_SCALE
disp1 = disp1[0].cpu().numpy()
disp2 = disp2[0].cpu().numpy()
KITTIEval.write_prediction(i_batch, disp1, disp2, flow)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='path the model weights')
parser.add_argument('--network', default='raft3d.raft3d', help='network architecture')
parser.add_argument('--radius', type=int, default=32)
args = parser.parse_args()
import importlib
RAFT3D = importlib.import_module(args.network).RAFT3D
model = torch.nn.DataParallel(RAFT3D(args))
model.load_state_dict(torch.load(args.model))
model.cuda()
model.eval()
if not os.path.isdir('kitti_submission'):
os.mkdir('kitti_submission')
os.mkdir('kitti_submission/disp_0')
os.mkdir('kitti_submission/disp_1')
os.mkdir('kitti_submission/flow')
make_kitti_submission(model)
| 1.992188 | 2 |
modelator_py/apalache/__init__.py | informalsystems/modelator-py | 0 | 91396 | <reponame>informalsystems/modelator-py<gh_stars>0
from .args import ApalacheArgs
from .pure import PureCmd as ApalachePureCmd
from .pure import apalache_pure
from .raw import RawCmd as ApalacheRawCmd
from .raw import apalache_raw
| 0.605469 | 1 |
pidcmes_calibration.py | josmet52/amod | 0 | 91524 | <filename>pidcmes_calibration.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pidcmes_calibration.py
author : <NAME>
date : 07.01.2011
version : 1.0.0
maturity : 4 - beta
pidcmes = raspberry PI Direct Current MEaSurement
This program measures the interruption latency time of the Raspberry PI used and save
the values of R1, C1, ref_voltage and interrupt_latency_time in the file pidcmes.ini
thus if the harware changes - modification of the value of electronic components
or use of another Raspberry PI model - the new values can be saved in the
'pidcmes.ini' file by this program
"""
# import the class Pidcmes and initialize it
from pidcmes_lib import Pidcmes
if __name__ == '__main__':
pidcmes = Pidcmes("calibration")
# Set the values of the measurement circuit
    # these values come from the components actually used
R1 = "100E3" # 100 kohms
C1 = "1E-6" # 1 uF
u_trig = "2.5" # LM336 characteristic
#Start the latency interrupt time measurement
print("CALIBRATION")
print("-----------")
print("Install the bridge between pin " + str(pidcmes.pin_cmd) + " and pin " + str(pidcmes.pin_mes) + " for the interrupt latency measurement")
v_ok = input("ENTER to continue")
print("\n... measurement in progress ... (it may take a few seconds)")
# execute the latency _time measurement
int_resp_time = pidcmes.get_interrupt_latency() - 1.1e-3
# print the results
print("\nThe following values are saved in the pidcmes.ini file")
print("--------------------------------------------------------")
print("Reference voltage (LM336) = ", u_trig + " Volts")
print("R1 = " + str(R1)+ " Ohms")
print("C1 = " + str(C1) + " Farads")
print("Interrupt latency = ", '{:.2f}'.format(int_resp_time * 1e3) + " milli seconds")
print("\nRemember to remove the bridge between pin ".upper() + str(pidcmes.pin_cmd) + " and pin ".upper() + str(pidcmes.pin_mes))
print("======================================================\n")
with open("".join([pidcmes.app_dir, '/pidcmes.ini']), 'w') as ini_file:
ini_file.writelines(u_trig + "," + R1 + "," + C1 + "," + '{:.6f}'.format(int_resp_time))
print("Calibration completed ... bye")
| 2.03125 | 2 |
ozone-framework-python-server/tests/test_system_version_view.py | aamduka/ozone | 6 | 91652 | <gh_stars>1-10
from rest_framework.test import APIClient
from django.test import TestCase
from django.conf import settings
requests = APIClient()
class SimpleSystemVersionTest(TestCase):
fixtures = ['resources/fixtures/default_data.json', ]
def test_authentication(self):
requests.login(email='<EMAIL>', password='password')
request = requests.get('/system-version')
self.assertEqual(request.status_code, 200)
requests.logout()
requests.login(email='<EMAIL>', password='password')
request = requests.get('/system-version')
self.assertEqual(request.status_code, 200)
requests.logout()
request = requests.get('/system-version')
self.assertEqual(request.status_code, 403)
def test_get_system_version(self):
requests.login(email='<EMAIL>', password='password')
request = requests.get('/system-version')
self.assertEqual(request.data, {'version': settings.SYSTEM_VERSION})
requests.logout()
def test_post_system_version(self):
requests.login(email='<EMAIL>', password='password')
request = requests.post('/system-version')
self.assertEqual(request.status_code, 405)
requests.logout()
def test_put_system_version(self):
requests.login(email='<EMAIL>', password='password')
request = requests.put('/system-version')
self.assertEqual(request.status_code, 405)
requests.logout()
def test_patch_system_version(self):
requests.login(email='<EMAIL>', password='password')
request = requests.patch('/system-version')
self.assertEqual(request.status_code, 405)
requests.logout()
def test_delete_system_version(self):
requests.login(email='<EMAIL>', password='password')
request = requests.delete('/system-version')
self.assertEqual(request.status_code, 405)
requests.logout()
| 1.367188 | 1 |
script/plot_mse.py | gitter-lab/prmf | 9 | 91780 | <gh_stars>1-10
#!/usr/bin/env python
import argparse, sys
import os, os.path
import re
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
# run/method/evaluate_mse.out
run_regexp = re.compile(r'run\d+')
def parse_eval(fp):
rv = None
with open(fp, 'r') as fh:
i = 0
for line in fh:
line = line.rstrip()
i += 1
if i == 2:
rv = float(line)
return rv
def plot(indir, outdir, eval_fname, title_part, plot_fname):
nmf_rel_path = os.path.join("NMF", eval_fname)
plier_rel_path = os.path.join("PLIER", eval_fname)
xs = []
ys = []
    for fname in os.listdir(indir):
        match_data = run_regexp.match(fname)
        if match_data is not None:
            nmf_eval_fp = os.path.join(indir, fname, nmf_rel_path)
            plier_eval_fp = os.path.join(indir, fname, plier_rel_path)
            nmf_eval_v = parse_eval(nmf_eval_fp)
            plier_eval_v = parse_eval(plier_eval_fp)
            if nmf_eval_v is not None and plier_eval_v is not None:
                xs.append(nmf_eval_v)
                ys.append(plier_eval_v)
xs = np.array(xs)
ys = np.array(ys)
#x_max = np.percentile(xs, 95)
#y_max = np.percentile(ys, 95)
x_max = np.max(xs)
y_max = np.max(ys)
both_max = np.max([x_max, y_max])
plt.scatter(xs, ys, linewidths=2.0)
plt.plot(np.linspace(0, both_max), np.linspace(0, both_max), 'k-')
plt.xlim([0, both_max])
plt.ylim([0, both_max])
plt.xlabel('NMF')
plt.ylabel('PLIER')
plt.title('{} from PLIER-based Simulation'.format(title_part))
    ofp = os.path.join(outdir, plot_fname)
plt.savefig(ofp)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--indir', '-i', help='Directory of results to plot')
parser.add_argument('--outdir', '-o', help='Directory to place plots')
args = parser.parse_args()
plots = [
('evaluate_mse.out', 'Average MSE', 'mse_plot.png'),
('evaluate_mse_match.out', 'Average Matched MSE', 'mse_match_plot.png'),
('evaluate_corr.out', 'Maximum Correlation', 'corr_plot.png'),
('evaluate_corr_match.out', 'Maximum Matched Correlation', 'corr_match_plot.png')
]
for plot_args in plots:
plot(args.indir, args.outdir, plot_args[0], plot_args[1], plot_args[2])
| 1.65625 | 2 |
addons/test_mail/models/__init__.py | SHIVJITH/Odoo_Machine_Test | 0 | 91908 | <filename>addons/test_mail/models/__init__.py
# -*- coding: utf-8 -*-
from . import test_mail_models
from . import test_mail_corner_case_models
from . import test_mail_thread_models
| 0.353516 | 0 |
lldb/packages/Python/lldbsuite/test/lang/cpp/class-template-parameter-pack/TestClassTemplateParameterPack.py | medismailben/llvm-project | 765 | 92036 | <filename>lldb/packages/Python/lldbsuite/test/lang/cpp/class-template-parameter-pack/TestClassTemplateParameterPack.py
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(
__file__, globals(), [
decorators.expectedFailureAll(
compiler="gcc")])
| 0.574219 | 1 |
demo.py | pengyuanzhuo/Detection | 0 | 92164 | # coding: utf-8
import argparse
import torch
import torch.nn.functional as F
import numpy as np
import cv2
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import models.ssd as ssd
import models.detection as detection
from config import Config as cfg
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
CLASSES = ('background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor'
)
def parse():
args = argparse.ArgumentParser('ssd demo')
args.add_argument('img', type=str, help='img path')
args.add_argument('model', type=str, help='model path')
args.add_argument('--topk', '-k', type=int, default=10,
help='top k bbox, default=10')
return args.parse_args()
def preprocess(image):
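    # Match SSD300 inference preprocessing: resize to 300x300, subtract the
    # per-channel BGR means, flip BGR -> RGB, then reorder HWC -> CHW and
    # add a batch dimension.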
x = cv2.resize(image, (300, 300)).astype(np.float32)
x -= (104.0, 117.0, 123.0) # bgr
x = x[:, :, ::-1].copy()
x = torch.from_numpy(x).permute(2, 0, 1)
x = x.unsqueeze(0)
return x
def build_model(checkpoint, device):
model = ssd.build_ssd(cfg)
state = torch.load(checkpoint, map_location=device)
state_dict = dict()
for k, v in state['model'].items():
state_dict[k.replace('module.','')] = v
model.load_state_dict(state_dict)
return model
def main(args):
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
image = cv2.imread(args.img)
inputs = preprocess(image) # (1, c, h, w)
model = build_model(args.model, device).to(device)
with torch.no_grad():
inputs = inputs.to(device)
default_box, loc, conf = model(inputs)
conf = F.softmax(conf, dim=-1)
outputs = detection.detection(default_box, conf, loc,
conf_threshold=0.5,
nms_threshold=0.2,
topk=args.topk,
variance=cfg.variances) # shape=(b, num_classes, topk, 5)
scale = torch.Tensor([image.shape[1::-1]]).repeat(1, 2).squeeze() # [w, h, w, h]
for i in range(outputs.size(1)):
        j = 0  # bbox index within each class
while outputs[0, i, j, -1] > 0:
score = outputs[0, i, j, -1]
label = i
# label_name = TODO
bbox = (outputs[0, i, j, :-1]*scale).cpu().numpy() # (xmin, ymin, xmax, ymax)
cv2.rectangle(image,
(int(bbox[0]), int(bbox[1])),
(int(bbox[2]), int(bbox[3])),
COLORS[i % 3], 2)
cv2.putText(image, CLASSES[label], (int(bbox[0]), int(bbox[1])),
FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
j += 1
cv2.imwrite('./demo.jpg', image)
if __name__ == '__main__':
args = parse()
main(args)
| 2.09375 | 2 |
leetcode/973.py | 1005281342/learn | 1 | 92292 | <filename>leetcode/973.py<gh_stars>1-10
class Solution:
def count_l(self, a_list: list):
a, b = a_list
return a*a + b*b
def kClosest(self, points, K):
"""
:type points: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
m = list()
tmp = dict()
for point in points:
l = self.count_l(point)
if not tmp.get(l):
m.append(l)
tmp[l] = [point]
else:
tmp[l].append(point)
m.sort()
res = []
for x in m:
if len(res) == K:
break
for a in tmp[x]:
res.append(a)
if len(res) == K:
break
return res
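
    # For reference, an equivalent standard-library sketch (ties broken
    # arbitrarily) would be:
    #   import heapq
    #   heapq.nsmallest(K, points, key=lambda p: p[0] ** 2 + p[1] ** 2)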
if __name__ == '__main__':
s = Solution()
print(s.kClosest(points = [[1,3],[-2,2]], K = 1)) | 2.546875 | 3 |
repository/utils.py | d12y12/GitMirror | 0 | 92420 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from os.path import exists, join
from .minisetting import Setting
def get_version(setting: Setting = None):
setting = setting if setting else Setting()
version = ""
with open(setting['VERSION'], "r", encoding='utf8') as version_f:
version = version_f.read().strip()
return version
def get_token(setting: Setting = None, token_type=''):
if not token_type or token_type.upper() not in ['GITHUB', 'GITEE']:
return ()
setting = setting if setting else Setting()
key = token_type.upper() + '_TOKEN'
if exists(setting[key]):
token = ()
with open(setting[key], "r", encoding='utf8') as token_f:
token = token_f.read().strip().split(':')
return (token[0], token[1]) if len(token) == 2 else ()
return ()
def set_logger(setting: Setting, log_enable=True, log_level='DEBUG', log_file=None, log_dir=''):
setting['LOG_ENABLED'] = log_enable
setting['LOG_LEVEL'] = log_level
setting['LOG_FILE'] = log_file
if log_dir:
setting['LOG_DIR'] = log_dir
def config_logging(setting=None):
setting = setting if setting else Setting()
logger = logging.getLogger()
logger.setLevel(setting['LOG_LEVEL'])
formatter = logging.Formatter(setting['LOG_FORMAT'])
if setting['LOG_FILE']:
log_file = logging.FileHandler(join(setting['LOG_DIR'], setting['LOG_FILE']))
log_file.setFormatter(formatter)
console = logging.StreamHandler()
console.setFormatter(formatter)
if setting['LOG_ENABLED']:
if setting['LOG_FILE']:
logger.addHandler(log_file)
logger.addHandler(console)
else:
logger.addHandler(logging.NullHandler())
| 1.585938 | 2 |
models/rcnn.py | alipsgh/deep-mix-nets | 0 | 92548 |
import torch
from models.deep_mix_net import DeepSeqNet
from torch import nn
from torch.nn import functional as F
class RCNN(DeepSeqNet):
def __init__(self, vocab_size, embeddings, embedding_size,
rcnn_num_hidden_layers, rcnn_hidden_size, rcnn_linear_size,
output_dim, dropout_rate, linear_layers_dim, tab_input_dim,
optimizer, learning_rate):
super(RCNN, self).__init__()
self.output_dim = output_dim
self.dropout_rate = dropout_rate
# =========
# R-CNN
# =========
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.rcnn_num_hidden_layers = rcnn_num_hidden_layers
self.rcnn_hidden_size = rcnn_hidden_size
self.rcnn_linear_size = rcnn_linear_size
# Embedding Layer
self.embeddings = nn.Embedding(vocab_size, self.embedding_size)
self.embeddings.weight = nn.Parameter(embeddings, requires_grad=False)
# BiLSTM
self.lstm = nn.LSTM(input_size=self.embedding_size,
num_layers=self.rcnn_num_hidden_layers,
hidden_size=self.rcnn_hidden_size,
dropout=self.dropout_rate,
bidirectional=True)
# Linear layer to get "convolution output" to be passed to Pooling Layer
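        # (the RCNN "convolution": each token is its embedding concatenated with
        # the BiLSTM's left/right contexts, hence the embed + 2 * hidden input
        # width, squashed through tanh before max-pooling)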
self.linear = nn.Sequential(nn.Linear(self.embedding_size + 2 * self.rcnn_hidden_size,
self.rcnn_linear_size), nn.Tanh())
self.dropout = nn.Dropout(self.dropout_rate)
# ==============================
# Feed Forward Neural Networks
# ==============================
self.linear_layers = nn.ModuleList()
self.activation_layer = nn.ReLU()
for i, hidden_dim in enumerate(linear_layers_dim):
if i == 0:
self.linear_layers.append(nn.Linear(tab_input_dim, hidden_dim))
else:
self.linear_layers.append(nn.Linear(self.linear_layers[-1].out_features, hidden_dim))
self.dropout = nn.Dropout(self.dropout_rate)
self.fc = nn.Linear(self.rcnn_linear_size + self.linear_layers[-1].out_features, self.output_dim)
self.softmax = nn.Softmax(dim=1)
self.optimizer, self.scheduler, self.criterion = None, None, None
self._compile(optimizer, learning_rate)
if torch.cuda.is_available():
self.cuda()
def txt_net_forward(self, x_txt):
embedded_sequence = self.embeddings(x_txt)
# >> embedded_sequence: (seq_len, batch_size, embed_size)
embedded_sequence = embedded_sequence.permute(1, 0, 2)
# >> o_n: (seq_len, batch_size, 2 * hidden_size)
o_n, (_, _) = self.lstm(embedded_sequence)
# >> input_features: (batch_size, seq_len, embed_size + 2 * hidden_size)
input_features = torch.cat([o_n, embedded_sequence], 2).permute(1, 0, 2)
# >> linear_output: (batch_size, seq_len, hidden_size_linear)
linear_output = self.linear(input_features)
# >> Reshaping for max_pool
linear_output = linear_output.permute(0, 2, 1)
# >> out_features: (batch_size, hidden_size_linear)
out_features = F.max_pool1d(linear_output, linear_output.shape[2]).squeeze(2)
return out_features
| 2.265625 | 2 |
codeforces/dp动态规划/1300/189A剪带子_错误方向.py | yofn/pyacm | 0 | 92676 | #!/usr/bin/env python3
#https://codeforces.com/problemset/problem/189/A
#Split n into pieces of lengths a, b and c; maximize the number of pieces.
#A classic DP: is recursion/memoized search easier, or a construction better suited to Python?
#Maybe BFS? But compared with BFS, DP only records layer by layer, so this is still closer to DP.
n,a,b,c = list(map(int,input().split())) #<4000
ss = set([a,b,c])
ii = 0
while min(ss)<n:
l = list(ss)
ss = set([i+j for i in l for j in [a,b,c]])
ii += 1
print(ss)
print(ii)
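
# For reference, a standard DP sketch for the intended answer (the maximum
# number of pieces) would look like:
#   dp = [0] + [-1] * n
#   for i in range(1, n + 1):
#       for cut in (a, b, c):
#           if i >= cut and dp[i - cut] >= 0:
#               dp[i] = max(dp[i], dp[i - cut] + 1)
#   print(dp[n])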
| 1.617188 | 2 |
model/dataset.py | frankaging/Pragmatic-Color-Generation | 2 | 92804 | <filename>model/dataset.py
#!/usr/bin/env python
# coding: utf-8
from util.color_util import *
# In[70]:
'''
Generate the dataset needed for the model
TODO: Put it in as a inherent of the base pytorch dataset class
'''
import os
import pickle
import numpy as np
import pandas as pd
import torch
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (4.0, 0.5)
import matplotlib.patches as mpatches
# In[2]:
BASE_DIR = "../munroe"
# ## Load labels, words and color mapping into memory
# In[78]:
# load label to word dictionary
label2words = {}
file_w2l = os.path.join(BASE_DIR, "words_to_labels.txt")
with open(file_w2l,encoding="utf-8") as f:
for line in f:
words, label = line.strip().split(',')
label2words[label] = words
# In[36]:
train, dev, test = load_splits()
# load color map
cdict_train = load_rgb(train)
cdict_dev = load_rgb(dev)
cdict_test = load_rgb(test)
# In[79]:
triple_train = load_triple(cdict_train, label2words)
triple_dev = load_triple(cdict_dev, label2words)
triple_test = load_triple(cdict_test, label2words)
# ## Saving all findings into disk for training use
# In[ ]:
import pickle
pickle.dump( triple_train, open( "../munroe/triple_train.p", "wb" ) )
pickle.dump( triple_dev, open( "../munroe/triple_dev.p", "wb" ) )
pickle.dump( triple_test, open( "../munroe/triple_test.p", "wb" ) )
pickle.dump( cdict_train, open( "../munroe/cdict_train.p", "wb" ) )
pickle.dump( cdict_dev, open( "../munroe/cdict_dev.p", "wb" ) )
pickle.dump( cdict_test, open( "../munroe/cdict_test.p", "wb" ) )
# non-extend version
triple_train_shrink = load_triple(cdict_train, label2words, extend=False)
triple_dev_shrink = load_triple(cdict_dev, label2words, extend=False)
triple_test_shrink = load_triple(cdict_test, label2words, extend=False)
pickle.dump( triple_train_shrink, open( "../munroe/triple_train_reduce.p", "wb" ) )
pickle.dump( triple_dev_shrink, open( "../munroe/triple_dev_reduce.p", "wb" ) )
pickle.dump( triple_test_shrink, open( "../munroe/triple_test_reduce.p", "wb" ) )
| 2.03125 | 2 |
comparator.py | LK-Peng/CNN-based-Cloud-Detection-Methods | 2 | 92932 | <reponame>LK-Peng/CNN-based-Cloud-Detection-Methods
import os
import json
import time
import argparse
import numpy as np
from tqdm import tqdm
from multiprocessing import Pool
from torch.utils.data import DataLoader
from dataloaders.dataset import MaskSet
from utils.metrics import Evaluator, BoundaryEvaluator
class Comparator(object):
def __init__(self, args):
self.args = args
# define dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True}
dataset = MaskSet(args)
self.mask_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
# define multiprocess
if args.num_proc:
self.p = Pool(processes=args.num_proc)
else:
self.p = None
# Define Evaluator
self.evaluator = Evaluator(self.args.num_classes)
self.boundaryevaluator_3 = BoundaryEvaluator(self.args.num_classes, self.p, self.args.num_proc, bound_th=3)
self.boundaryevaluator_5 = BoundaryEvaluator(self.args.num_classes, self.p, self.args.num_proc, bound_th=5)
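        # Boundary accuracy is evaluated at two pixel tolerances (3 px and 5 px),
        # sharing the same multiprocessing pool across all batches.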
def cal_metric(self):
tbar = tqdm(self.mask_loader, desc='\r')
num_mask = len(self.mask_loader.dataset)
print('numImages: {}'.format(num_mask))
# metric_img = dict()
for i, sample in enumerate(tbar):
gt_mask, pre_mask = sample['gt'].numpy(), sample['pre'].numpy()
self.evaluator.add_batch(gt_mask, pre_mask)
self.boundaryevaluator_3.add_batch(gt_mask, pre_mask)
self.boundaryevaluator_5.add_batch(gt_mask, pre_mask)
metric_dct = {
'PA': self.evaluator.Pixel_Accuracy(),
'MPA': self.evaluator.Pixel_Accuracy_Class(),
'MIoU': self.evaluator.Mean_Intersection_over_Union(),
'FWIoU': self.evaluator.Frequency_Weighted_Intersection_over_Union(),
'Precision': self.evaluator.Precision(),
'Recall': self.evaluator.Recall(),
'F1': self.evaluator.F_score(),
'F_boundary_3': self.boundaryevaluator_3.F_score_boundary().tolist(),
'Pr_boundary_3': self.boundaryevaluator_3.Precision_boundary().tolist(),
'Re_boundary_3': self.boundaryevaluator_3.Recall_boundary().tolist(),
'F_boundary_5': self.boundaryevaluator_5.F_score_boundary().tolist(),
'Pr_boundary_5': self.boundaryevaluator_5.Precision_boundary().tolist(),
'Re_boundary_5': self.boundaryevaluator_5.Recall_boundary().tolist(),
}
with open(self.args.out_file, 'w') as f:
json.dump(metric_dct, f, indent=4)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compare two mask')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--batch-size', type=int, default=24,
metavar='N', help='input batch size for comparison (default: auto)')
parser.add_argument('--pre-root', type=str,
default=None,
help='mask root of prediction')
parser.add_argument('--gt-root', type=str,
default='./example/test/Masks',
help='mask root of ground truth')
parser.add_argument('--merge-class', action='store_true', default=True,
help='if merge class in ground truth')
parser.add_argument('--num-classes', type=int, default=2,
help='the number of classes (default:2)')
parser.add_argument('--num-proc', type=int, default=4,
help='the number of processes (default:4)')
parser.add_argument('--selected-file', type=str,
default='./inference/cld_clr_tile_list.json',
help='list of files needed to compute boundary accuracy')
parser.add_argument('--out-file', type=str,
default=None,
help='output file')
args = parser.parse_args()
net_root = {
'DeeplabV3Plus-seed1': './inference/DeeplabV3Plus-seed1',
'DeeplabV3Plus-seed2': './inference/DeeplabV3Plus-seed2',
'DeeplabV3Plus-seed3': './inference/DeeplabV3Plus-seed3',
'DeeplabV3Plus-seed4': './inference/DeeplabV3Plus-seed4',
}
for net in net_root.keys():
args.pre_root = net_root[net]
args.out_file = os.path.join('./inference-mix', net + '.json')
print('prediction: {}'.format(args.pre_root))
print('ground truth: {}'.format(args.gt_root))
start = time.time()
comparator = Comparator(args)
comparator.cal_metric()
        comparator.p.close()  # close the process pool
print('Using {}s!'.format(time.time() - start))
| 2.203125 | 2 |
metrika/commands/counters.py | codex-bot/metrika | 3 | 93060 | <gh_stars>1-10
from .base import CommandBase
class CommandCounters(CommandBase):
async def __call__(self, payload):
counters = self.get_chat_counters(payload['chat'])
if not len(counters):
            await self.sdk.send_text_to_chat(payload['chat'], 'Подключенных счетчиков не найдено')  # "No connected counters found"
return
users = {}
for counter in counters:
login = counter.get('user_login')
counter_name = counter.get('counter_name')
if login in users:
users[login].append(counter_name)
else:
users[login] = [counter_name]
        message = 'Подключенные счетчики:\n\n'  # "Connected counters:"
for login in users.keys():
message += '*@{}*\n'.format(login)
for counter in users[login]:
message += '{}\n'.format(counter)
message += '\n'
await self.sdk.send_text_to_chat(payload['chat'], message, parse_mode='Markdown') | 1.757813 | 2 |
src/test-scripts/KfsTestLib.py | chanwit/qfs | 1 | 93188 | <gh_stars>1-10
#!/usr/bin/env python
#
# $Id$
#
# Created 2006
# Author: <NAME> (Kosmix Corp)
#
# Copyright 2006 Kosmix Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
# Helper functions for writing KFS test scripts
#
import os,os.path,sys,getopt,time
import threading
from ConfigParser import ConfigParser
class ServerInfo:
def __init__(self, n, p, rd, ra, ty):
self.node = n
self.port = p
self.runDir = rd
self.runArgs = ra
self.serverType = ty
self.state = "stopped"
def Start(self):
if (self.state == "started"):
return
cmd = "ssh %s 'cd %s; scripts/kfsrun.sh -s %s ' " % \
(self.node, self.runDir, self.runArgs)
print cmd
os.system(cmd)
self.state = "started"
def Stop(self):
if (self.state == "stopped"):
return
cmd = "ssh %s 'cd %s; scripts/kfsrun.sh -S %s ' " % \
(self.node, self.runDir, self.runArgs)
print cmd
os.system(cmd)
self.state = "stopped"
def ListChunks(self):
cmd = "ssh %s 'ls %s ' " % (self.node, self.chunkDir)
entries = []
for line in os.popen(cmd):
entries.append(line.strip())
return entries
def CheckLostFound(self, chunkId):
"""Check lost+found directory on chunkserver for chunkId"""
cmd = "ssh %s 'ls %s/lost+found ' " % (self.node, self.chunkDir)
for line in os.popen(cmd):
dirEntry = line.strip()
if (dirEntry == chunkId):
return
        raise IOError("Chunk %s not in lost+found" % (chunkId))
def CheckLiveness(self):
"""Check if the node is alive"""
def CorruptChunk(self, chunkId):
"""Corrupt chunk defined by chunkId on a chunkserver"""
filename = "/tmp/%d" % chunkId
cmd = "scp %s:%s/%d %s" % (self.node, self.chunkDir, chunkId, filename)
os.system(cmd)
f = open(filename, 'r+')
f.seek(5)
f.write('@#$%')
f.close()
cmd = "scp %s %s:%s" % (filename, self.node, self.chunkDir)
os.system(cmd)
def Format(self):
"""Delete chunks/log files on this server"""
if (self.serverType == "metaserver"):
cmd = "ssh %s 'rm -f %s/bin/kfscp/* %s/bin/kfslog/*' " % (self.node, self.runDir, self.runDir)
os.system(cmd)
else:
cmd = "ssh %s 'rm -f %s/* %s/bin/kfslog/*' " % (self.node, self.chunkDir, self.runDir)
os.system(cmd)
def __cmp__(self, other):
if (self.node == other.node):
return cmp(self.port, other.port)
return cmp(self.node, other.node)
def __str__(self):
return '[%s] %s:%d' % (self.serverType, self.node, self.port)
metaServer = ""
chunkServers = []
# this is the path to the directory containing test binaries
testBinDir = "."
def loadSetup(filename):
config = ConfigParser()
config.readfp(open(filename, 'r'))
if not config.has_section('metaserver'):
raise config.NoSectionError, "No metaserver section"
sections = config.sections()
# read the meta-server separately
for s in sections:
node = config.get(s, 'node')
rundir = config.get(s, 'rundir')
port = int(config.get(s, 'baseport'))
if (s == 'metaserver'):
runargs = "-m -f bin/MetaServer.prp"
type = "metaserver"
global metaServer
metaServer = ServerInfo(node, port, rundir, runargs, type)
continue
else:
runargs = "-c -f bin/ChunkServer.prp"
type = "chunkserver"
chunkDir = config.get(s, 'chunkDir')
server = ServerInfo(node, port, rundir, runargs, type)
server.chunkDir = chunkDir
global chunkServers
chunkServers.append(server)
chunkServers.sort()
def setTestBinDir(path):
global testBinDir
testBinDir = path
def doWrite(kfsClntPrpFn, fn, numMB):
cmd = "%s/writer_perftest -p %s -m %d -f %s" % (testBinDir, kfsClntPrpFn, numMB, fn)
print "Executing cmd: %s" % cmd
os.system(cmd)
def doRead(kfsClntPrpFn, fn, numMB):
cmd = "%s/reader_perftest -p %s -m %d -f %s" % (testBinDir, kfsClntPrpFn, numMB, fn)
os.system(cmd)
def pruneSetup(numServers):
global chunkServers
if (len(chunkServers) > numServers):
chunkServers = chunkServers[:numServers]
def startServers(numServers):
for i in range(numServers):
chunkServers[i].Start()
def startAllServers():
metaServer.Start()
startServers(len(chunkServers))
def startMetaServer():
metaServer.Start()
def stopAllServers():
metaServer.Stop()
for i in xrange(len(chunkServers)):
chunkServers[i].Stop()
def stopAllChunkServers():
for i in xrange(len(chunkServers)):
chunkServers[i].Stop()
def restartChunkServer(index):
chunkServers[index].Stop()
time.sleep(5)
chunkServers[index].Start()
def formatAllServers():
metaServer.Format()
for i in xrange(len(chunkServers)):
chunkServers[i].Format()
class IOWorker(threading.Thread):
"""Worker thread that reads/writes data from a KFS file"""
def __init__(self, c, f, n, ioF):
threading.Thread.__init__(self)
self.kfsClntPrpFn = c
self.kfsFn = f
self.numMB = n
self.ioFunc = ioF
def run(self):
self.ioFunc(self.kfsClntPrpFn, self.kfsFn, self.numMB)
| 1.453125 | 1 |
assign_2.3.py | tramontana-software/Python_Coding_Carlos | 0 | 93316 | # My Script:
hrs = input('Enter Hours: ')
hrs = float(hrs)
rph = input('Enter your rate per hour: ')
rph = float(rph)
pay = hrs * rph
print('Pay:', pay)
| 1.617188 | 2 |
Examples/asposeimagingcloudexamples/AI/find_similar_images.py | aspose-imaging-cloud/aspose-imaging-cloud-python | 1 | 93444 | <gh_stars>1-10
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="find_similar_images.py">
# Copyright (c) 2019 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import json
import os
import six
if six.PY2:
import urllib as urllib
else:
import urllib.parse as urllib
import requests as req
import asposeimagingcloud.models.requests as requests
from asposeimagingcloudexamples.AI.imaging_ai_base import ImagingAiBase
class FindSimilarImages(ImagingAiBase):
"""Find similar images example"""
def __init__(self, imaging_api):
ImagingAiBase.__init__(self, imaging_api)
self._print_header('Find similar images example:')
self.__image_to_find = '4.jpg'
self.__image_to_find_by_tag = 'ComparingImageSimilar75.jpg'
self.__images_path = 'FindSimilar'
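    # Typical driving sequence for this example class (hypothetical; the
    # ImagingApi construction and credentials are set up by the caller):
    #   example = FindSimilarImages(imaging_api)
    #   example.prepare_search_context()
    #   example.find_similar_images()
    #   example.find_images_by_tag()
    #   example.search_image_from_web_source()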
def prepare_search_context(self):
"""Prepares the search context"""
self._create_search_context()
# Upload images to Cloud Storage
for image_name in ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg', '6.jpg', '7.jpg', '8.jpg', '9.jpg', '10.jpg']:
# Upload local image to Cloud Storage
self._upload_image_to_cloud(os.path.join(self.__images_path, image_name))
self._create_image_features(self.__images_path, True)
print()
def find_similar_images(self):
"""Finds the similar images"""
print('Finds the similar images:')
find_image_id = os.path.join(self.CLOUD_PATH, self.__images_path, self.__image_to_find)
similarity_threshold = 60
max_count = 3
folder = None
storage = None # We are using default Cloud Storage
request = requests.FindSimilarImagesRequest(self._search_context_id, similarity_threshold, max_count,
image_id=find_image_id, folder=folder, storage=storage)
print("Call FindSimilarImages with params: similarity threshold: {0}, max count: {1}, image id: {2}".format(
similarity_threshold, max_count, find_image_id))
response = self._imaging_api.find_similar_images(request)
print('Results count: ' + str(len(response.results)))
print()
def find_images_by_tag(self):
"""Finds the images by tag"""
print('Finds the images by tag:')
file_name = self.__image_to_find_by_tag
tag_name = 'ImageTag'
similarity_threshold = 60
max_count = 5
folder = ImagingAiBase.CLOUD_PATH # Folder with image to process
storage = None # We are using default Cloud Storage
input_stream = os.path.join(ImagingAiBase.EXAMPLE_IMAGES_FOLDER, file_name)
create_tag_request = requests.CreateImageTagRequest(input_stream, self._search_context_id, tag_name, folder,
storage)
print('Call CreateImageTag with params: tag name: ' + tag_name)
self._imaging_api.create_image_tag(create_tag_request)
tags = json.dumps([tag_name])
find_request = requests.FindImagesByTagsRequest(tags, self._search_context_id, similarity_threshold,
max_count, folder, storage)
print("Call FindImagesByTags with params: similarity threshold: {0}, max count: {1}, tags: {2}".format(
similarity_threshold, max_count, tags))
find_response = self._imaging_api.find_images_by_tags(find_request)
for find_result in find_response.results:
print('Image name: ' + find_result.image_id + ', similarity: ' + str(find_result.similarity))
print()
def search_image_from_web_source(self):
"""Finds the similar images from the URL source"""
print('Finds similar images from URL:')
similarity_threshold = 30.0
max_count = 3
folder = ImagingAiBase.CLOUD_PATH # Folder with image to process
storage = None # We are using default Cloud Storage
# Add images from the website to the search context
image_source_url = urllib.quote_plus('https://www.f1news.ru/interview/hamilton/140909.shtml')
self._imaging_api.create_web_site_image_features(
requests.CreateWebSiteImageFeaturesRequest(self._search_context_id, image_source_url, folder, storage))
self._wait_idle(self._search_context_id)
# Download the image from the website
image_data = req.get('https://cdn.f1ne.ws/userfiles/hamilton/140909.jpg')
path = os.path.abspath(os.path.join(ImagingAiBase.OUTPUT_FOLDER, 'WebSearchSample.jpg'))
with open(path, "wb") as f:
f.write(image_data.content)
# Resize downloaded image to demonstrate search engine capabilities
resized_image = self._imaging_api.create_resized_image(requests.CreateResizedImageRequest(
path, 600, 400, "jpg", storage=storage))
# Upload image to cloud
image_name = 'ReverseSearch.jpg'
self._imaging_api.upload_file(requests.UploadFileRequest(ImagingAiBase.CLOUD_PATH + "/" + image_name,
resized_image, storage))
# Find similar images in the search context
find_response = self._imaging_api.find_similar_images(
requests.FindSimilarImagesRequest(self._search_context_id, similarity_threshold, max_count,
image_id=ImagingAiBase.CLOUD_PATH + "/" + image_name,
folder=folder, storage=storage))
print('Similar images found: ' + str(len(find_response.results)))
print('Similar image id: ' + find_response.results[0].image_id)
print('Similarity: ' + str(find_response.results[0].similarity))
| 1.476563 | 1 |
src/methods/learning_rate_schedules.py | otiliastr/brain_task_effect | 4 | 93572 | import numpy as np
import tensorflow as tf
def get_lr_schedule(lr_decay_rate=None,
lr_decay_step=None,
lr_decay_per_iter=True,
lr_decay_start_step=0,
lr_decay_end_step=np.inf,
lr_warmup_init=1e-9,
lr_warmup_final=None,
lr_warmup_start_step=0,
lr_warmup_end_step=np.inf,
lr_warmup_per_iter=True):
schedules = []
if lr_warmup_final and lr_warmup_end_step is not None:
lr_schedule = get_lr_warmup_schedule(
lr_final=lr_warmup_final,
schedule_end_step=lr_warmup_end_step,
schedule_start_step=lr_warmup_start_step,
lr_init=lr_warmup_init)
lr_scheduler = LearningRateScheduler(
lr_schedule,
iteration_based=lr_warmup_per_iter,
verbose=False)
schedules.append(lr_scheduler)
if lr_decay_rate is not None and lr_decay_step is not None:
lr_schedule = get_lr_decay_schedule(
lr_decay_step=lr_decay_step,
lr_decay_rate=lr_decay_rate,
schedule_start_step=lr_decay_start_step,
schedule_end_step=lr_decay_end_step)
lr_scheduler = LearningRateScheduler(
lr_schedule,
iteration_based=lr_decay_per_iter,
verbose=False)
schedules.append(lr_scheduler)
return schedules
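# Minimal usage sketch (hypothetical hyperparameters): linear warmup to 1e-3
# over the first 500 iterations, then decay by a factor of 0.9 every 1000
# iterations; the returned schedulers are passed to the training loop as
# callbacks.
#
#   callbacks = get_lr_schedule(lr_decay_rate=0.9,
#                               lr_decay_step=1000,
#                               lr_warmup_final=1e-3,
#                               lr_warmup_end_step=500)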
def get_lr_decay_schedule(lr_decay_step, lr_decay_rate=0.9, schedule_start_step=0,
schedule_end_step=np.inf):
"""Creates a learning rate decay schedule.
    The learning rate is multiplied by `lr_decay_rate` every `lr_decay_step`
    steps while the step lies in [schedule_start_step, schedule_end_step].
Arguments:
lr_decay_step: An integer representing the number of steps after which to decay learning
rate (i.e. the learning rate is decayed every `lr_decay_step` steps.
lr_decay_rate: A float representing the factor with which to multiply the current learning
rate. Default: 0.9.
schedule_start_step: An integer representing the step (either iteration or epoch) after
which to start applying the decay.
schedule_end_step: An integer representing the step (either iteration or epoch) after
which we stop applying the decay.
    Returns
        lr_schedule: A function mapping (step, lr) to the decayed learning rate.
"""
if schedule_end_step is None:
schedule_end_step = np.inf
if schedule_start_step is None:
schedule_start_step = 0
assert schedule_start_step <= schedule_end_step
def lr_schedule(step, lr):
if schedule_start_step <= step <= schedule_end_step and (step + 1) % lr_decay_step == 0:
lr = lr * lr_decay_rate
# print('Step ', step, ' lr=', lr)
return lr
return lr_schedule
def get_lr_warmup_schedule(lr_final, schedule_end_step, schedule_start_step=0, lr_init=1e-10):
"""Creates a learning rate warm-up schedule.
    The learning rate is increased linearly from `lr_init` to `lr_final`
    between `schedule_start_step` and `schedule_end_step`.
Arguments:
lr_final: A float representing the value of the learning rate at the end of the warmup.
schedule_start_step: An integer representing the step (either iteration or epoch) after
            which to start applying the warmup.
schedule_end_step: An integer representing the step (either iteration or epoch) after
which we stop applying the warmup, and the learning rate reaches its final value
`lr_final`.
lr_init: A float representing the value of the learning rate at the beginning of the
warm-up.
    Returns
        lr_schedule: A function mapping (step, lr) to the warmed-up learning rate.
"""
total_steps = schedule_end_step - schedule_start_step
total_lr_diff = lr_final - lr_init
def lr_schedule(step, lr):
if schedule_start_step <= step < schedule_end_step:
            # Offset by the schedule start so the ramp spans exactly
            # [schedule_start_step, schedule_end_step] rather than [0, end].
            lr = lr_init + total_lr_diff * (step - schedule_start_step) / total_steps
# print('Step ', step, ' lr=', lr)
return lr
return lr_schedule
class LearningRateScheduler(tf.keras.callbacks.Callback):
"""Learning rate scheduler.
Arguments
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and current learning rate
and returns a new learning rate as output (float).
verbose: int. 0: quiet, 1: update messages.
"""
def __init__(self, schedule, verbose=0, model=None, iteration_based=True):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
self.verbose = verbose
self.model = model
self.iteration_based = iteration_based
def _update_lr(self, step):
lr = self.model.optimizer._lr if hasattr(self.model.optimizer, '_lr') else \
self.model.optimizer._learning_rate
try: # new API
lr = self.schedule(step, lr)
except TypeError: # old API for backward compatibility
lr = self.schedule(step)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if hasattr(self.model.optimizer, '_lr'):
self.model.optimizer._lr = lr
else:
self.model.optimizer._learning_rate = lr
if self.verbose > 0:
print('\nStep %05d: LearningRateScheduler setting learning '
'rate to %s.' % (step + 1, lr))
def on_epoch_begin(self, epoch, logs=None, **kwargs):
if not self.iteration_based:
self._update_lr(epoch)
def on_epoch_end(self, epoch, logs=None):
if not self.iteration_based:
logs = logs or {}
logs['lr'] = self.model.optimizer._lr if hasattr(self.model.optimizer, '_lr') else \
self.model.optimizer._learning_rate
def on_batch_begin(self, batch, logs):
if self.iteration_based:
self._update_lr(logs['iter'])
def on_batch_end(self, logs=None, **kwargs):
if self.iteration_based:
logs = logs or {}
logs['lr'] = self.model.optimizer._lr if hasattr(self.model.optimizer, '_lr') else \
self.model.optimizer._learning_rate
class FakeModel(object):
def __init__(self, optimizer=None):
self.optimizer = optimizer
| 1.84375 | 2 |
Mathematics/Exponentiation.py | charlie219/CSES-Solutions | 0 | 93700 | # CSES "Exponentiation": print a^b mod 1e9+7 for each of n queries.
# Python's three-argument pow performs fast modular exponentiation.
n = int(input())
while n:
    n -= 1
    print(pow(*map(int, input().split()), 10 ** 9 + 7))
| 1.710938 | 2 |
tests/test_all.py | waadnakhleh/pythonformatter | 0 | 93828 | import filecmp
import os
import pathlib
import pytest
from lib import _rewrite
from _exceptions import NoSolutionError
import main
def confirm(output):
compare_to = "modified_file.py"
try:
assert filecmp.cmp(output, compare_to)
except AssertionError as e:
with open(compare_to) as f:
if not os.path.isdir("logs"):
os.mkdir("logs")
            lines = f.readlines()
            # `output` is a pathlib.Path, which does not support slicing;
            # derive the log name from the test directory instead
            # (e.g. ".../test_import/output.py" -> "log_test_import.py").
            log_name = pathlib.Path(output).parent.name
            with open(f"logs/log_{log_name}.py", "w") as f1:
f1.writelines(lines)
raise e
finally:
open(compare_to, "w").close() # Empty file
def make_test(
input_file,
output_file,
max_line=88,
space_between_arguments=False,
multiple_imports=False,
vertical_definition_lines=2,
nested_lines=1,
):
input_file = pathlib.Path(__file__).parent.absolute().joinpath(input_file)
output_file = pathlib.Path(__file__).parent.absolute().joinpath(output_file)
args = (
"--target-file",
input_file,
"--max-line",
max_line,
"--vertical-definition-lines",
vertical_definition_lines,
"--nested-lines",
nested_lines,
)
if space_between_arguments:
args = args + ("--space-between-arguments",)
if multiple_imports:
args = args + ("--multiple-imports",)
main.main(*args)
confirm(output_file)
_rewrite.file = open("modified_file.py", "a")
def test_syntax_error():
with pytest.raises(SyntaxError):
input_file = "syntax_error/file.py"
input_file = pathlib.Path(__file__).parent.absolute().joinpath(input_file)
main.main("--target-file", input_file)
def test_import():
input_file, output_file = "test_import/input.py", "test_import/output.py"
make_test(input_file, output_file)
def test_from_import():
input_file, output_file = "test_from_import/input.py", "test_from_import/output.py"
make_test(input_file, output_file)
def test_constant():
# TODO: fix bug, binary and hex values change to decimal.
input_file, output_file = "test_constant/input.py", "test_constant/output.py"
make_test(input_file, output_file)
def test_unaryop():
input_file, output_file = "test_unaryop/input.py", "test_unaryop/output.py"
make_test(input_file, output_file)
def test_name():
input_file, output_file = "test_name/input.py", "test_name/output.py"
make_test(input_file, output_file)
def test_boolop():
input_file, output_file = "test_boolop/input.py", "test_boolop/output.py"
make_test(input_file, output_file)
def test_list():
input_file, output_file = "test_list/input.py", "test_list/output.py"
make_test(input_file, output_file)
def test_tuple():
input_file, output_file = "test_tuple/input.py", "test_tuple/output.py"
make_test(input_file, output_file)
def test_pass():
input_file, output_file = "test_pass/input.py", "test_pass/output.py"
make_test(input_file, output_file)
def test_assignment():
input_file, output_file = "test_assignment/input.py", "test_assignment/output.py"
make_test(input_file, output_file)
def test_binop():
input_file, output_file = "test_binop/input.py", "test_binop/output.py"
make_test(input_file, output_file)
def test_namedexpr():
input_file, output_file = "test_namedexpr/input.py", "test_namedexpr/output.py"
make_test(input_file, output_file)
def test_compare():
input_file, output_file = "test_compare/input.py", "test_compare/output.py"
make_test(input_file, output_file)
def test_assert():
input_file, output_file = "test_assert/input.py", "test_assert/output.py"
make_test(input_file, output_file)
def test_if():
input_file, output_file = "test_if/input.py", "test_if/output.py"
make_test(input_file, output_file)
def test_while():
input_file, output_file = "test_while/input.py", "test_while/output.py"
make_test(input_file, output_file)
def test_break():
input_file, output_file = "test_break/input.py", "test_break/output.py"
make_test(input_file, output_file)
def test_continue():
input_file, output_file = "test_continue/input.py", "test_continue/output.py"
make_test(input_file, output_file)
def test_return():
input_file, output_file = "test_return/input.py", "test_return/output.py"
make_test(input_file, output_file)
def test_call():
input_file, output_file = "test_call/input.py", "test_call/output.py"
make_test(input_file, output_file)
def test_functiondef():
input_file, output_file = "test_functiondef/input.py", "test_functiondef/output.py"
make_test(input_file, output_file)
def test_for():
input_file, output_file = "test_for/input.py", "test_for/output.py"
make_test(input_file, output_file)
def test_augassign():
input_file, output_file = "test_augassign/input.py", "test_augassign/output.py"
make_test(input_file, output_file)
def test_classdef():
input_file, output_file = "test_classdef/input.py", "test_classdef/output.py"
make_test(input_file, output_file)
def test_with():
input_file, output_file = "test_with/input.py", "test_with/output.py"
make_test(input_file, output_file)
def test_delete():
input_file, output_file = "test_delete/input.py", "test_delete/output.py"
make_test(input_file, output_file)
def test_attribute():
input_file, output_file = "test_attribute/input.py", "test_attribute/output.py"
make_test(input_file, output_file)
def test_try():
input_file, output_file = "test_try/input.py", "test_try/output.py"
make_test(input_file, output_file)
def test_raise():
input_file, output_file = "test_raise/input.py", "test_raise/output.py"
make_test(input_file, output_file)
def test_global():
input_file, output_file = "test_global/input.py", "test_global/output.py"
make_test(input_file, output_file)
def test_nonlocal():
input_file, output_file = "test_nonlocal/input.py", "test_nonlocal/output.py"
make_test(input_file, output_file)
def test_subscript():
input_file, output_file = "test_subscript/input.py", "test_subscript/output.py"
make_test(input_file, output_file)
def test_listcomp():
input_file, output_file = "test_listcomp/input.py", "test_listcomp/output.py"
make_test(input_file, output_file)
def test_docstring():
input_file, output_file = "test_docstring/input.py", "test_docstring/output.py"
make_test(input_file, output_file)
def test_ifexpr():
input_file, output_file = "test_ifexpr/input.py", "test_ifexpr/output.py"
make_test(input_file, output_file)
def test_dict():
input_file, output_file = "test_dict/input.py", "test_dict/output.py"
make_test(input_file, output_file)
def test_general():
input_file, output_file = "test_general/input.py", "test_general/output.py"
make_test(input_file, output_file)
def test_command_line_args():
input_file, output_file = (
"test_command_line_args/input.py",
"test_command_line_args/output.py",
)
make_test(input_file, output_file, max_line=100)
def test_bad_arguments():
with pytest.raises(ValueError, match="unknown argument --unsupported-argument"):
main.main("--target-file", "input_file", "--unsupported-argument", "")
def test_bad_max_line_length():
input_file, output_file = (
"test_command_line_args/input.py",
"test_command_line_args/output.py",
)
with pytest.raises(NoSolutionError, match="check maximum line length"):
make_test(input_file, output_file, max_line=30)
def test_space_arguments():
input_file, output_file = (
"test_space_arguments/input.py",
"test_space_arguments/output.py",
)
make_test(input_file, output_file, max_line=100, space_between_arguments=True)
def test_multiple_imports():
input_file, output_file = (
"test_multiple_imports/input.py",
"test_multiple_imports/output.py",
)
make_test(input_file, output_file, multiple_imports=True)
def test_vertical_definition_lines():
input_file, output_file = (
"test_vertical_definition_lines/input.py",
"test_vertical_definition_lines/output.py",
)
make_test(input_file, output_file, vertical_definition_lines=3)
def test_nested_lines():
input_file, output_file = (
"test_nested_lines/input.py",
"test_nested_lines/output.py",
)
make_test(input_file, output_file, nested_lines=3)
| 1.570313 | 2 |
tests/apps/compute/calc_test.py | item4/yui | 36 | 93956 | import asyncio
import math
from concurrent.futures.process import ProcessPoolExecutor
from datetime import date
from datetime import datetime
import pytest
from yui.apps.compute.calc import BadSyntax
from yui.apps.compute.calc import Decimal as D
from yui.apps.compute.calc import Evaluator
from yui.apps.compute.calc import calculate
from ...util import FakeBot
class GetItemSpy:
    """Records every key passed to __getitem__ so tests can inspect how the
    evaluator builds subscripts (e.g. slice objects)."""
    def __init__(self):
        self.queue = []
    def __getitem__(self, item):
        self.queue.append(item)
def test_decimal():
assert -D('1') == D('-1')
assert +D('1') == D('1')
assert abs(D('-1')) == D('1')
assert D('1') + 1 == D('2')
assert 1 + D('1') == D('2')
assert D('1') - 1 == D('0')
assert 1 - D('1') == D('0')
assert D('2') * 3 == D('6')
assert 2 * D('3') == D('6')
assert D('10') // 2 == D('5')
assert 10 // D('2') == D('5')
assert D('10') / 2.5 == D('4')
assert 10 / D('2.5') == D('4')
assert D('5') % 2 == D('1')
assert 5 % D('2') == D('1')
assert divmod(D('5'), 2) == (D('2'), D('1'))
assert divmod(5, D('2')) == (D('2'), D('1'))
assert D('3') ** 2 == D('9')
assert 3 ** D('2') == D('9')
def test_annassign():
e = Evaluator()
err = 'You can not use annotation syntax'
with pytest.raises(BadSyntax, match=err):
e.run('a: int = 10')
assert 'a' not in e.symbol_table
def test_assert():
e = Evaluator()
err = 'You can not use assertion syntax'
with pytest.raises(BadSyntax, match=err):
e.run('assert True')
with pytest.raises(BadSyntax, match=err):
e.run('assert False')
def test_assign():
e = Evaluator()
e.run('a = 1 + 2')
assert e.symbol_table['a'] == 3
e.run('x, y = 10, 20')
assert e.symbol_table['x'] == 10
assert e.symbol_table['y'] == 20
e.symbol_table['dt'] = datetime.now()
err = 'This assign method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('dt.year = 2000')
err = 'too many values to unpack'
with pytest.raises(ValueError, match=err):
e.run('year, month, day = 1,')
err = 'not enough values to unpack'
with pytest.raises(ValueError, match=err):
e.run('id, name = 1, "kirito", "black"')
err = 'cannot unpack non-iterable int object'
with pytest.raises(TypeError, match=err):
e.run('year, month, day = 1')
e.run('arr = [1, 2, 3]')
assert e.symbol_table['arr'] == [1, 2, 3]
e.run('arr[1] = 5')
assert e.symbol_table['arr'] == [1, 5, 3]
e.run('arr[:] = [10, 20, 30]')
assert e.symbol_table['arr'] == [10, 20, 30]
def test_asyncfor():
e = Evaluator()
e.symbol_table['r'] = 0
err = 'You can not use `async for` loop syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
async for x in [1, 2, 3, 4]:
r += x
'''
)
assert e.symbol_table['r'] == 0
def test_asyncfunctiondef():
e = Evaluator()
err = 'Defining new coroutine via def syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
async def abc():
pass
'''
)
assert 'abc' not in e.symbol_table
def test_asyncwith():
e = Evaluator()
e.symbol_table['r'] = 0
err = 'You can not use `async with` syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
async with x():
r += 100
'''
)
assert e.symbol_table['r'] == 0
def test_attribute():
e = Evaluator()
e.symbol_table['dt'] = datetime.now()
e.run('x = dt.year')
assert e.symbol_table['x'] == e.symbol_table['dt'].year
err = 'You can not access `test_test_test` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('y = dt.test_test_test')
assert 'y' not in e.symbol_table
err = 'You can not access `asdf` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('z = x.asdf')
e.symbol_table['math'] = math
err = 'You can not access `__module__` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('math.__module__')
e.symbol_table['datetime'] = datetime
err = 'You can not access `test_test` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('datetime.test_test')
def test_augassign():
e = Evaluator()
e.symbol_table['a'] = 0
e.run('a += 1')
assert e.symbol_table['a'] == 1
e.symbol_table['l'] = [1, 2, 3, 4]
e.run('l[0] -= 1')
assert e.symbol_table['l'] == [0, 2, 3, 4]
err = 'This assign method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('l[2:3] += 20')
e.symbol_table['dt'] = datetime.now()
err = 'This assign method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('dt.year += 2000')
def test_await():
e = Evaluator()
err = 'You can not await anything'
with pytest.raises(BadSyntax, match=err):
e.run('r = await x()')
assert 'r' not in e.symbol_table
def test_binop():
e = Evaluator()
assert e.run('1 + 2') == 1 + 2
assert e.run('3 & 2') == 3 & 2
assert e.run('1 | 2') == 1 | 2
assert e.run('3 ^ 2') == 3 ^ 2
assert e.run('3 / 2') == 3 / 2
assert e.run('3 // 2') == 3 // 2
assert e.run('3 << 2') == 3 << 2
with pytest.raises(TypeError):
e.run('2 @ 3')
assert e.run('3 * 2') == 3 * 2
assert e.run('33 % 4') == 33 % 4
assert e.run('3 ** 2') == 3 ** 2
assert e.run('100 >> 2') == 100 >> 2
assert e.run('3 - 1') == 3 - 1
def test_boolop():
e = Evaluator()
assert e.run('True and False') == (True and False)
assert e.run('True or False') == (True or False)
def test_break():
e = Evaluator()
e.run('break')
assert e.current_interrupt.__class__.__name__ == 'Break'
def test_bytes():
e = Evaluator()
assert e.run('b"asdf"') == b'asdf'
e.run('a = b"asdf"')
assert e.symbol_table['a'] == b'asdf'
def test_call():
e = Evaluator()
e.symbol_table['date'] = date
e.run('x = date(2019, 10, day=7)')
assert e.symbol_table['x'] == date(2019, 10, day=7)
e.symbol_table['math'] = math
e.run('y = math.sqrt(121)')
assert e.symbol_table['y'] == math.sqrt(121)
e.symbol_table['datetime'] = datetime
e.run('z = datetime.now().date()')
assert e.symbol_table['z'] == datetime.now().date()
def test_classdef():
e = Evaluator()
err = 'Defining new class via def syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
class ABCD:
pass
'''
)
assert 'ABCD' not in e.symbol_table
def test_compare():
e = Evaluator()
assert e.run('1 == 2') == (1 == 2)
assert e.run('3 > 2') == (3 > 2)
assert e.run('3 >= 2') == (3 >= 2)
assert e.run('"A" in "America"') == ('A' in 'America')
assert e.run('"E" not in "America"') == ('E' not in 'America')
assert e.run('1 is 2') == (1 is 2) # noqa
assert e.run('1 is not 2') == (1 is not 2) # noqa
assert e.run('3 < 2') == (3 < 2)
assert e.run('3 <= 2') == (3 <= 2)
def test_continue():
e = Evaluator()
e.run('continue')
assert e.current_interrupt.__class__.__name__ == 'Continue'
def test_delete():
e = Evaluator()
e.symbol_table['a'] = 0
e.symbol_table['b'] = 0
e.symbol_table['c'] = 0
e.run('del a, b, c')
assert 'a' not in e.symbol_table
assert 'b' not in e.symbol_table
assert 'c' not in e.symbol_table
e.symbol_table['l'] = [1, 2, 3, 4]
e.run('del l[0]')
assert e.symbol_table['l'] == [2, 3, 4]
err = 'This delete method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('del l[2:3]')
e.symbol_table['dt'] = datetime.now()
err = 'This delete method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('del dt.year')
def test_dict():
e = Evaluator()
assert e.run('{1: 111, 2: 222}') == {1: 111, 2: 222}
e.run('a = {1: 111, 2: 222}')
assert e.symbol_table['a'] == {1: 111, 2: 222}
def test_dictcomp():
e = Evaluator()
assert e.run('{k+1: v**2 for k, v in {1: 1, 2: 11, 3: 111}.items()}') == {
2: 1,
3: 121,
4: 12321,
}
assert 'k' not in e.symbol_table
assert 'v' not in e.symbol_table
e.run('a = {k+1: v**2 for k, v in {1: 1, 2: 11, 3: 111}.items()}')
assert e.symbol_table['a'] == {
2: 1,
3: 121,
4: 12321,
}
assert 'k' not in e.symbol_table
assert 'v' not in e.symbol_table
def test_ellipsis():
e = Evaluator()
assert e.run('...') == Ellipsis
def test_expr():
e = Evaluator()
assert e.run('True') is True
assert e.run('False') is False
assert e.run('None') is None
assert e.run('123') == 123
assert e.run('"abc"') == 'abc'
assert e.run('[1, 2, 3]') == [1, 2, 3]
assert e.run('(1, 2, 3, 3)') == (1, 2, 3, 3)
assert e.run('{1, 2, 3, 3}') == {1, 2, 3}
assert e.run('{1: 111, 2: 222}') == {1: 111, 2: 222}
def test_functiondef():
e = Evaluator()
err = 'Defining new function via def syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
def abc():
pass
'''
)
assert 'abc' not in e.symbol_table
def test_for():
total = 0
for x in [1, 2, 3, 4, 5, 6]:
total = total + x
if total > 10:
continue
total = total * 2
else:
total = total + 10000
e = Evaluator()
e.run(
'''
total = 0
for x in [1, 2, 3, 4, 5, 6]:
total = total + x
if total > 10:
continue
total = total * 2
else:
total = total + 10000
'''
)
assert e.symbol_table['total'] == total
total2 = 0
for x in [1, 2, 3, 4, 5, 6]:
total2 = total2 + x
if total2 > 10:
break
total2 = total2 * 2
else:
total2 = total2 + 10000
e.run(
'''
total2 = 0
for x in [1, 2, 3, 4, 5, 6]:
total2 = total2 + x
if total2 > 10:
break
total2 = total2 * 2
else:
total2 = total2 + 10000
'''
)
assert e.symbol_table['total2'] == total2
def test_formattedvalue():
e = Evaluator()
e.symbol_table['before'] = 123456
e.run('after = f"change {before} to {before:,}!"')
assert e.symbol_table['after'] == 'change 123456 to 123,456!'
def test_generator_exp():
e = Evaluator()
e.symbol_table['r'] = [1, 2, 3]
err = 'Defining new generator expression is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('x = (i ** 2 for i in r)')
assert 'x' not in e.symbol_table
def test_global():
e = Evaluator()
err = 'You can not use `global` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('global x')
def test_if():
e = Evaluator()
e.symbol_table['a'] = 1
e.run(
'''
if a == 1:
a = 2
b = 3
'''
)
assert e.symbol_table['a'] == 2
assert e.symbol_table['b'] == 3
e.run(
'''
if a == 1:
a = 2
b = 3
z = 1
else:
a = 3
b = 4
c = 5
'''
)
assert e.symbol_table['a'] == 3
assert e.symbol_table['b'] == 4
assert e.symbol_table['c'] == 5
assert 'z' not in e.symbol_table
e.run(
'''
if a == 1:
a = 2
b = 3
z = 1
elif a == 3:
d = 4
e = 5
f = 6
else:
a = 3
b = 4
c = 5
y = 7
'''
)
assert e.symbol_table['a'] == 3
assert e.symbol_table['b'] == 4
assert e.symbol_table['c'] == 5
assert e.symbol_table['d'] == 4
assert e.symbol_table['e'] == 5
assert e.symbol_table['f'] == 6
assert 'y' not in e.symbol_table
assert 'z' not in e.symbol_table
def test_ifexp():
e = Evaluator()
assert e.run('100 if 1 == 1 else 200') == 100
assert e.run('100 if 1 == 2 else 200') == 200
def test_import():
e = Evaluator()
err = 'You can not import anything'
with pytest.raises(BadSyntax, match=err):
e.run('import sys')
assert 'sys' not in e.symbol_table
def test_importfrom():
e = Evaluator()
err = 'You can not import anything'
with pytest.raises(BadSyntax, match=err):
e.run('from os import path')
assert 'path' not in e.symbol_table
def test_lambda():
e = Evaluator()
err = 'Defining new function via lambda syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('lambda x: x*2')
def test_list():
e = Evaluator()
assert e.run('[1, 2, 3]') == [1, 2, 3]
e.run('a = [1, 2, 3]')
assert e.symbol_table['a'] == [1, 2, 3]
def test_listcomp():
e = Evaluator()
assert e.run('[x ** 2 for x in [1, 2, 3]]') == [1, 4, 9]
assert 'x' not in e.symbol_table
assert e.run('[x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]]') == (
[x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]]
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
assert e.run('[y ** 2 for x in [1, 2, 3] for y in [x+1, x+3, x+5]]') == (
[y ** 2 for x in [1, 2, 3] for y in [x + 1, x + 3, x + 5]]
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
def test_nameconstant():
e = Evaluator()
assert e.run('True') is True
assert e.run('False') is False
assert e.run('None') is None
e.run('x = True')
e.run('y = False')
e.run('z = None')
assert e.symbol_table['x'] is True
assert e.symbol_table['y'] is False
assert e.symbol_table['z'] is None
def test_nonlocal():
e = Evaluator()
err = 'You can not use `nonlocal` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('nonlocal x')
def test_num():
e = Evaluator()
assert e.run('123') == 123
e.run('a = 123')
assert e.symbol_table['a'] == 123
def test_pass():
e = Evaluator()
e.run('pass')
def test_raise():
e = Evaluator()
err = 'You can not use `raise` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('raise NameError')
def test_return():
e = Evaluator()
err = 'You can not use `return` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('return True')
def test_set():
e = Evaluator()
assert e.run('{1, 1, 2, 3, 3}') == {1, 2, 3}
e.run('a = {1, 1, 2, 3, 3}')
assert e.symbol_table['a'] == {1, 2, 3}
def test_setcomp():
e = Evaluator()
assert e.run('{x ** 2 for x in [1, 2, 3, 3]}') == {1, 4, 9}
assert 'x' not in e.symbol_table
assert e.run('{x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]}') == (
{x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]}
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
assert e.run('{y ** 2 for x in [1, 2, 3] for y in [x+1, x+3, x+5]}') == (
{y ** 2 for x in [1, 2, 3] for y in [x + 1, x + 3, x + 5]}
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
def test_slice():
e = Evaluator()
e.symbol_table['obj'] = GetItemSpy()
e.run('obj[10:20:3]')
s = e.symbol_table['obj'].queue.pop()
assert isinstance(s, slice)
assert s.start == 10
assert s.stop == 20
assert s.step == 3
def test_str():
e = Evaluator()
assert e.run('"asdf"') == 'asdf'
e.run('a = "asdf"')
assert e.symbol_table['a'] == 'asdf'
def test_subscript():
e = Evaluator()
assert e.run('[10, 20, 30][0]') == 10
assert e.run('(100, 200, 300)[0]') == 100
assert e.run('{"a": 1000, "b": 2000, "c": 3000}["a"]') == 1000
e.run('a = [10, 20, 30][0]')
e.run('b = (100, 200, 300)[0]')
e.run('c = {"a": 1000, "b": 2000, "c": 3000}["a"]')
assert e.symbol_table['a'] == 10
assert e.symbol_table['b'] == 100
assert e.symbol_table['c'] == 1000
e.symbol_table['l'] = [11, 22, 33]
assert e.run('l[2]') == 33
e.run('l[2] = 44')
assert e.symbol_table['l'] == [11, 22, 44]
def test_try():
e = Evaluator()
err = 'You can not use `try` syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
try:
x = 1
except:
pass
'''
)
assert 'x' not in e.symbol_table
def test_tuple():
e = Evaluator()
assert e.run('(1, 1, 2, 3, 3)') == (1, 1, 2, 3, 3)
e.run('a = (1, 1, 2, 3, 3)')
assert e.symbol_table['a'] == (1, 1, 2, 3, 3)
def test_unaryop():
e = Evaluator()
assert e.run('~100') == ~100
assert e.run('not 100') == (not 100)
assert e.run('+100') == +100
assert e.run('-100') == -100
def test_while():
total = 0
i = 1
while total > 100:
total += i
i += i
if i % 10 == 0:
i += 1
else:
total = total + 10000
e = Evaluator()
e.run(
'''
total = 0
i = 1
while total > 100:
total += i
i += i
if i % 10 == 0:
i += 1
else:
total = total + 10000
'''
)
assert e.symbol_table['total'] == total
r = 0
while True:
break
else:
r += 10
e.run(
'''
r = 0
while True:
break
else:
r += 10
'''
)
assert e.symbol_table['r'] == 0
def test_with():
e = Evaluator()
err = 'You can not use `with` syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
with some:
x = 1
'''
)
assert 'x' not in e.symbol_table
def test_yield():
e = Evaluator()
err = 'You can not use `yield` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('x = yield f()')
assert 'x' not in e.symbol_table
def test_yield_from():
e = Evaluator()
err = 'You can not use `yield from` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('x = yield from f()')
assert 'x' not in e.symbol_table
@pytest.fixture(scope='module')
def event_loop():
loop = asyncio.new_event_loop()
yield loop
loop.close()
@pytest.fixture(scope='module')
async def bot(event_loop):
return FakeBot(
loop=event_loop,
process_pool_executor=ProcessPoolExecutor(),
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
(
'expr, expected_decimal_result, expected_num_result,'
'expected_decimal_local, expected_num_local'
),
[
('1', D('1'), 1, {}, {}),
('1+2', D('3'), 3, {}, {}),
(
'0.1+0.1+0.1+0.1+0.1+0.1+0.1+0.1+0.1+0.1',
D('1'),
0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1,
{},
{},
),
('1-2', D('-1'), -1, {}, {}),
('4*5', D('20'), 20, {}, {}),
('1/2', D('0.5'), 0.5, {}, {}),
('10%3', D('1'), 1, {}, {}),
('2**3', D('8'), 8, {}, {}),
('(1+2)**3', D('27'), 27, {}, {}),
('max(1,2,3,4,5)', D('5'), 5, {}, {}),
('math.floor(3.2)', D('3'), 3, {}, {}),
('1+math.e', D(math.e) + D('1'), math.e + 1, {}, {}),
('[1,2,3]', [D('1'), D('2'), D('3')], [1, 2, 3], {}, {}),
(
'[x*10 for x in [0,1,2]]',
[D('0'), D('10'), D('20')],
[0, 10, 20],
{},
{},
),
('(1,2,3)', (D('1'), D('2'), D('3')), (1, 2, 3), {}, {}),
('{3,2,10}', {D('2'), D('3'), D('10')}, {2, 3, 10}, {}, {}),
('{x%2 for x in [1,2,3,4]}', {D('0'), D('1')}, {0, 1}, {}, {}),
('{"ab": 123}', {'ab': D('123')}, {'ab': 123}, {}, {}),
(
'{"k"+str(x): x-1 for x in [1,2,3]}',
{'k1': D('0'), 'k2': D('1'), 'k3': D('2')},
{'k1': 0, 'k2': 1, 'k3': 2},
{},
{},
),
('3 in [1,2,3]', True, True, {}, {}),
('[1,2,3,12,3].count(3)', 2, 2, {}, {}),
('{1,2} & {2,3}', {D('2')}, {2}, {}, {}),
('"item4"', 'item4', 'item4', {}, {}),
('"{}4".format("item")', 'item4', 'item4', {}, {}),
('money = 1000', None, None, {'money': D('1000')}, {'money': 1000}),
(
'money = 1000; money * 2',
D('2000'),
2000,
{'money': D('1000')},
{'money': 1000},
),
(
'money = 1000; f"{money}원"',
'1000원',
'1000원',
{'money': D('1000')},
{'money': 1000},
),
(
'a = 11;\nif a > 10:\n a += 100\na',
D('111'),
111,
{'a': D(111)},
{'a': 111},
),
],
)
async def test_calculate_fine(
bot,
expr: str,
expected_decimal_result,
expected_num_result,
expected_decimal_local: dict,
expected_num_local: dict,
):
decimal_result, decimal_local = await bot.run_in_other_process(
calculate,
expr,
decimal_mode=True,
)
num_result, num_local = await bot.run_in_other_process(
calculate,
expr,
decimal_mode=False,
)
assert expected_decimal_result == decimal_result
assert expected_decimal_local.keys() == decimal_local.keys()
for key in decimal_local.keys():
expected = expected_decimal_local[key]
local = decimal_local[key]
assert type(expected) == type(local)
if callable(expected):
assert expected(1) == local(1)
else:
assert expected == local
assert expected_num_result == num_result
assert expected_num_local.keys() == num_local.keys()
for key in num_local.keys():
expected = expected_num_local[key]
local = num_local[key]
assert type(expected) == type(local)
assert expected == local
| 1.984375 | 2 |
upass.py | yoshiumw/upass | 0 | 94084 | <reponame>yoshiumw/upass<gh_stars>0
"""U-Pass Auto Completer (upass.py)
This script allows university students in Vancouver to renew their U-Pass automatically.
This tool requires an additional Python script (init.py) that creates a textfile that this script reads.
This script requires that 'selenium' be installed within the Python environment you are running this script to.
"""
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common import exceptions
import time
import sys
#Tries to open info.txt, if doesn't exist throws error.
try:
f = open("info.txt", "r")
school = int(f.readline())
email = f.readline()
pw = f.readline()
except:
print("Please run init.py before this script.")
sys.exit()
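# Expected layout of info.txt as written by init.py (values hypothetical):
#   line 1: school index used by the branches below (e.g. 9 for SFU)
#   line 2: portal username/email
#   line 3: portal password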
#Only chrome right now
#TODO: Other browser support.
driver = webdriver.Chrome(r"drivers\chromedriver.exe")  # raw string keeps the backslash literal
wait = WebDriverWait(driver, 10)
driver.set_page_load_timeout(10)
driver.get("https://upassbc.translink.ca/")
#U-Pass initial page
select_school = Select( driver.find_element_by_id("PsiId") )
select_school.select_by_index(school)
driver.find_element_by_id("goButton").click()
#School portal page, depends on what user put in text file.
if (school == 9 or school == 2 or school == 5): #sfu, kpu, bcit
user_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("username"))
password_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("password"))
submit_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_name("submit"))
user_element.send_keys(email)
password_element.send_keys(pw)
try:
submit_element.click()
except exceptions.StaleElementReferenceException:
pass
elif (school == 4 or school == 7): #ubc, ecarr
user_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("username"))
password_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("password"))
submit_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_name("_eventId_proceed"))
user_element.send_keys(email)
password_element.send_keys(pw)
try:
submit_element.click()
except exceptions.StaleElementReferenceException:
pass
elif (school == 1 or school == 3 or school == 10): #douglas, nicola, vcc
user_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("ctl00_ContentPlaceHolder1_UsernameTextBox"))
password_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("ctl00_ContentPlaceHolder1_PasswordTextBox"))
submit_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_name("ctl00$ContentPlaceHolder1$SubmitButton"))
user_element.send_keys(email)
password_element.send_keys(pw)
try:
submit_element.click()
except exceptions.StaleElementReferenceException:
pass
elif (school == 6 or school == 8): #capu, langara
user_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("userNameInput"))
password_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id("passwordInput"))
submit_element = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_name("submitButton"))
user_element.send_keys(email)
password_element.send_keys(pw)
try:
submit_element.click()
except exceptions.StaleElementReferenceException:
pass
#clicks everything on the page that has tag input but works...
checkboxes = wait.until(lambda driver: driver.find_elements_by_tag_name("input"))
for cb in checkboxes:
try:
cb.click()
except exceptions.ElementNotVisibleException:
print("element not interactable")
pass
time.sleep(4)
driver.quit()
| 1.726563 | 2 |
dpipe/predict/tests/test_functional.py | samokhinv/deep_pipe | 38 | 94212 | from dpipe.predict.functional import *
def test_chain_decorators():
def append(num):
def decorator(func):
def wrapper():
return func() + [num]
return wrapper
return decorator
@append(1)
@append(2)
@append(3)
def f():
return []
chained = chain_decorators(
append(1), append(2), append(3),
predict=lambda: []
)
assert f() == chained()
| 1.515625 | 2 |
python-scripts/lag_comp.py | tschiex/toulbar2-diverse | 0 | 94340 | <reponame>tschiex/toulbar2-diverse
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os, sys
import matplotlib.pyplot as plt
from utils import dissim, read_cfn_gzip, read_sim_mat
python_path = "python3 /home/tschiex/toulbar2-diverse/python-scripts/"
tb2 = "/home/tschiex/toulbar2-diverse/build/bin/Linux/toulbar2"
AAs = "ARNDCQEGHILKMFPSTWYV"
n_aa = len(AAs)
AA3to1 = {'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C', 'GLU': 'E', 'GLN': 'Q', 'GLY': 'G', 'HIS': 'H',
'ILE': 'I', 'LEU': 'L', 'LYS': 'K', 'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S', 'THR': 'T', 'TRP': 'W',
'TYR': 'Y', 'VAL': 'V'}
parser = argparse.ArgumentParser()
parser.add_argument("--name", required=True,
help="Problem name")
parser.add_argument("--niter", default=20, type=int,
help="Number of lagrange iterations")
parser.add_argument("--divmin", default=1, type=int,
help="Minimum diversity between two solutions")
parser.add_argument("--cpd", action="store_true", default=False,
help="Computational Protein Design")
parser.add_argument("--msim", default=None,
help="Similarity matrix (cpd)")
args = parser.parse_args()
name = args.name
divmin = args.divmin
if args.cpd:
cpd_str = " --cpd "
else:
cpd_str = ""
cfn_filename = name + ".cfn.gz"
cfn = read_cfn_gzip(cfn_filename)
sols_mdd_filename = name + "_divmin" + str(divmin) + "_nsols.sols"
sol_filename = name + ".gmec"
mult_div_cmd = python_path + "mult_div_regular.py -i " + cfn_filename + " -o " + sols_mdd_filename + \
" --divmin " + str(divmin) + " --nsols 2 --type mdd" + cpd_str
os.system(mult_div_cmd)
if args.msim:
msim = read_sim_mat(args.msim)
else:
msim = None
# Recover cstar and gmec from sols_mdd_file
with open(sols_mdd_filename, 'r') as sols_mdd:
lines = sols_mdd.readlines()
sol_file = open(sol_filename, 'w')
sol = lines[1]
sol_file.write(sol)
sol = [int(i) for i in sol.split(" ")]
sol_file.close()
cstar_line = 5
xstar_line = 4
if (cpd_str != ""):
cstar_line = 7
xstar_line = 5
cstar = float(lines[cstar_line][:-1])
xstar = [int(xi) for xi in lines[xstar_line][:-1].split(' ')]
print("cstar " + str(cstar))
"""
# Compute qbest
ql_filename = name + "_ql.txt"
qplot_cmd = python_path + "qplot.py -i " + cfn_filename + " -o " + ql_filename + " -s " + \
sol_filename + " --divmin 1" + cpd_str
os.system(qplot_cmd)
with open(ql_filename, 'r') as ql_file:
lines = ql_file.readlines()
qbest = float(lines[-1].split(' ')[1])
print("qbest " + str(qbest))
"""
#######################################
############ Supergradient ############
#######################################
def read_ql_list(output_filename):
with open(output_filename, 'r') as f:
lines = f.readlines()
ql_line = lines[-1]
xbest = lines[-3][1:-2]
lbest = float(lines[-4].split(" ")[3])
ql_list = [float(ql) for ql in ql_line[1:-1].split(', ')]
xbest = [int(xi) for xi in xbest.split(', ')]
return (xbest, ql_list, lbest)
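# Assumed tail layout of the .lag file written by divmin_lagrangian.py,
# inferred from the parsing above (an assumption, not a checked format):
#   lines[-4]: "... <lbest>" with lbest as the 4th whitespace token
#   lines[-3]: "[x0, x1, ..., xn]", the best primal assignment
#   lines[-1]: "[q0, q1, ..., qT]", one dual value per iteration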
vars = list(cfn['variables'].keys())
def step_plot(step, params, l, divmin, niter):
for h in params:
output_filename = name + "_" + step + "_h" + str(h) + "2.lag"
cmd = python_path + "divmin_lagrangian.py -i " + cfn_filename + " -o " + output_filename + \
" -s " + sol_filename + " -l " + str(l) + " --divmins " + str(divmin) + \
" --niter " + str(niter) + " --step " + step + " --stepparam " \
+ str(h) + cpd_str
os.system(cmd)
(xbest, ql_list, lbest) = read_ql_list(output_filename)
# Compute diversity measure between the first solution and xbest
div = 0
for var_index, v in enumerate(vars):
if args.cpd:
div += dissim(cfn['variables'][v][sol[var_index]][0], cfn['variables'][v][xbest[var_index]][0], AAs,
msim)
else:
div += dissim(sol[var_index], xbest[var_index], None, None)
E = ql_list[-1] + lbest * (div - divmin)
plt.plot(ql_list, label=f'{step} {h}\n(D,E)= ({div}, {E:.4})')
plt.plot([0, niter], [cstar, cstar], label="cstar " + str(cstar))
plt.legend()
plt.title(f'{name}, {step}, divmin {divmin}')
plt.xlabel('Number of iterations t')
plt.ylabel("Best dual value qbest_t")
plt.savefig(name + "_" + step + "_divmin" + str(divmin))
plt.close()
"""
step_plot("cst_stepsize", [0.05, 0.01, 0.005, 0.001], 0, divmin, args.niter)
step_plot("cst_steplength", [0.05, 0.01, 0.005, 0.001], 0, divmin, args.niter)
step_plot("squaresum_stepsize", [0.1, 1, 10], 0, divmin, args.niter)
step_plot("nonsum_stepsize", [0.1, 1, 10], 0, divmin, args.niter)
step_plot("nonsum_steplength", [0.1, 1, 10], 0, divmin, args.niter)
step_plot("polyak", [0.1, 1, 10], 0, divmin, args.niter)
"""
step_plot("squaresum_stepsize", [0.1], 0, divmin, args.niter) | 1.648438 | 2 |
tests/interface_test.py | h0uter/sensor_director | 0 | 94468 | <reponame>h0uter/sensor_director
import sensor_director
def test_interface():
# frame_a =
    point_b = (0, 0, 0)
rot = sensor_director.determine_look_at_quat() | 0.9375 | 1 |
lib/udp_handler.py | varunmittal91/Distributed-Key-Value-Store | 0 | 94596 | from .handler import handler
try:
import socketserver
except ImportError:
import SocketServer as socketserver
class udp_handler:
def __init__(self, log, host, port):
log.log(1, "Starting TCP server on Host: '%s' and Port: '%d'" % (host, port))
socketserver.UDPServer.allow_reuse_address = True
self.server = socketserver.UDPServer((host, port), handler)
try:
log.log(2, "Staring infinite loop")
self.server.serve_forever()
except:
log.log(0, "Excetion, exiting loop")
self.server.server_close()
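# Hypothetical usage: the surrounding project constructs this with its own
# logger, where log.log(level, message) matches the calls above.
#
#   udp_handler(log, '0.0.0.0', 9000)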
| 1.101563 | 1 |
src/openmovement/load/base_data.py | digitalinteraction/openmovement-python | 0 | 94724 | <reponame>digitalinteraction/openmovement-python<filename>src/openmovement/load/base_data.py
"""
Base class for timeseries data loader
"""
from abc import ABC, abstractmethod
class BaseData(ABC):
def __init__(self, filename, verbose=False):
"""
Construct a data object from a file.
:param filename: The path to the source file.
:param verbose: Output more detailed information.
"""
self.filename = filename
self.verbose = verbose
pass
# Nothing to do at start of 'with'
def __enter__(self):
return self
# Close handle at end of 'with'
def __exit__(self, exc_type, exc_value, traceback):
self.close()
# Close handle when destructed
def __del__(self):
self.close()
# Iterate
def __iter__(self):
return iter(self.get_sample_values())
@abstractmethod
def close(self):
pass
@abstractmethod
def get_sample_values(self):
"""
Get the sample values as a single ndarray.
:returns: An ndarray of (time, accel_x, accel_y, accel_z) or (time, accel_x, accel_y, accel_z, gyro_x, gyro_y, gyro_z)
where 'time' is in seconds since the epoch.
"""
pass
@abstractmethod
def get_samples(self, use_datetime64=True):
"""
Return an DataFrame for (time, accel_x, accel_y, accel_z) or (time, accel_x, accel_y, accel_z, gyro_x, gyro_y, gyro_z)
:param use_datetime64: (Default) time is in datetime64[ns]; otherwise in seconds since the epoch.
"""
pass
# Time of first sample (seconds since epoch)
@abstractmethod
def get_start_time(self):
pass
@abstractmethod
def get_sample_rate(self):
pass
# The total number of samples (only an estimate if not all loaded)
@abstractmethod
def get_num_samples(self):
pass
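# Hypothetical minimal subclass illustrating the contract above; the flat
# (time, x, y, z) CSV layout and the fixed sample rate are assumptions for
# illustration only, not part of the real openmovement loaders.
#
#   import numpy as np
#
#   class CsvData(BaseData):
#       def close(self):
#           pass
#       def get_sample_values(self):
#           return np.loadtxt(self.filename, delimiter=',')
#       def get_samples(self, use_datetime64=True):
#           raise NotImplementedError
#       def get_start_time(self):
#           return self.get_sample_values()[0, 0]
#       def get_sample_rate(self):
#           return 100.0
#       def get_num_samples(self):
#           return len(self.get_sample_values())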
| 2.03125 | 2 |
migrations/versions/2021_10_04__ed04df659d38.py | spykard/popularplaces-app | 3 | 94852 | """empty message
Revision ID: ed0<PASSWORD>38
Revises: <PASSWORD>
Create Date: 2021-10-04 17:16:26.748838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('User', sa.Column('active', sa.Boolean(), nullable=False, server_default='True'))
op.create_index(op.f('ix_User_active'), 'User', ['active'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_User_active'), table_name='User')
op.drop_column('User', 'active')
# ### end Alembic commands ###
| 1.328125 | 1 |
SimPEG/Utils/interputils.py | KyuboNoh/HY | 1 | 94980 | <filename>SimPEG/Utils/interputils.py
import numpy as np
import scipy.sparse as sp
from matutils import mkvc, sub2ind, spzeros
try:
import interputils_cython as pyx
_interp_point_1D = pyx._interp_point_1D
_interpmat1D = pyx._interpmat1D
_interpmat2D = pyx._interpmat2D
_interpmat3D = pyx._interpmat3D
_interpCython = True
except ImportError, e:
print """Efficiency Warning: Interpolation will be slow, use setup.py!
python setup.py build_ext --inplace
"""
_interpCython = False
def interpmat(locs, x, y=None, z=None):
"""
Local interpolation computed for each receiver point in turn
:param numpy.ndarray loc: Location of points to interpolate to
:param numpy.ndarray x: Tensor vector of 1st dimension of grid.
:param numpy.ndarray y: Tensor vector of 2nd dimension of grid. None by default.
:param numpy.ndarray z: Tensor vector of 3rd dimension of grid. None by default.
:rtype: scipy.sparse.csr.csr_matrix
:return: Interpolation matrix
.. plot::
import SimPEG
import numpy as np
import matplotlib.pyplot as plt
locs = np.random.rand(50)*0.8+0.1
x = np.linspace(0,1,7)
dense = np.linspace(0,1,200)
fun = lambda x: np.cos(2*np.pi*x)
Q = SimPEG.Utils.interpmat(locs, x)
plt.plot(x, fun(x), 'bs-')
plt.plot(dense, fun(dense), 'y:')
plt.plot(locs, Q*fun(x), 'mo')
plt.plot(locs, fun(locs), 'rx')
plt.show()
"""
npts = locs.shape[0]
locs = locs.astype(float)
x = x.astype(float)
if y is None and z is None:
shape = [x.size,]
inds, vals = _interpmat1D(mkvc(locs), x)
elif z is None:
y = y.astype(float)
shape = [x.size, y.size]
inds, vals = _interpmat2D(locs, x, y)
else:
y = y.astype(float)
z = z.astype(float)
shape = [x.size, y.size, z.size]
inds, vals = _interpmat3D(locs, x, y, z)
I = np.repeat(range(npts),2**len(shape))
J = sub2ind(shape,inds)
Q = sp.csr_matrix((vals,(I, J)),
shape=(npts, np.prod(shape)))
return Q
if not _interpCython:
def _interp_point_1D(x, xr_i):
"""
given a point, xr_i, this will find which two integers it lies between.
:param numpy.ndarray x: Tensor vector of 1st dimension of grid.
:param float xr_i: Location of a point
:rtype: int,int,float,float
:return: index1, index2, portion1, portion2
"""
im = np.argmin(abs(x-xr_i))
if xr_i - x[im] >= 0: # Point on the left
ind_x1 = im
ind_x2 = im+1
elif xr_i - x[im] < 0: # Point on the right
ind_x1 = im-1
ind_x2 = im
ind_x1 = max(min(ind_x1, x.size-1), 0)
ind_x2 = max(min(ind_x2, x.size-1), 0)
if ind_x1 == ind_x2:
return ind_x1, ind_x1, 0.5, 0.5
hx = x[ind_x2] - x[ind_x1]
wx1 = 1 - (xr_i - x[ind_x1])/hx
wx2 = 1 - (x[ind_x2] - xr_i)/hx
return ind_x1, ind_x2, wx1, wx2
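    # Worked example (hypothetical): for x = [0., 1., 2.] and xr_i = 0.25,
    # the point falls between indices 0 and 1 with hx = 1, giving weights
    # wx1 = 0.75 and wx2 = 0.25, i.e. the function returns (0, 1, 0.75, 0.25).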
def _interpmat1D(locs, x):
"""Use interpmat with only x component provided."""
nx = x.size
npts = locs.shape[0]
inds, vals = [], []
for i in range(npts):
ind_x1, ind_x2, wx1, wx2 = _interp_point_1D(x, locs[i])
inds += [ind_x1, ind_x2]
vals += [wx1,wx2]
return inds, vals
def _interpmat2D(locs, x, y):
"""Use interpmat with only x and y components provided."""
nx = x.size
ny = y.size
npts = locs.shape[0]
inds, vals = [], []
for i in range(npts):
ind_x1, ind_x2, wx1, wx2 = _interp_point_1D(x, locs[i, 0])
ind_y1, ind_y2, wy1, wy2 = _interp_point_1D(y, locs[i, 1])
inds += [( ind_x1, ind_y1),
( ind_x1, ind_y2),
( ind_x2, ind_y1),
( ind_x2, ind_y2)]
vals += [wx1*wy1,
wx1*wy2,
wx2*wy1,
wx2*wy2]
return inds, vals
def _interpmat3D(locs, x, y, z):
"""Use interpmat."""
nx = x.size
ny = y.size
nz = z.size
npts = locs.shape[0]
inds, vals = [], []
for i in range(npts):
ind_x1, ind_x2, wx1, wx2 = _interp_point_1D(x, locs[i, 0])
ind_y1, ind_y2, wy1, wy2 = _interp_point_1D(y, locs[i, 1])
ind_z1, ind_z2, wz1, wz2 = _interp_point_1D(z, locs[i, 2])
inds += [( ind_x1, ind_y1, ind_z1),
( ind_x1, ind_y2, ind_z1),
( ind_x2, ind_y1, ind_z1),
( ind_x2, ind_y2, ind_z1),
( ind_x1, ind_y1, ind_z2),
( ind_x1, ind_y2, ind_z2),
( ind_x2, ind_y1, ind_z2),
( ind_x2, ind_y2, ind_z2)]
vals += [wx1*wy1*wz1,
wx1*wy2*wz1,
wx2*wy1*wz1,
wx2*wy2*wz1,
wx1*wy1*wz2,
wx1*wy2*wz2,
wx2*wy1*wz2,
wx2*wy2*wz2]
return inds, vals
if __name__ == '__main__':
from SimPEG import *
import matplotlib.pyplot as plt
locs = np.random.rand(50)*0.8+0.1
x = np.linspace(0,1,7)
dense = np.linspace(0,1,200)
fun = lambda x: np.cos(2*np.pi*x)
Q = Utils.interpmat(locs, x)
plt.plot(x, fun(x), 'bs-')
plt.plot(dense, fun(dense), 'y:')
plt.plot(locs, Q*fun(x), 'mo')
plt.plot(locs, fun(locs), 'rx')
plt.show()
| 1.539063 | 2 |
code/Experiments/neon-master/examples/babi/util.py | matthijsvk/convNets | 53 | 95108 | # ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Utility functions for bAbI example and demo.
"""
from neon.data import BABI
from neon.initializers import GlorotUniform, Uniform, Orthonormal
from neon.layers import Affine, GRU, LookupTable, MergeMultistream, LSTM
from neon.models import Model
from neon.transforms import Logistic, Softmax, Tanh
# list of bAbI tasks
subset = 'en'
task_list = [
'qa1_single-supporting-fact',
'qa2_two-supporting-facts',
'qa3_three-supporting-facts',
'qa4_two-arg-relations',
'qa5_three-arg-relations',
'qa6_yes-no-questions',
'qa7_counting',
'qa8_lists-sets',
'qa9_simple-negation',
'qa10_indefinite-knowledge',
'qa11_basic-coreference',
'qa12_conjunction',
'qa13_compound-coreference',
'qa14_time-reasoning',
'qa15_basic-deduction',
'qa16_basic-induction',
'qa17_positional-reasoning',
'qa18_size-reasoning',
'qa19_path-finding',
'qa20_agents-motivations',
]
def babi_handler(data_dir, task_number):
"""
Handle for bAbI task.
Args:
data_dir (string) : Path to bAbI data directory.
task_number (int) : The task ID from the bAbI dataset (1-20).
Returns:
BABI : Handler for bAbI task.
"""
task = task_list[task_number - 1]
return BABI(path=data_dir, task=task, subset=subset)
def create_model(vocab_size, rlayer_type):
"""
Create LSTM/GRU model for bAbI dataset.
Args:
vocab_size (int) : String of bAbI data.
rlayer_type (string) : Type of recurrent layer to use (gru or lstm).
Returns:
Model : Model of the created network
"""
# recurrent layer parameters (default gru)
rlayer_obj = GRU if rlayer_type == 'gru' else LSTM
rlayer_params = dict(output_size=100, reset_cells=True,
init=GlorotUniform(), init_inner=Orthonormal(0.5),
activation=Tanh(), gate_activation=Logistic())
# if using lstm, swap the activation functions
if rlayer_type == 'lstm':
rlayer_params.update(dict(activation=Logistic(), gate_activation=Tanh()))
# lookup layer parameters
lookup_params = dict(vocab_size=vocab_size, embedding_dim=50, init=Uniform(-0.05, 0.05))
# Model construction
story_path = [LookupTable(**lookup_params), rlayer_obj(**rlayer_params)]
query_path = [LookupTable(**lookup_params), rlayer_obj(**rlayer_params)]
layers = [MergeMultistream(layers=[story_path, query_path], merge="stack"),
Affine(vocab_size, init=GlorotUniform(), activation=Softmax())]
return Model(layers=layers)
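# Example (editor's sketch): assuming a neon backend has been generated
# elsewhere (e.g. via gen_backend), a task-1 model could be built as
#
#     babi = babi_handler('path/to/babi/data', 1)
#     model = create_model(babi.vocab_size, 'gru')
#
# where vocab_size is an attribute of the BABI handler, as in neon's bAbI
# example.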
| 1.867188 | 2 |
daily_problems/problem_0_to_100/problem_14.py | rrwt/daily-coding-challenge | 1 | 95236 | <reponame>rrwt/daily-coding-challenge
"""
The area of a circle is defined as πr^2. Estimate π to 3 decimal places using a Monte Carlo method.
"""
import math
from random import random
def monte_carlo_pi():
"""
    A circle of radius 0.5 inscribed in the unit square has area 0.25 * pi,
    while the square itself has area 1. If we randomly generate many points
    inside the square, the fraction that lands within the circle approaches
    the ratio of the two areas, so 4 * (inside / total) approaches pi.
"""
inside: int = 0
for i in range(1_000_000):
x = random()
y = random()
distance = math.sqrt(pow(abs(0.5 - x), 2) + pow(abs(0.5 - y), 2))
if distance <= 0.5:
inside += 1
print(4 * inside / 1_000_000)
if __name__ == "__main__":
monte_carlo_pi()
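# Editor's note: the standard error of a Monte Carlo estimate shrinks like
# 1/sqrt(N), so 10**6 samples typically pin pi down to two or three decimal
# places; individual runs will fluctuate around 3.1416.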
| 3.015625 | 3 |
ui/colors.py | jdcanas/minesweeper | 0 | 95364 | <reponame>jdcanas/minesweeper<filename>ui/colors.py
# Define some colors
from game.Cell import Cell
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
GRAY = (211, 211, 211)
def getCellColor(cell: Cell):
if cell.isFlipped and not cell.isMine:
color = GRAY
elif cell.isFlipped and cell.isMine:
color = RED
elif cell.isFlagged:
color = YELLOW
else:
color = WHITE
return color | 1.78125 | 2 |
DynamicProgramming/ScoreProblem.py | kopok2/algorithms | 0 | 95492 | <reponame>kopok2/algorithms
# coding=utf-8
"""Score problem dynamic programming solution Python implementation."""
def sp(n):
dp = [0] * (n + 1)
dp[0] = 1
for i in range(3, n + 1):
dp[i] += dp[i - 3]
for i in range(5, n + 1):
dp[i] += dp[i - 5]
for i in range(10, n + 1):
dp[i] += dp[i - 10]
return dp[n]
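# Sanity checks (editor's addition): with scores {3, 5, 10} there is exactly
# one way to reach 8 (3 + 5) and two ways to reach 13 (3 + 10 and 3 + 5 + 5).
assert sp(8) == 1
assert sp(13) == 2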
if __name__ == "__main__":
for x in range(101):
print(x, sp(x))
| 2.546875 | 3 |
ref/v1.1/rocksat_gui.py | vt-rocksat-2017/dashboard | 1 | 95620 | #!/usr/bin/env python
import numpy as np
import sys
from datetime import datetime
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
#from adsb_table import *
from utilities import *
from plot import *
from mission_clock import *
class main_widget(QtGui.QWidget):
def __init__(self):
super(main_widget, self).__init__()
self.initUI()
def initUI(self):
self.grid = QtGui.QGridLayout()
#self.setLayout(self.grid)
#self.grid.setColumnStretch(0,1)
#self.grid.setColumnStretch(1,1)
class rocksat_gui(QtGui.QMainWindow):
def __init__(self, lock):
super(rocksat_gui, self).__init__()
#self.resize(1000,1000)
#self.move(50,50)
self.setWindowTitle('Rocksat-X 2016 Dashboard, V1.1')
self.setAutoFillBackground(True)
#self.ants_static_labels = []
self.main_window = main_widget()
self.callback = None #Callback accessor function
self.update_rate = 200 #Feedback Query Auto Update Interval in milliseconds
self.packet_list = []
self.valid_frames = []
self.fault_frames = []
self.recon_frames = []
self.valid_cnt = 0
self.fault_cnt = 0
self.recon_cnt = 0
self.total_cnt = 0
self.packet_status = 3 #0 = valid, 1 = FAULT, 2 = RECONNECT, 3 = Initialization
#self.use_rx_offset = False
#Plotting Vectors
self.time_tx = []
self.time_rx = []
self.rx_offset = []
self.temp = []
self.pressure = []
self.alt_ft = []
self.alt_m = []
self.yaw = []
self.pitch = []
self.roll = []
self.x_accel = []
self.y_accel = []
self.z_accel = []
self.initUI()
self.darken()
self.setFocus()
def initUI(self):
self.initMainWindow()
self.initMainFrames()
self.initStatusFrame()
self.initRTDFrame()
#self.initTimeFrame()
self.initTabControl()
#self.initMainTab()
self.initPressTab()
self.initAltitudeTab()
self.initTempTab()
self.initAttitudeTab()
self.initAccelTab()
self.initTimers()
self.connectSignals()
#self.show()
self.showMaximized()
def initTimers(self):
self.updateTimer = QtCore.QTimer(self)
def connectSignals(self):
self.useRxOffset_cb.stateChanged.connect(self.useRxOffset_event)
QtCore.QObject.connect(self.updateTimer, QtCore.SIGNAL('timeout()'), self.updatePackets)
self.updateTimer.start(self.update_rate)
def updatePackets(self):
self.updatePacketStatus()
self.updatePlots2()
if self.valid_cnt > 0:
self.updateRTD()
def updatePacketStatus(self):
self.packet_status = self.callback.get_last_frame_type()
if self.packet_status == 0: #Valid
self.status_lbl.setText("VALID")
#self.status_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.status_lbl.setStyleSheet("QLabel {font-size:18px; font-weight:bold; color:rgb(0,255,0);}")
elif self.packet_status == 1: #Serial Fault
self.status_lbl.setText("FAULT")
#self.status_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.status_lbl.setStyleSheet("QLabel {font-size:18px; font-weight:bold; color:rgb(255,0,0);}")
elif self.packet_status == 2: #Reconnection
self.status_lbl.setText("RECONN")
#self.status_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.status_lbl.setStyleSheet("QLabel {font-size:18px; font-weight:bold; color:rgb(255,255,0);}")
elif self.packet_status == 3: #Reconnection
self.status_lbl.setText("STANDBY")
#self.status_lbl.setAlignment(QtCore.Qt.AlignCenter|QtCore.Qt.AlignVCenter)
self.status_lbl.setStyleSheet("QLabel {font-size:18px; font-weight:bold; color:rgb(255,255,0);}")
[a,b,c,d] = self.callback.get_frame_counts()
self.total_cnt = a
self.valid_cnt = b
self.fault_cnt = c
self.recon_cnt = d
statusmsg = ("| Valid Count: %3i | Fault Count: %i | Reconnect Count: %i |" % (self.valid_cnt, self.fault_cnt, self.recon_cnt))
self.statusBar().showMessage(statusmsg)
self.total_cnt_lbl.setText(str(self.total_cnt))
self.valid_cnt_lbl.setText(str(self.valid_cnt))
self.fault_cnt_lbl.setText(str(self.fault_cnt))
self.recon_cnt_lbl.setText(str(self.recon_cnt))
def updatePlots2(self):
if self.useRxOffset_cb.isChecked() == True:
x = self.callback.get_rx_offset()
self.rx_offset = x
elif self.useRxOffset_cb.isChecked() == False:
x = self.callback.get_time_tx()
self.time_tx = x
try:
if self.tabs.currentIndex() == 0: #TEMP
self.temp = self.callback.get_temperature()
self.temp_plot.update_figure(x, self.temp)
elif self.tabs.currentIndex() == 1: #PRESSURE
self.pressure = self.callback.get_pressure()
self.press_plot.update_figure(x, self.pressure)
elif self.tabs.currentIndex() == 2: #ALTITUDE
self.alt_ft, self.alt_m = self.callback.get_altitude()
self.alt_ft_plot.update_figure(x, self.alt_ft)
self.alt_m_plot.update_figure(x, self.alt_m)
elif self.tabs.currentIndex() == 3: #ATTITUDE
self.yaw, self.pitch, self.roll = self.callback.get_attitude()
self.x_att_plot.update_figure(x, self.yaw)
self.y_att_plot.update_figure(x, self.pitch)
self.z_att_plot.update_figure(x, self.roll)
elif self.tabs.currentIndex() == 4:
self.x_accel, self.y_accel, self.z_accel = self.callback.get_acceleration()
self.x_accel_plot.update_figure(x, self.x_accel)
self.y_accel_plot.update_figure(x, self.y_accel)
self.z_accel_plot.update_figure(x, self.z_accel)
except Exception as e:
print self.utc_ts() + "Plotting Error"
print e
def updateRTD(self):
[a,b,c,d,e,f,g,h,i,j,k,l,m,n,o] = self.callback.get_last_measurements()
self.valid_rx_ts_lbl.setText(str(a))
self.call_lbl.setText(str(b))
self.pkt_id_lbl.setText(str(c))
self.ts_tx_lbl.setText(str(d))
self.temp_lbl.setText(str(e))
self.pres_lbl.setText(str(f))
self.alt_ft_lbl.setText("{:6.2f}".format(g))
self.alt_m_lbl.setText("{:6.2f}".format(h))
self.yaw_lbl.setText(str(i))
self.pitch_lbl.setText(str(j))
self.roll_lbl.setText(str(k))
self.x_accel_lbl.setText(str(l))
self.y_accel_lbl.setText(str(m))
self.z_accel_lbl.setText(str(n))
self.rx_offset_lbl.setText(str(o))
def initMainWindow(self):
self.setCentralWidget(self.main_window)
exitAction = QtGui.QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.triggered.connect(QtGui.qApp.quit)
exportAction = QtGui.QAction('Export', self)
exportAction.setShortcut('Ctrl+E')
exportAction.triggered.connect(QtGui.qApp.quit)
menubar = self.menuBar()
self.fileMenu = menubar.addMenu('&File')
self.fileMenu.addAction(exitAction)
self.fileMenu.addAction(exportAction)
self.statusBar().showMessage("| Disconnected | Current Count: 000 |")
def set_start_time(self, start):
self.callback.set_start_time(start)
self.useRxOffset_cb.setEnabled(True)
def initMainFrames(self):
#Mission Clock
self.time_fr = mission_clock(self)
#Status frame
self.status_fr = QtGui.QFrame(self)
self.status_fr.setFrameShape(QtGui.QFrame.StyledPanel)
#Real Time Data frame
self.rtd_fr = QtGui.QFrame(self)
self.rtd_fr.setFrameShape(QtGui.QFrame.StyledPanel)
#Control Frame
#self.time_fr.setFrameShape(QtGui.QFrame.StyledPanel)
#Plot Tab Frames
self.tab_fr = QtGui.QFrame(self)
self.tab_fr.setFrameShape(QtGui.QFrame.StyledPanel)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.time_fr)
vbox.addWidget(self.status_fr)
vbox.addWidget(self.rtd_fr)
vbox.addStretch(1)
hbox = QtGui.QHBoxLayout()
hbox.addLayout(vbox)
#hbox.addStretch(1)
hbox.addWidget(self.tab_fr)
self.main_window.setLayout(hbox)
def initTabControl(self):
self.tabs = QtGui.QTabWidget()
self.tabs.setTabPosition(QtGui.QTabWidget.South)
#self.main_tab = QtGui.QWidget()
#self.main_tab.grid = QtGui.QGridLayout()
#self.tabs.addTab(self.main_tab,"Main")
#self.main_tab.setAutoFillBackground(True)
#p = self.main_tab.palette()
#p.setColor(self.main_tab.backgroundRole(), QtCore.Qt.black)
#self.main_tab.setPalette(p)
self.temp_tab = QtGui.QWidget()
self.temp_tab.grid = QtGui.QGridLayout()
self.tabs.addTab(self.temp_tab,"Temperature")
self.temp_tab.setAutoFillBackground(True)
p = self.temp_tab.palette()
p.setColor(self.temp_tab.backgroundRole(), QtCore.Qt.black)
self.temp_tab.setPalette(p)
self.press_tab = QtGui.QWidget()
self.press_tab_grid = QtGui.QGridLayout()
self.tabs.addTab(self.press_tab,"Pressure")
self.press_tab.setAutoFillBackground(True)
p = self.press_tab.palette()
p.setColor(self.press_tab.backgroundRole(), QtCore.Qt.black)
self.press_tab.setPalette(p)
self.alt_tab = QtGui.QWidget()
self.alt_tab_grid = QtGui.QGridLayout()
self.tabs.addTab(self.alt_tab,"Altitude")
self.alt_tab.setAutoFillBackground(True)
p = self.alt_tab.palette()
p.setColor(self.alt_tab.backgroundRole(), QtCore.Qt.black)
self.alt_tab.setPalette(p)
self.attitude_tab = QtGui.QWidget()
self.attitude_tab_grid = QtGui.QGridLayout()
self.tabs.addTab(self.attitude_tab,"Attitude")
self.attitude_tab.setAutoFillBackground(True)
p = self.attitude_tab.palette()
p.setColor(self.attitude_tab.backgroundRole(), QtCore.Qt.black)
self.attitude_tab.setPalette(p)
self.accel_tab = QtGui.QWidget()
self.accel_tab_grid = QtGui.QGridLayout()
self.tabs.addTab(self.accel_tab,"Acceleration")
self.accel_tab.setAutoFillBackground(True)
p = self.accel_tab.palette()
p.setColor(self.accel_tab.backgroundRole(), QtCore.Qt.black)
self.accel_tab.setPalette(p)
self.tab_fr_grid = QtGui.QGridLayout()
self.tab_fr.setLayout(self.tab_fr_grid)
self.tab_fr_grid.addWidget(self.tabs)
def useRxOffset_event(self):
if self.useRxOffset_cb.isChecked() == True:
x_lbl = 'RX Time Offset [s]'
color = 'r'
elif self.useRxOffset_cb.isChecked() == False:
x_lbl = 'TX Time Offset [s]'
color = 'b'
self.temp_plot.set_label_x(x_lbl)
self.temp_plot.set_color(color)
self.press_plot.set_label_x(x_lbl)
self.press_plot.set_color(color)
self.alt_ft_plot.set_label_x(x_lbl)
self.alt_ft_plot.set_color(color)
self.alt_m_plot.set_label_x(x_lbl)
self.alt_m_plot.set_color(color)
self.x_att_plot.set_label_x(x_lbl)
self.x_att_plot.set_color(color)
self.y_att_plot.set_label_x(x_lbl)
self.y_att_plot.set_color(color)
self.z_att_plot.set_label_x(x_lbl)
self.z_att_plot.set_color(color)
self.x_accel_plot.set_label_x(x_lbl)
self.x_accel_plot.set_color(color)
self.y_accel_plot.set_label_x(x_lbl)
self.y_accel_plot.set_color(color)
self.z_accel_plot.set_label_x(x_lbl)
self.z_accel_plot.set_color(color)
def initTempTab(self):
self.temp_plot = MyDynamicMplCanvas(self.temp_tab, width=10, height=1, dpi=70, )
x_lbl = 'TX Time Offset [s]'
y_lbl = 'Temperature [C]'
self.temp_plot.set_labels(x_lbl, y_lbl)
self.temp_tab_grid = QtGui.QGridLayout()
self.temp_tab_grid.addWidget(self.temp_plot)
self.temp_tab.setLayout(self.temp_tab_grid)
def initPressTab(self):
self.press_plot = MyDynamicMplCanvas(self.press_tab, width=2, height=1, dpi=70, )
x_lbl = 'TX Time Offset [s]'
y_lbl = 'Pressure [mbar]'
self.press_plot.set_labels(x_lbl, y_lbl)
self.press_tab_grid = QtGui.QGridLayout()
self.press_tab_grid.addWidget(self.press_plot)
self.press_tab.setLayout(self.press_tab_grid)
def initAltitudeTab(self):
self.alt_ft_plot = MyDynamicMplCanvas(self.alt_tab, width=2, height=1, dpi=70, )
self.alt_m_plot = MyDynamicMplCanvas(self.alt_tab, width=2, height=1, dpi=70, )
x_lbl = 'TX Time Offset [s]'
y1_lbl = 'Altitude [ft]'
y2_lbl = 'Altitude [m]'
self.alt_ft_plot.set_labels(x_lbl, y1_lbl)
self.alt_m_plot.set_labels(x_lbl, y2_lbl)
self.alt_tab_grid = QtGui.QGridLayout()
self.alt_tab_grid.addWidget(self.alt_ft_plot)
self.alt_tab_grid.addWidget(self.alt_m_plot)
self.alt_tab.setLayout(self.alt_tab_grid)
def initAttitudeTab(self):
self.x_att_plot = MyDynamicMplCanvas(self.attitude_tab, width=2, height=.5, dpi=70, )
self.y_att_plot = MyDynamicMplCanvas(self.attitude_tab, width=2, height=.5, dpi=70, )
self.z_att_plot = MyDynamicMplCanvas(self.attitude_tab, width=2, height=.5, dpi=70, )
x_lbl = 'TX Time Offset [s]'
y1_lbl = 'Yaw [deg]'
y2_lbl = 'Pitch [deg]'
y3_lbl = 'Roll [deg]'
self.x_att_plot.set_labels(x_lbl, y1_lbl)
self.y_att_plot.set_labels(x_lbl, y2_lbl)
self.z_att_plot.set_labels(x_lbl, y3_lbl)
self.attitude_tab_grid = QtGui.QGridLayout()
self.attitude_tab_grid.addWidget(self.x_att_plot)
self.attitude_tab_grid.addWidget(self.y_att_plot)
self.attitude_tab_grid.addWidget(self.z_att_plot)
self.attitude_tab.setLayout(self.attitude_tab_grid)
def initAccelTab(self):
self.x_accel_plot = MyDynamicMplCanvas(self.accel_tab, width=2, height=1, dpi=70, )
self.y_accel_plot = MyDynamicMplCanvas(self.accel_tab, width=2, height=1, dpi=70, )
self.z_accel_plot = MyDynamicMplCanvas(self.accel_tab, width=2, height=1, dpi=70, )
x_lbl = 'TX Time Offset [s]'
y1_lbl = 'X Acceleration [g]'
y2_lbl = 'Y Acceleration [g]'
y3_lbl = 'Z Acceleration [g]'
self.x_accel_plot.set_labels(x_lbl, y1_lbl)
self.y_accel_plot.set_labels(x_lbl, y2_lbl)
self.z_accel_plot.set_labels(x_lbl, y3_lbl)
self.accel_tab_grid = QtGui.QGridLayout()
self.accel_tab_grid.addWidget(self.x_accel_plot)
self.accel_tab_grid.addWidget(self.y_accel_plot)
self.accel_tab_grid.addWidget(self.z_accel_plot)
self.accel_tab.setLayout(self.accel_tab_grid)
def initStatusFrame(self):
fr_lbl = QtGui.QLabel("Packet Status:")
fr_lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
fr_lbl.setStyleSheet("QLabel {font-size:18px; font-weight:bold; text-decoration:underline; color:rgb(255,255,255);}")
fr_lbl.setFixedHeight(25)
self.status_lbl = QtGui.QLabel("NOCONN")
self.status_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.status_lbl.setStyleSheet("QLabel {font-size:18px; font-weight:bold; color:rgb(255,255,255);}")
self.status_lbl.setFixedWidth(90)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(fr_lbl)
hbox1.addWidget(self.status_lbl)
lbl = QtGui.QLabel("Total Count:")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(125)
self.total_cnt_lbl = QtGui.QLabel("0")
self.total_cnt_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.total_cnt_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.total_cnt_lbl.setFixedWidth(80)
hbox2 = QtGui.QHBoxLayout()
hbox2.addWidget(lbl)
hbox2.addWidget(self.total_cnt_lbl)
lbl = QtGui.QLabel("Valid Count:")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(125)
self.valid_cnt_lbl = QtGui.QLabel("0")
self.valid_cnt_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.valid_cnt_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.valid_cnt_lbl.setFixedWidth(80)
hbox3 = QtGui.QHBoxLayout()
hbox3.addWidget(lbl)
hbox3.addWidget(self.valid_cnt_lbl)
lbl = QtGui.QLabel("Fault Count:")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(125)
self.fault_cnt_lbl = QtGui.QLabel("0")
self.fault_cnt_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.fault_cnt_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.fault_cnt_lbl.setFixedWidth(80)
hbox4 = QtGui.QHBoxLayout()
hbox4.addWidget(lbl)
hbox4.addWidget(self.fault_cnt_lbl)
lbl = QtGui.QLabel("Reconnect Count:")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(125)
self.recon_cnt_lbl = QtGui.QLabel("0")
self.recon_cnt_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.recon_cnt_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.recon_cnt_lbl.setFixedWidth(80)
hbox5 = QtGui.QHBoxLayout()
hbox5.addWidget(lbl)
hbox5.addWidget(self.recon_cnt_lbl)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
vbox.addLayout(hbox4)
vbox.addLayout(hbox5)
#vbox.addLayout(hbox6)
#vbox.addLayout(hbox7)
#vbox.addLayout(hbox8)
#vbox.addLayout(hbox9)
#vbox.addLayout(hbox10)
#vbox.addLayout(hbox11)
vbox.addStretch(1)
self.status_fr.setLayout(vbox)
def initRTDFrame(self):
lbl_width = 125
val_width = 80
fr_lbl = QtGui.QLabel("Real-Time Data")
fr_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
fr_lbl.setStyleSheet("QLabel {font-size:18px; font-weight:bold; text-decoration:underline; color:rgb(255,255,255);}")
fr_lbl.setFixedHeight(25)
lbl = QtGui.QLabel("Callsign: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.call_lbl = QtGui.QLabel("XXXXXX")
self.call_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.call_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.call_lbl.setFixedWidth(val_width)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(lbl)
hbox1.addWidget(self.call_lbl)
lbl = QtGui.QLabel("Packet ID: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.pkt_id_lbl = QtGui.QLabel("XXX")
self.pkt_id_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.pkt_id_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.pkt_id_lbl.setFixedWidth(val_width)
hbox2 = QtGui.QHBoxLayout()
hbox2.addWidget(lbl)
hbox2.addWidget(self.pkt_id_lbl)
lbl = QtGui.QLabel("TX Time Stamp [s]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.ts_tx_lbl = QtGui.QLabel("XXX")
self.ts_tx_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.ts_tx_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.ts_tx_lbl.setFixedWidth(val_width)
hbox3 = QtGui.QHBoxLayout()
hbox3.addWidget(lbl)
hbox3.addWidget(self.ts_tx_lbl)
lbl = QtGui.QLabel("Temperature [C]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.temp_lbl = QtGui.QLabel("XXX")
self.temp_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.temp_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.temp_lbl.setFixedWidth(val_width)
hbox4 = QtGui.QHBoxLayout()
hbox4.addWidget(lbl)
hbox4.addWidget(self.temp_lbl)
lbl = QtGui.QLabel("Pressure [mbar]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.pres_lbl = QtGui.QLabel("XXX")
self.pres_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.pres_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.pres_lbl.setFixedWidth(val_width)
hbox5 = QtGui.QHBoxLayout()
hbox5.addWidget(lbl)
hbox5.addWidget(self.pres_lbl)
lbl = QtGui.QLabel("Altitude [ft]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.alt_ft_lbl = QtGui.QLabel("XXX")
self.alt_ft_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.alt_ft_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.alt_ft_lbl.setFixedWidth(val_width)
hbox5_1 = QtGui.QHBoxLayout()
hbox5_1.addWidget(lbl)
hbox5_1.addWidget(self.alt_ft_lbl)
lbl = QtGui.QLabel("Altitude [m]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.alt_m_lbl = QtGui.QLabel("XXX")
self.alt_m_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.alt_m_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.alt_m_lbl.setFixedWidth(val_width)
hbox5_2 = QtGui.QHBoxLayout()
hbox5_2.addWidget(lbl)
hbox5_2.addWidget(self.alt_m_lbl)
lbl = QtGui.QLabel("Yaw [deg]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.yaw_lbl = QtGui.QLabel("XXX")
self.yaw_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.yaw_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.yaw_lbl.setFixedWidth(val_width)
hbox6 = QtGui.QHBoxLayout()
hbox6.addWidget(lbl)
hbox6.addWidget(self.yaw_lbl)
lbl = QtGui.QLabel("Pitch [deg]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.pitch_lbl = QtGui.QLabel("XXX")
self.pitch_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.pitch_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.pitch_lbl.setFixedWidth(val_width)
hbox7 = QtGui.QHBoxLayout()
hbox7.addWidget(lbl)
hbox7.addWidget(self.pitch_lbl)
lbl = QtGui.QLabel("Roll [deg]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.roll_lbl = QtGui.QLabel("XXX")
self.roll_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.roll_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.roll_lbl.setFixedWidth(val_width)
hbox8 = QtGui.QHBoxLayout()
hbox8.addWidget(lbl)
hbox8.addWidget(self.roll_lbl)
lbl = QtGui.QLabel("Accel-X [g]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.x_accel_lbl = QtGui.QLabel("XXX")
self.x_accel_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.x_accel_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.x_accel_lbl.setFixedWidth(val_width)
hbox9 = QtGui.QHBoxLayout()
hbox9.addWidget(lbl)
hbox9.addWidget(self.x_accel_lbl)
lbl = QtGui.QLabel("Accel-Y [g]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.y_accel_lbl = QtGui.QLabel("XXX")
self.y_accel_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.y_accel_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.y_accel_lbl.setFixedWidth(val_width)
hbox10 = QtGui.QHBoxLayout()
hbox10.addWidget(lbl)
hbox10.addWidget(self.y_accel_lbl)
lbl = QtGui.QLabel("Accel-Z [g]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.z_accel_lbl = QtGui.QLabel("XXX")
self.z_accel_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.z_accel_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.z_accel_lbl.setFixedWidth(val_width)
hbox11 = QtGui.QHBoxLayout()
hbox11.addWidget(lbl)
hbox11.addWidget(self.z_accel_lbl)
lbl = QtGui.QLabel("RX Offset [s]: ")
lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.rx_offset_lbl = QtGui.QLabel("XXX")
self.rx_offset_lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.rx_offset_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.rx_offset_lbl.setFixedWidth(val_width)
hbox12 = QtGui.QHBoxLayout()
hbox12.addWidget(lbl)
hbox12.addWidget(self.rx_offset_lbl)
lbl = QtGui.QLabel("RX Time Stamp: ")
lbl.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
lbl.setFixedWidth(lbl_width)
self.valid_rx_ts_lbl = QtGui.QLabel("")
self.valid_rx_ts_lbl.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.valid_rx_ts_lbl.setStyleSheet("QLabel {color:rgb(255,255,255);}")
self.valid_rx_ts_lbl.setFixedWidth(200)
vbox12 = QtGui.QVBoxLayout()
vbox12.addWidget(lbl)
vbox12.addWidget(self.valid_rx_ts_lbl)
self.useRxOffset_cb = QtGui.QCheckBox("Use RX Offset")
self.useRxOffset_cb.setStyleSheet("QCheckBox { background-color:rgb(0,0,0); color:rgb(255,0,0); }")
self.useRxOffset_cb.setChecked(False)
self.useRxOffset_cb.setEnabled(False)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(fr_lbl)
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
vbox.addLayout(hbox4)
vbox.addLayout(hbox5)
vbox.addLayout(hbox5_1)
vbox.addLayout(hbox5_2)
vbox.addLayout(hbox6)
vbox.addLayout(hbox7)
vbox.addLayout(hbox8)
vbox.addLayout(hbox9)
vbox.addLayout(hbox10)
vbox.addLayout(hbox11)
vbox.addLayout(hbox12)
vbox.addLayout(vbox12)
vbox.addWidget(self.useRxOffset_cb)
vbox.addStretch(1)
self.rtd_fr.setLayout(vbox)
def set_callback(self, callback):
self.callback = callback
def darken(self):
palette = QtGui.QPalette()
palette.setColor(QtGui.QPalette.Background,QtCore.Qt.black)
palette.setColor(QtGui.QPalette.WindowText,QtCore.Qt.black)
palette.setColor(QtGui.QPalette.Text,QtCore.Qt.white)
self.setPalette(palette)
def utc_ts(self):
        return str(datetime.utcnow()) + " UTC | "
def main():
app = QtGui.QApplication(sys.argv)
    ex = rocksat_gui(None)  # rocksat_gui is defined above; its lock argument is unused
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 1.523438 | 2 |
old/test2.py | archu2020/python-2 | 48 | 95748 | <gh_stars>10-100
import time
print("start")
start_time = time.time()
temp = 0
for i in range(10000000):
temp += i
print(temp)
end_time = time.time()
print(end_time - start_time)
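# Cross-check with the closed form (editor's addition): sum(range(n)) equals
# n * (n - 1) // 2.
n = 10000000
assert temp == n * (n - 1) // 2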
| 1.703125 | 2 |
probdists/Uniformdistribution.py | mhdzumair/probdists | 0 | 95876 | <reponame>mhdzumair/probdists<gh_stars>0
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Uniform(Distribution):
""" Uniform distribution class for calculating and
visualizing a Uniform distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data (list of floats) extracted from the data file
low (float) representing the smallest number in data
high (float) representing the highest number in data
"""
def __init__(self, low=0, high=10):
if low == high:
raise Exception('Invalid interval - start and end of interval cannot be the same')
self.low = low
self.high = high
Distribution.__init__(self, self.calculate_mean(),
self.calculate_stdev())
def replace_stats_with_data(self):
"""Function to calculate low and high from the data set
        Args:
None
Returns:
float: the low value
float: the high value
"""
self.low = min(self.data)
self.high = max(self.data)
if self.low == self.high:
raise Exception('Invalid interval - start and end of interval cannot be the same')
self.calculate_mean()
self.calculate_stdev()
return self.low, self.high
def calculate_mean(self, round_to=2):
"""Function to calculate the mean of the data set.
Args:
round_to (int): Round the mean value. [Default value: 2 floating point]
Returns:
float: mean of the data set
"""
self.mean = (self.low + self.high) / 2
return round(self.mean, round_to)
def calculate_stdev(self, sample=True, round_to=2):
"""Function to calculate the standard deviation of the data set.
Args:
            sample (bool): whether the data represents a sample or a
                population (unused here; the interval alone determines the stdev)
            round_to (int): Round the standard deviation. [Default value: 2 floating point]
Returns:
float: standard deviation of the data set
"""
sqr_interval = (self.high - self.low) ** 2
self.stdev = math.sqrt(sqr_interval / 12)
return round(self.stdev, round_to)
def calculate_cdf(self, x, round_to=2):
"""Cumulative distribution function calculator for the uniform distribution.
Args:
x (float): point for calculating the
cumulative distribution function
            round_to (int): Round the cumulative probability. [Default value: 2 floating point]
Returns:
float: cumulative distribution function output
"""
if x < self.low:
self.cdf = 0
elif self.low <= x <= self.high:
self.cdf = (x - self.low) / (self.high - self.low)
else:
self.cdf = 1
return round(self.cdf, round_to)
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title("Histogram of Data")
plt.xlabel("data")
plt.ylabel("count")
plt.show()
def calculate_pdf(self, x, round_to=2):
"""Probability density function calculator for the uniform distribution.
Args:
x (float): point for calculating the
probability density function
            round_to (int): Round the probability density. [Default value: 2 floating point]
Returns:
float: probability density function output
"""
self.pdf = 1 / (self.high - self.low) if self.high >= x >= self.low else 0
return round(self.pdf, round_to)
def plot_bar_pdf(self):
"""Function to plot the pdf of the uniform distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(int(self.low) - 5, int(self.high) + 5):
x.append(i)
self.calculate_pdf(i)
y.append(self.pdf)
# make the plots
plt.bar(x, y)
plt.title('Probability Density for Uniform Distribution')
plt.ylabel('Probability')
plt.xlabel('x')
plt.show()
return x, y
def __repr__(self):
"""Function to output the characteristics of the Uniform instance
Args:
None
Returns:
string: characteristics of the Uniform distribution
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
| 3.359375 | 3 |
nova/conf/configdrive.py | viveknandavanam/nova | 1 | 96004 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
config_drive_opts = [
cfg.StrOpt('config_drive_format',
default='iso9660',
choices=('iso9660', 'vfat'),
help="""
Configuration drive format
Configuration drive format that will contain metadata attached to the
instance when it boots.
Possible values:
* iso9660: A file system image standard that is widely supported across
operating systems. NOTE: Mind the libvirt bug
(https://bugs.launchpad.net/nova/+bug/1246201) - If your hypervisor
driver is libvirt, and you want live migrate to work without shared storage,
then use VFAT.
* vfat: For legacy reasons, you can configure the configuration drive to
use VFAT format instead of ISO 9660.
Related options:
* This option is meaningful when one of the following alternatives occurs:
1. force_config_drive option set to 'true'
2. the REST API call to create the instance contains an enable flag for
config drive option
3. the image used to create the instance requires a config drive,
this is defined by img_config_drive property for that image.
* A compute node running Hyper-V hypervisor can be configured to attach
configuration drive as a CD drive. To attach the configuration drive as a CD
drive, set config_drive_cdrom option at hyperv section, to true.
"""),
cfg.BoolOpt('force_config_drive',
default=False,
help="""
Force injection to take place on a config drive
When this option is set to true configuration drive functionality will be
forced enabled by default, otherwise user can still enable configuration
drives via the REST API or image metadata properties.
Possible values:
* True: Force to use of configuration drive regardless the user's input in the
REST API call.
* False: Do not force use of configuration drive. Config drives can still be
enabled via the REST API or image metadata properties.
Related options:
* Use the 'mkisofs_cmd' flag to set the path where you install the
  genisoimage program. If genisoimage is in the same path as the
nova-compute service, you do not need to set this flag.
* To use configuration drive with Hyper-V, you must set the
'mkisofs_cmd' value to the full path to an mkisofs.exe installation.
Additionally, you must set the qemu_img_cmd value in the hyperv
  configuration section to the full path to a qemu-img command
installation.
"""),
cfg.StrOpt('mkisofs_cmd',
default='genisoimage',
help="""
Name or path of the tool used for ISO image creation
Use the mkisofs_cmd flag to set the path where you install the genisoimage
program. If genisoimage is on the system path, you do not need to change
the default value.
To use configuration drive with Hyper-V, you must set the mkisofs_cmd value
to the full path to an mkisofs.exe installation. Additionally, you must set
the qemu_img_cmd value in the hyperv configuration section to the full path
to a qemu-img command installation.
Possible values:
* Name of the ISO image creator program, in case it is in the same directory
as the nova-compute service
* Path to ISO image creator program
Related options:
* This option is meaningful when config drives are enabled.
* To use configuration drive with Hyper-V, you must set the qemu_img_cmd
  value in the hyperv configuration section to the full path to a qemu-img
command installation.
"""),
]
def register_opts(conf):
conf.register_opts(config_drive_opts)
def list_opts():
return {"DEFAULT": config_drive_opts}
| 1.421875 | 1 |
problemproblem1.py | Shohanurcsevu/Python_Basics_Practice | 1 | 96132 | <filename>problemproblem1.py
def sentencemaker(phrase):
    cap = phrase.capitalize()
    interrogatives = ("how", "what", "why")
    if phrase.startswith(interrogatives):
        return "{}?".format(cap)
    else:
        return "{}.".format(cap)
results = []
while True:
    user_input = input("Say something :-) ")
    if user_input == r"\end":  # raw string: the user types \end to stop
        break
    else:
        results.append(sentencemaker(user_input))
print(" ".join(results)) | 2.375 | 2 |
user43_xNecki6wqa_0.py | KuanZhasulan/Python-Games | 0 | 96260 | # Conditionals Examples
# Return True if year is a leap year, false otherwise
def is_leap_year(year):
if (year % 400) == 0:
return True
elif (year % 100) == 0:
return False
elif (year % 4) == 0:
return True
else:
return False
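# Note the order of the checks: divisibility by 400 is tested before 100,
# and 100 before 4; for example, 2000 was a leap year but 1900 was not.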
year = 2012
leap_year = is_leap_year(year)
if leap_year:
print year, "is a leap year"
else:
print year, "is not a leap year"
| 2.90625 | 3 |
KEYWORDS/LENGTH.py | Andrew95496/ODL | 0 | 96388 | from KEYWORDS.KEYVAR import VAR_STACK
import os
class b:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class LENGTH:
# number of items list
def len_items(i, line):
TOKEN = line.split(' ')
try:
length_items = len(VAR_STACK[TOKEN[1]]) - VAR_STACK[TOKEN[1]].count('_')
print(length_items)
except KeyError:
print(f'{TOKEN[1]} is not a STACK or QUEUE')
os._exit(0)
# length of the entire list
def length(i, line):
TOKEN = line.split(' ')
length = len(VAR_STACK[TOKEN[1]])
print(length) | 2.078125 | 2 |
lectures/chapter5/maxScore.py | arongas/python-examples | 0 | 96516 | <filename>lectures/chapter5/maxScore.py<gh_stars>0
# 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆
#Write your code below this row 👇
max_score = -1
index = -1
max_index = -1
for score in student_scores:
index+=1
if score > max_score:
max_score=score
max_index=index
print(f"The highest score in the class is: {max_score} at position {max_index}")
# The built-ins compute the same extremes directly:
print(max(student_scores))
print(min(student_scores))
| 2.828125 | 3 |
src/twitter_regression.py | juditacs/semeval | 11 | 96644 | <reponame>juditacs/semeval<filename>src/twitter_regression.py
from numpy import linalg
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.pipeline import Pipeline
from sklearn import svm
from sys import argv
from collections import defaultdict
def read_gold(fn):
with open(fn) as f:
return [float(l) for l in f]
def read_feats(fn):
feats = []
with open(fn) as f:
for l in f:
feats.append([float(i) for i in l.strip().split(' ')])
return feats
def train_regression(feats, gold):
w = linalg.lstsq(feats, gold)
return w[0]
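# Quick self-check (editor's addition; synthetic data, not from the SemEval
# feature files):
#
#     train_regression([[1, 0], [0, 1], [1, 1]], [1.0, 2.0, 3.0])
#     # -> approximately [1.0, 2.0]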
def train_svm(feats, gold):
    clf = svm.SVC()
    clf.fit(feats, gold)
    return clf
def predict_svm(model, feats):
    return model.predict(feats)
def predict_regression(model, feats):
scores = []
for sample in feats:
ans = sum(model[i] * x for i, x in enumerate(sample))
scores.append(ans)
return scores
def print_stats(prediction, gold):
stat = defaultdict(int)
true_th = 0.3
for i, p in enumerate(prediction):
if p <= true_th:
if gold[i] < 0.5:
stat['tn'] += 1
else:
stat['fn'] += 1
else:
if gold[i] >= 0.5:
stat['tp'] += 1
else:
stat['fp'] += 1
N = sum(stat.values())
print('\ntrue positive: {0}\ntrue negative: {1}\nfalse positive: {2}\nfalse negative: {3}\nsum: {4}\n******'.format(stat['tp'], stat['tn'], stat['fp'], stat['fn'], N))
prec = float(stat['tp']) / (stat['tp'] + stat['fp'])
rec = float(stat['tp']) / (stat['tp'] + stat['fn'])
acc = float(stat['tp'] + stat['tn']) / N
F = 2 * prec * rec / (prec + rec)
print('Precision: {0}\nRecall: {1}\nF1: {2}\nAccuracy: {3}'.format(prec, rec, F, acc))
def main():
train_feats = read_feats(argv[1])
dev_feats = read_feats(argv[2])
if len(argv) > 3:
fn = argv[3]
else:
fn = 'data/filt/labels_train_binary'
train_gold = read_gold(fn)
if len(argv) > 4:
fn = argv[4]
else:
fn = 'data/filt/labels_dev_binary'
dev_gold = read_gold(fn)
#model = train_svm(train_feats, train_gold)
model = train_regression(train_feats, train_gold)
#prediction = predict_svm(model, dev_feats)
prediction = predict_regression(model, dev_feats)
th = 0.25
with open('predicted', 'w') as f:
for p in prediction:
if p >= th:
if p > 1:
p = 1
f.write('true\t{0:1.4f}\n'.format(round(p, 4)))
else:
if p < 0:
p = 0
f.write('false\t{0:1.4f}\n'.format(round(p, 4)))
f.write('\n'.join(map(str, prediction)) + '\n')
if dev_gold:
print_stats(prediction, dev_gold)
if __name__ == '__main__':
main()
| 2.171875 | 2 |
venv/lib/python3.5/site-packages/awscli/customizations/globalargs.py | meetynasu/server | 0 | 96772 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import os
from botocore.handlers import disable_signing
import jmespath
from awscli.compat import urlparse
def register_parse_global_args(cli):
cli.register('top-level-args-parsed', resolve_types)
cli.register('top-level-args-parsed', no_sign_request)
def resolve_types(parsed_args, **kwargs):
# This emulates the "type" arg from argparse, but does so in a way
# that plugins can also hook into this process.
_resolve_arg(parsed_args, 'query')
_resolve_arg(parsed_args, 'verify_ssl')
_resolve_arg(parsed_args, 'endpoint_url')
def _resolve_arg(parsed_args, name):
value = getattr(parsed_args, name, None)
if value is not None:
new_value = getattr(sys.modules[__name__], '_resolve_%s' % name)(value)
setattr(parsed_args, name, new_value)
def _resolve_query(value):
try:
return jmespath.compile(value)
except Exception as e:
raise ValueError("Bad value for --query %s: %s" % (value, str(e)))
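# Example (editor's note): a query such as 'Reservations[].Instances[]'
# compiles here into a jmespath expression that the CLI later applies to the
# service's JSON response.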
def _resolve_verify_ssl(value):
verify = None
if not value:
verify = False
else:
verify = os.environ.get('AWS_CA_BUNDLE')
return verify
def _resolve_endpoint_url(value):
parsed = urlparse.urlparse(value)
# Our http library requires you specify an endpoint url
# that contains a scheme, so we'll verify that up front.
if not parsed.scheme:
raise ValueError('Bad value for --endpoint-url "%s": scheme is '
'missing. Must be of the form '
'http://<hostname>/ or https://<hostname>/' % value)
return value
def no_sign_request(parsed_args, session, **kwargs):
if not parsed_args.sign_request:
# In order to make signing disabled for all requests
# we need to use botocore's ``disable_signing()`` handler.
session.register('choose-signer', disable_signing)
| 1.382813 | 1 |
app/admin/routes.py | biefeng/profile | 0 | 96900 | <reponame>biefeng/profile
# -*- coding:utf-8 -*-
# author : BieFeNg
# date_time 2020/06/30 16:31
# file_name : routes.py
from flask import render_template, redirect, url_for
from flask_login import login_required
from . import admin
@admin.route('/')
@login_required
def manager():
return redirect(url_for('admin.manage_articles'))
@admin.route('/index')
@login_required
def index():
return render_template('base/index.html', component="admin/article-list.vue")
@admin.route('/manage-articles', methods=['GET', 'POST'])
@login_required
def manage_articles():
return render_template('base/admin.html', component="admin/article-list.vue")
@admin.route('/manage-plugins', methods=['GET', 'POST'])
@login_required
def manage_plugins():
return render_template('base/admin.html', component="admin/chrome-plugin-list.vue")
| 0.957031 | 1 |
emotion_classifier/data/data_utilities/pkl_emotion_fixer.py | IronEdward/chatbot | 0 | 97028 | <gh_stars>0
import pickle as pkl
from Functions.functions import *
data = pkl.load(open("Data(new)/data.pkl", "rb"))
emotion_type = pkl.load(open("Data(new)/emotion_types.pkl", "rb"))
new_data = []
for i in data:
new_data.append([i[0], emotion_type.index(i[1])])
pkl.dump(new_data, open("Data(new)/final_data.pkl", "wb")) | 0.957031 | 1 |
runBib.py | diku-irlab/hugo_academic_webpage | 0 | 97156 | <reponame>diku-irlab/hugo_academic_webpage
import subprocess
import glob
for file in glob.glob("bibtex/*"):
print("academic import --bibtex "+file)
callProcess = subprocess.call("academic import --overwrite --bibtex "+file, shell=True) #
print("---")
| 1.023438 | 1 |
codewars/8 kyu/get-planet-name-by-id.py | sirken/coding-practice | 0 | 97284 | <filename>codewars/8 kyu/get-planet-name-by-id.py<gh_stars>0
from Test import Test, Test as test
'''
The function is not returning the correct values. Can you figure out why?
get_planet_name(3) # should return 'Earth'
'''
def get_planet_name(id):
planets = {
1: "Mercury",
2: "Venus",
3: "Earth",
4: "Mars",
5: "Jupiter",
6: "Saturn",
7: "Uranus",
8: "Neptune"
}
return planets[id]
Test.assert_equals(get_planet_name(2), 'Venus')
Test.assert_equals(get_planet_name(5), 'Jupiter')
Test.assert_equals(get_planet_name(3), 'Earth')
Test.assert_equals(get_planet_name(4), 'Mars')
Test.assert_equals(get_planet_name(8), 'Neptune')
Test.assert_equals(get_planet_name(1), 'Mercury') | 1.765625 | 2 |
click_project/commands/recipe.py | hobeika/click-project | 0 | 97412 | <reponame>hobeika/click-project
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import print_function, absolute_import
import os
import json
from pathlib import Path
import click
from click_project.decorators import group, option, argument, use_settings, flag, \
pass_context, settings_stores, table_fields, table_format
from click_project.completion import startswith
from click_project.log import get_logger
from click_project.config import config
from click_project.colors import Colorer
from click_project.lib import move, copy, ParameterType, json_file,\
json_dumps, rm, call, cd, get_option_choices
from click_project.lib import TablePrinter, get_authenticator
from click_project.overloads import CommandSettingsKeyType
from click_project.types import DirectoryProfileType
LOGGER = get_logger(__name__)
class RecipeConfig(object):
pass
class RecipeNameType(ParameterType):
def __init__(self, enabled=False, disabled=False, failok=True):
self.disabled = disabled
self.enabled = enabled
self.failok = failok
super(RecipeNameType, self).__init__()
def getchoice(self, ctx):
if self.enabled:
recipes = config.all_enabled_recipes
elif self.disabled:
recipes = (
list(config.all_disabled_recipes) +
list(config.all_unset_recipes)
)
else:
recipes = config.all_recipes
return [
recipe.short_name
for recipe in recipes
]
def complete(self, ctx, incomplete):
choice = self.getchoice(ctx)
return [(recipe, load_short_help(recipe))
for recipe in choice
if startswith(recipe, incomplete)]
class AllRecipeNameType(RecipeNameType):
def getchoice(self, ctx):
return super(AllRecipeNameType, self).getchoice(ctx) + [
n
for p in config.root_profiles
for n in p.recipe_link_names
]
class RecipeLinkNameType(ParameterType):
def getchoice(self, ctx):
return settings_stores["recipe"].profile.recipe_link_names
def complete(self, ctx, incomplete):
choice = self.getchoice(ctx)
return [(recipe, load_short_help(recipe))
for recipe in choice
if startswith(recipe, incomplete)]
class RecipeType(RecipeNameType):
def convert(self, value, param, ctx):
choice = self.getchoice(ctx)
if value not in choice and self.failok:
self.fail('invalid choice: %s. (choose from %s)' %
(value, ', '.join(choice)), param, ctx)
profile = settings_stores["recipe"].profile
return profile.get_recipe(value)
def load_short_help(recipe):
return recipe
@group(default_command='show')
@use_settings("recipe", RecipeConfig)
def recipe():
"""Recipe related commands
    A recipe is a set of settings that may be activated or deactivated in a project.
    The recipes can be defined in the global or local profile."""
pass
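# Typical usage (editor's sketch; assumes the project's console script is
# named clk, as in click-project):
#
#     clk recipe create myrecipe
#     clk recipe enable myrecipe
#     clk recipe show --enabled-only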
@recipe.command(handle_dry_run=True)
@argument("name", help="The recipe name")
def create(name):
"""Create a new recipe"""
profile = config.recipe.profile
r = profile.create_recipe(name)
LOGGER.status(
"Created recipe {}.".format(
r.friendly_name
))
@recipe.command(handle_dry_run=True)
@argument("old", type=RecipeType(), help="The current recipe name")
@argument("new", help="The new recipe name")
def rename(old, new):
"""Rename a recipe"""
if "/" not in new:
new = "{}/{}".format(
old.name.split("/")[0],
new
)
new_loc = config.recipe_location(new)
if os.path.exists(new_loc):
raise click.UsageError("{} already exists".format(new_loc))
move(old.location, new_loc)
@recipe.command(handle_dry_run=True)
@argument("old", type=RecipeType(), help="The current recipe name")
@argument("profile", type=DirectoryProfileType(root_only=True),
help="The profile where to move the recipe")
def _move(old, profile):
"""Move a recipe to another profile"""
move(old.location, Path(profile.location) / "recipes" / Path(old.location).name)
@recipe.command(handle_dry_run=True)
@argument("src", type=RecipeType(), help="The source recipe name")
@argument("dest", help="The destination recipe name")
def _copy(src, dest):
"""Copy a recipe"""
if "/" not in dest:
dest = "{}/{}".format(
src.name.split("/")[0],
dest
)
new_loc = config.recipe_location(dest)
if os.path.exists(new_loc):
raise click.UsageError("{} already exists".format(new_loc))
copy(src.location, new_loc)
@recipe.command(handle_dry_run=True)
@argument("recipe", type=RecipeType(), nargs=-1, help="The name of the recipes to remove")
def remove(recipe):
"""Remove a recipe"""
for rec in recipe:
LOGGER.status("Removing {}".format(rec.friendly_name))
config.get_profile_containing_recipe(rec.name).remove_recipe(rec.name)
@recipe.command(handle_dry_run=True)
@table_fields(choices=['recipe', "set_in", "defined_in", "link", "order"])
@table_format(default='simple')
@Colorer.color_options
@flag("--link/--no-link", help="Show links also", default=False)
@flag("--enabled-only/--not-enabled-only", help="Show only the enabled recipes")
@flag("--disabled-only/--not-disabled-only", help="Show only the disabled recipes")
@option('--order/--no-order', help="Display the priority of the recipe")
@argument('recipes', type=RecipeNameType(disabled=True, failok=False), nargs=-1,
help="The names of the recipes to show")
def show(fields, format, link, order, recipes, enabled_only, disabled_only, **kwargs):
"""List the recipes and some info about them"""
config_recipes = set(config.recipe.readonly.keys())
avail_recipes = set([r.short_name for r in config.all_recipes])
if not fields:
fields = list(get_option_choices('fields'))
if not link:
fields.remove('link')
if not order:
fields.remove('order')
if not recipes:
for profile in config.root_profiles:
config_recipes |= profile.recipe_link_names
recipes = config_recipes | avail_recipes
if not recipes:
LOGGER.status("No recipe yet")
exit(0)
with Colorer(kwargs) as colorer, TablePrinter(fields, format) as tp:
for recipe_name in sorted(recipes):
profiles = ", ".join([
click.style(profile.name, **colorer.get_style(profile.name))
for profile in config.root_profiles
if profile.has_recipe(recipe_name)
])
link_profiles = ", ".join([
profile.name
for profile in config.root_profiles
if profile.has_recipe_link(recipe_name)
])
profile = colorer.last_profile_of_settings(
recipe_name,
config.recipe.all_settings,
)
recipe_enabled = config.is_recipe_enabled(recipe_name)
if (
(
not enabled_only or
recipe_enabled
) and
(
not disabled_only or
not recipe_enabled
)
):
profile_style = colorer.get_style(profile) if profile else {}
tp.echo(
click.style(recipe_name, fg = "green" if recipe_enabled else "red"),
(profile and click.style(profile, **profile_style)) or "Unset",
profiles or "Undefined",
link_profiles,
config.get_recipe_order(recipe_name),
)
@recipe.command(handle_dry_run=True)
@flag("--all", help="On all recipes")
@argument("recipe", type=RecipeNameType(enabled=True, failok=False), nargs=-1,
help="The names of the recipes to disable")
@pass_context
def disable(ctx, recipe, all):
"""Don't use this recipe"""
if all:
        recipe = RecipeType(enabled=True).getchoice(ctx)
for cmd in recipe:
if cmd in config.recipe.writable:
config.recipe.writable[cmd]["enabled"] = False
else:
config.recipe.writable[cmd] = {"enabled": False}
LOGGER.status("Disabling recipe {} in profile {}".format(cmd, config.recipe.writeprofile))
config.recipe.write()
@recipe.command(handle_dry_run=True)
@flag("--all", help="On all recipes")
@argument("recipe", type=CommandSettingsKeyType("recipe"), nargs=-1, help="The name of the recipe to unset")
@pass_context
def unset(ctx, recipe, all):
"""Don't say whether to use or not this recipe (let the upper profiles decide)"""
if all:
recipe = list(config.recipe.readonly.keys())
for cmd in recipe:
if cmd not in config.recipe.writable:
raise click.UsageError(
"Recipe {} not set in profile {}".format(
cmd,
config.recipe.writeprofile
)
)
for cmd in recipe:
del config.recipe.writable[cmd]
LOGGER.status("Unsetting {} from profile {}".format(cmd, config.recipe.writeprofile))
config.recipe.write()
@recipe.command(handle_dry_run=True)
@flag("--all", help="On all recipes")
@option('--only/--no-only', help="Use only the provided recipe, and disable the others")
@argument("recipe", type=RecipeNameType(disabled=True, failok=False), nargs=-1,
help="The names of the recipes to enable")
@pass_context
def enable(ctx, recipe, all, only):
"""Use this recipe"""
if all:
recipe = RecipeType(disabled=True).getchoice(ctx)
if only:
for cmd in set(RecipeType().getchoice(ctx)) - set(recipe):
if cmd in config.recipe.writable:
config.recipe.writable[cmd]["enabled"] = False
else:
config.recipe.writable[cmd] = {"enabled": False}
LOGGER.status("Disabling recipe {} in profile {}".format(cmd, config.recipe.writeprofile))
for cmd in recipe:
if cmd in config.recipe.writable:
config.recipe.writable[cmd]["enabled"] = True
else:
config.recipe.writable[cmd] = {"enabled": True}
LOGGER.status("Enabling recipe {} in profile {}".format(cmd, config.recipe.writeprofile))
config.recipe.write()
@recipe.command(handle_dry_run=True)
@argument("recipe1", type=RecipeNameType(enabled=True, failok=False),
help="The name of the recipe to disable")
@argument("recipe2", type=RecipeNameType(disabled=True, failok=False),
help="The name of the recipe to enable")
@pass_context
def switch(ctx, recipe1, recipe2):
"""Switch from a recipe to another"""
ctx.invoke(disable, recipe=[recipe1])
ctx.invoke(enable, recipe=[recipe2])
@recipe.command(handle_dry_run=True)
@argument("recipe", type=RecipeNameType(failok=False), nargs=-1,
help="The names of the recipes to which the order will be set")
@argument("order", type=int, help="The order to be set on the recipes")
def set_order(recipe, order):
"""Set the order of the recipes"""
if not recipe:
recipe = config.all_recipes
for cmd in recipe:
if cmd in config.recipe.writable:
config.recipe.writable[cmd]["order"] = order
else:
config.recipe.writable[cmd] = {"order": order}
LOGGER.status("Set order of {} to {} in profile {}".format(cmd, order, config.recipe.writeprofile))
config.recipe.write()
@recipe.command()
@argument("profile", type=DirectoryProfileType(),
help="The name of the profile to open")
@option("--opener", help="Program to call to open the directory", default="xdg-open")
def open(profile, opener):
"""Open the directory containing the profile"""
call(
[
opener, profile.location
]
)
@recipe.command()
@argument("profile", type=DirectoryProfileType(),
help="The name of the profile to show")
def where_is(profile):
"""Show where is a given recipe"""
print(profile.location)
@recipe.command()
@option("--profile", type=DirectoryProfileType(),
help="The profile where to install the recipe")
@argument("url",
help="The url of the git repository hosting the recipe.")
@argument("name", help="The name of the recipe")
def clone(profile, url, name):
"""Clone a recipe stored in github in the given profile"""
profile = profile or config.global_profile
recipe_path = Path(profile.location) / "recipes" / name
call(
[
"git", "clone", url, str(recipe_path)
]
)
@recipe.group(default_command="show")
def link():
"""Manipulate recipes link"""
link.inherited_params = ["profile", "recipe"]
@link.command()
@argument("recipes", type=AllRecipeNameType(), nargs=-1, help="The names of the recipes to enable")
def _enable(recipes):
"""Enable the given recipe in the link"""
for recipe in recipes:
with json_file(
config.recipe.profile.link_location(recipe)
) as values:
values["enabled"] = True
LOGGER.status("Enabling the link file of {} ({})".format(recipe, config.recipe.writeprofile))
@link.command()
@argument("recipes", type=AllRecipeNameType(), nargs=-1, help="The names of the recipes to enable")
def _disable(recipes):
"""Disable the given recipe in the link"""
for recipe in recipes:
with json_file(
config.recipe.profile.link_location(recipe)
) as values:
values["enabled"] = False
LOGGER.status("Disabling the link file of {} ({})".format(recipe, config.recipe.writeprofile))
@link.command()
@argument("recipe", type=RecipeNameType(), help="The name of the recipe to dump")
def _dump(recipe):
"""Show the values of the link file"""
with json_file(
config.recipe.profile.link_location(recipe)
) as values:
click.echo(json_dumps(values))
@link.command()
@Colorer.color_options
def _show(**kwargs):
"""Link the list recipes"""
with Colorer(kwargs) as colorer:
for profile in config.root_profiles:
for name in profile.recipe_link_names:
message = name
enabled = profile.recipeislinkenabled(name)
message += " ({})".format(
{
True: "enabled",
False: "disabled",
None: "implicitly disabled"
}[enabled]
)
colorer.echo(message, profile.name)
@link.command()
@argument("recipes", type=RecipeLinkNameType(), help="The names of the recipes to unset")
def _unset(recipes):
"""Remove the the link file"""
for recipe in recipes:
rm(config.recipe.profile.link_location(recipe))
LOGGER.status("Removing the link file of {} ({})".format(recipe, config.recipe.writeprofile))
| 1.265625 | 1 |
modules/scraper.py | Nikhil-Kulkarni/instagram-scraper | 0 | 97540 | import re
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from random import seed
from random import random
from random import randint
class Scraper(object):
"""Able to start up a browser, to authenticate to Instagram and get
followers and people following a specific user."""
def __init__(self):
self.driver = webdriver.Chrome('drivers/chromedriver2')
def close(self):
"""Close the browser."""
self.driver.close()
def authenticate(self, username, password):
"""Log in to Instagram with the provided credentials."""
print('\nLogging in…')
self.driver.get('https://www.instagram.com')
        # Wait for the login form to be present before interacting with it
        WebDriverWait(self.driver, 5).until(
            EC.presence_of_element_located((By.LINK_TEXT, 'Forgot password?'))
        )
# Authenticate
username_input = self.driver.find_element_by_xpath(
'//input[@name="username"]'
)
password_input = self.driver.find_element_by_xpath(
'//input[@name="password"]'
)
username_input.send_keys(username)
password_input.send_keys(password)
password_input.send_keys(Keys.RETURN)
time.sleep(10)
def get_users(self, group, target, link, verbose = False):
f = open("links.txt", "a")
"""Return a list of links to the users profiles found."""
if link is None:
wblink = self._get_link(group, target)
if wblink is None:
return []
self._open_dialog(wblink)
if self.users_list_container is None:
return None
else:
wblink = self._get_link_by_link(group, link)
if wblink is None:
return []
self._open_dialog(wblink)
if self.users_list_container is None:
return None
print('\nGetting {} users…{}'.format(
self.expected_number,
'\n' if verbose else ''
))
links = []
last_user_index = 0
updated_list = self._get_updated_user_list()
initial_scrolling_speed = 5
retry = 2
# While there are more users scroll and save the results
while updated_list[last_user_index] is not updated_list[-1] or retry > 0:
self._scroll(self.users_list_container, initial_scrolling_speed)
for index, user in enumerate(updated_list):
if index < last_user_index:
continue
                if index > 100:
                    f.close()
                    return links
try:
link_to_user = user.find_element(By.TAG_NAME, 'a').get_attribute('href')
last_user_index = index
if link_to_user not in links:
links.append(link_to_user)
if verbose:
f.write(link_to_user + "\n")
print(
'{0:.2f} {1:s}'.format(
index,
link_to_user
)
)
except:
if (initial_scrolling_speed > 1):
initial_scrolling_speed -= 1
pass
updated_list = self._get_updated_user_list()
if updated_list[last_user_index] is updated_list[-1]:
retry -= 1
f.close()
print('100% Complete')
return links
def _open_dialog(self, link):
"""Open a specific dialog and identify the div containing the users
list."""
link.click()
self.expected_number = int(
            re.search(r'(\d+)', link.text).group(1)
)
time.sleep(1)
try:
self.users_list_container = self.driver.find_element_by_xpath(
'//div[@role="dialog"]//ul/parent::div'
)
except:
self.users_list_container = None
def _get_link(self, group, target):
"""Return the element linking to the users list dialog."""
print('\nNavigating to %s profile…' % target)
self.driver.get('https://www.instagram.com/%s/' % target)
try:
return WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, group))
)
except:
return None
def _get_link_by_link(self, group, link):
"""Return the element linking to the users list dialog."""
        print('\nNavigating to %s…' % link)
self.driver.get(link)
try:
return WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, group))
)
except:
return None
def _get_updated_user_list(self):
"""Return all the list items included in the users list."""
return self.users_list_container.find_elements(By.XPATH, 'ul//li')
def _scroll(self, element, times = 1):
"""Scroll a specific element one or more times with small delay between
them."""
while times > 0:
self.driver.execute_script(
'arguments[0].scrollTop = arguments[0].scrollHeight',
element
)
time.sleep(random() * randint(2, 5))
times -= 1
| 2.375 | 2 |
datamodels/scripts/cdp_correct_wildcard.py | mwregan2/MiriTE | 0 | 97668 | #!/usr/bin/env python
#
# :History:
#
# 10 Aug 2018: Created.
#
# @author: <NAME> (UKATC)
#
"""
Script `cdp_correct_wildcard` corrects wildcard references within CDP metadata.
Prior to the CDP-7 release, MIRI CDPs would set metadata keywords to 'ANY'
to indicate that the CDP was valid for any variant of that CDP (e.g. FILTER='ANY').
From CDP-7 onwards, the naming convention is changed so the string 'N/A' is
used instead, which is more compatible with the JWST CRDS searching mechanism.
This script checks the keywords contained in a CDP file and changes all
occurrences of THING='ANY' to THING='N/A'.
The following command arguments are defined by position::
inputfile[0]
The path+name of the file to be read. Compulsory.
outputfile[1]
The path+name of the file to be written.
Optional. Defaults to the same name as inputfile with "_out" appended.
The command also takes the following options::
--verbose or -v
Generate more verbose output.
--overwrite or -o
    Overwrite the existing FITS file.
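
Example (file names are hypothetical)::

    python cdp_correct_wildcard.py miri_cdp.fits miri_cdp_fixed.fits --overwrite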
"""
# Python logging facility.
import logging
# Set the default logging level.
logging.basicConfig(level=logging.INFO)
# Get a default parent logger
logger = logging.getLogger("cdp_correct_wildcard")
import optparse
import sys, time
import miri.datamodels
def correct_wildcard_metadata( datamodel ):
"""
    Correct the wildcard metadata values, changing 'ANY' to 'N/A'.
:Parameters:
datamodel: MiriDataModel
The calibration data model whose metadata is to be updated.
:Returns:
nchanges: int
Returns the number of changes made to the metadata.
"""
# Check MRS wildcard information.
nchanges = 0
if hasattr(datamodel, 'meta') and hasattr(datamodel.meta, 'instrument') and \
hasattr(datamodel.meta, 'exposure') and hasattr(datamodel.meta, 'subarray'):
if datamodel.meta.instrument.model is not None:
if str(datamodel.meta.instrument.model).strip() == 'ANY':
datamodel.meta.instrument.model = 'N/A'
nchanges += 1
if datamodel.meta.instrument.detector is not None:
if str(datamodel.meta.instrument.detector).strip() == 'ANY':
datamodel.meta.instrument.detector = 'N/A'
nchanges += 1
if datamodel.meta.instrument.detector_settings is not None:
if str(datamodel.meta.instrument.detector_settings).strip() == 'ANY':
datamodel.meta.instrument.detector_settings = 'N/A'
nchanges += 1
if datamodel.meta.instrument.filter is not None:
if str(datamodel.meta.instrument.filter).strip() == 'ANY':
datamodel.meta.instrument.filter = 'N/A'
nchanges += 1
if datamodel.meta.instrument.channel is not None:
if str(datamodel.meta.instrument.channel).strip() == 'ANY':
datamodel.meta.instrument.channel = 'N/A'
nchanges += 1
if datamodel.meta.instrument.band is not None:
if str(datamodel.meta.instrument.band).strip() == 'ANY':
datamodel.meta.instrument.band = 'N/A'
nchanges += 1
if datamodel.meta.exposure.readpatt is not None:
if str(datamodel.meta.exposure.readpatt).strip() == 'ANY':
datamodel.meta.exposure.readpatt = 'N/A'
nchanges += 1
if datamodel.meta.subarray.name is not None:
if str(datamodel.meta.subarray.name).strip() == 'ANY':
datamodel.meta.subarray.name = 'N/A'
nchanges += 1
else:
strg = "MIRI instrument, exposure and subarray metadata attributes missing from data model %s" % \
datamodel.__class__.__name__
raise TypeError(strg)
return nchanges
if __name__ == "__main__":
# Parse arguments
help_text = __doc__
usage = "%prog [opt] inputfile outputfile\n"
usage += "Corrects the wildcard usage (\'ANY\'-->\'N/A\') within a "
usage += "MIRI calibration data product."
parser = optparse.OptionParser(usage)
parser.add_option("-v", "--verbose", dest="verb", action="store_true",
help="Verbose mode"
)
parser.add_option("-o", "--overwrite", dest="overwrite", action="store_true",
help="Overwrite the copy of the file if it already exists"
)
(options, args) = parser.parse_args()
try:
inputfile = args[0]
if len(args) > 1:
outputfile = args[1]
else:
outputfile = inputfile + "_out.fits"
except IndexError:
print(help_text)
time.sleep(1) # Ensure help text appears before error messages.
parser.error("Not enough arguments provided")
sys.exit(1)
verb = options.verb
overwrite = options.overwrite
# Open the data model using the class derived from the data type.
with miri.datamodels.open( init=inputfile ) as datamodel:
# Attempt to correct the wildcards in the metadata keyword
logger.info("Analysing %s..." % inputfile)
nchanges = correct_wildcard_metadata( datamodel )
if verb:
print(datamodel)
print(datamodel.get_history_str())
if nchanges > 0:
datamodel.save( outputfile, overwrite=overwrite)
logger.info("%d changes made. Data saved to new file, %s\n" % (nchanges, outputfile))
else:
logger.info("Data not changed. No output file written.\n")
del datamodel
| 1.421875 | 1 |
SciComputing with Python/lesson_05-06/ballisiticDrag.py | evtodorov/aerospace | 0 | 97796 | # -*- coding: utf-8 -*-
"""
Created on Tue May 06 09:47:05 2014
Ballistic trajectories Case 2 Drag
@author: etodorov
"""
import math
import matplotlib.pyplot as plt
v0 = 50. #m/s
x0 = 0.
y0 = 0.
deg = math.pi/180.
g = -9.81
dt = .01 #time step
cD = .47 # drag coefficient [-]
A = 0.8 # m^2
m = 3. #kg
rho = 1.225
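
# Integration: simple forward (explicit) Euler with fixed time step dt.
# Drag magnitude: D = cD * A * 0.5 * rho * v^2, applied opposite to the
# velocity vector, decomposed with the flight-path angle phi = atan2(vy, vx).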
for i in xrange(25,80,5):
xTab = []
yTab = []
t = 0
vx = v0*math.cos(i*deg)
vy = v0*math.sin(i*deg)
x = x0
y = y0
while y>=y0:
phi = math.atan2(vy,vx)
D = cD*A*.5*rho*(vx*vx+vy*vy)
ax = -D*math.cos(phi)/m
vx += ax*dt
x += vx*dt
ay = g-D*math.sin(phi)/m
vy += ay*dt
y += vy*dt
xTab.append(x)
yTab.append(y)
plt.plot(xTab,yTab, label=str(i))
plt.legend()
plt.title("Ballisitic trajectories")
plt.xlabel("x-position [m]")
plt.ylabel("y-posiiton [m]")
plt.show() | 2.09375 | 2 |
spyke/__version__.py | spyke/spyke | 22 | 97924 | """Define __version__ and enforce minimum library versions"""
from __future__ import division
from __future__ import print_function
import os
import sys
from distutils.version import LooseVersion
__version__ = '2.1' # incremented mostly to track significant changes to sort file format
# enforce minimum versions of various required libraries:
PY2_LIBVERSIONS = {'Python': '2.7.15',
'Qt4': '4.8.7',
'PyQt4': '4.12.1',
'PyOpenGL': '3.1.0',
#'setuptools': '39.0.0', # for checking packaged versions
'IPython': '5.8.0',
'numpy': '1.16.5',
'scipy': '1.2.2',
'matplotlib': '2.2.3',
'cython': '0.29.13',
'mdp': '3.5',
'sklearn': '0.20.4',
'pywt': '1.0.3',
'jsonpickle': '1.2',
'simplejson': '3.16.0',
}
PY3_LIBVERSIONS = {'Python': '3.6.8',
'Qt4': '4.8.7',
'PyQt4': '4.12.1',
'PyOpenGL': '3.1.0',
#'setuptools': '39.0.0', # for checking packaged versions
'IPython': '7.4.0',
'numpy': '1.17.2',
'scipy': '1.3.1',
'matplotlib': '3.0.3',
'cython': '0.29.13',
'mdp': '3.5',
'sklearn': '0.21.3',
'pywt': '1.0.3',
'jsonpickle': '1.2',
'simplejson': '3.16.0',
}
PYVER = sys.version_info.major
PYVER2LIBVERSIONS = {2: PY2_LIBVERSIONS,
3: PY3_LIBVERSIONS}
LIBVERSIONS = PYVER2LIBVERSIONS[PYVER]
# map library names to pip/conda package names, for those few which are not identical:
LIBNAME2PKGNAME = {'pywt': 'PyWavelets',
'skimage': 'scikit-image',
'sklearn': 'scikit-learn'
}
def get_python_version(libname):
return os.sys.version.split(' ')[0]
def get_qt4_version(libname):
from PyQt4.QtCore import QT_VERSION_STR
return QT_VERSION_STR
def get_pyqt4_version(libname):
from PyQt4.pyqtconfig import Configuration
cfg = Configuration()
return cfg.pyqt_version_str
def get_pyopengl_version(libname):
import OpenGL
return OpenGL.version.__version__
def get_generic_version(libname):
exec('import ' + libname) # import full library names into namespace
ver = eval(libname + '.__version__') # recommended attrib, according to PEP8
return ver
def get_generic_pkg_version(libname):
import pkg_resources # installed by setuptools package
ver = pkg_resources.get_distribution(libname).version # packaged version
return ver
LIBNAME2VERF = {'Python': get_python_version,
'Qt4': get_qt4_version,
'PyQt4': get_pyqt4_version,
'PyOpenGL': get_pyopengl_version,
}
def check_LIBVERSIONS(verbose=False):
"""Check that all minimum version requirements in LIBVERSIONS are met"""
for libname, minver in LIBVERSIONS.items():
verf = LIBNAME2VERF.get(libname, get_generic_version)
# get current version of libname:
ver = verf(libname)
if verbose:
print(libname, ver)
if ver < LooseVersion(minver):
msg = ('Please update %s from version %s to at least version %s\n'
% (libname, ver, minver))
if libname in LIBNAME2VERF:
sln = ''
else:
if libname in LIBNAME2PKGNAME: # libname and install package name differ
pkgname = LIBNAME2PKGNAME[libname]
else:
pkgname = libname
sln = ('Run `sudo pip%d install --upgrade %s` or `conda update %s` '
'at the command line' % (PYVER, pkgname, pkgname))
raise RuntimeError(msg+sln)
if libname == 'jsonpickle' and ver != LooseVersion(minver):
msg = ('spyke currently requires exactly jsonpickle version %s, version %s is '
'currently installed\n' % (minver, ver))
sln = ('Run `sudo pip%d install jsonpickle==%s`'% (PYVER, minver))
raise RuntimeError(msg+sln)
| 1.773438 | 2 |
tests/test_decode.py | bemis1/python-openapi-codec | 0 | 98052 | from coreapi import Document
from openapi_codec import OpenAPICodec
import os
test_filepath = os.path.join(os.path.dirname(__file__), 'petstore.json')
def test_decode():
    with open(test_filepath, 'rb') as f:
        test_content = f.read()
codec = OpenAPICodec()
document = (codec.load(test_content))
assert isinstance(document, Document)
assert set(document.keys()) == set(['pet', 'store', 'user'])
assert document.title == 'Swagger Petstore'
| 1.3125 | 1 |
tests/testing_lib/test_data.py | Zotkin/incremental_learning.pytorch | 277 | 98180 | <gh_stars>100-1000
import pytest
from inclearn.lib import data
@pytest.mark.parametrize("dataset_name,increment,n_tasks", [
("cifar100", 10, 10),
("cifar100", 2, 50)
])
def test_incremental_class(dataset_name, increment, n_tasks):
dataset = data.IncrementalDataset(
dataset_name,
increment=increment
)
assert dataset.n_tasks == n_tasks
current_class = 0
for _ in range(dataset.n_tasks):
task_info, train_loader, _, test_loader = dataset.new_task()
min_c, max_c = current_class, current_class + increment
assert task_info["increment"] == increment
assert task_info["min_class"] == min_c
assert task_info["max_class"] == max_c
for _, targets, _ in train_loader:
assert all(min_c <= t.item() < max_c for t in targets)
for _, targets, _ in test_loader:
assert all(0 <= t.item() < max_c for t in targets)
current_class += increment
| 2.25 | 2 |
networkapi/api_usuario/facade.py | brunodevel/GloboNetworkAPI | 0 | 98308 | <reponame>brunodevel/GloboNetworkAPI<filename>networkapi/api_usuario/facade.py
# -*- coding: utf-8 -*-
import logging
from networkapi.usuario.models import UsuarioGrupo
# from networkapi.admin_permission import AdminPermission
log = logging.getLogger(__name__)
def get_groups(users_permissions):
groups = list()
for user_permission in users_permissions:
for group in UsuarioGrupo.list_by_user_id(user_permission['user']):
group_id = int(group.ugrupo.id)
if group_id != 1:
groups.append({
'group': group_id,
'read': user_permission['read'],
'write': user_permission['write'],
'delete': user_permission['delete'],
'change_config': user_permission['change_config'],
})
return groups
def reduce_groups(groups):
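    """Merge entries that share the same group id, OR-ing together their
    read/write/delete/change_config permission flags."""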
group_reduce = list()
group_reduce_idx = list()
for group in groups:
        if group['group'] in group_reduce_idx:
            # find the position of this group id in the reduced list
            idx = group_reduce_idx.index(group['group'])
            if group['read']:
                group_reduce[idx]['read'] = True
            if group['write']:
                group_reduce[idx]['write'] = True
            if group['delete']:
                group_reduce[idx]['delete'] = True
            if group['change_config']:
                group_reduce[idx]['change_config'] = True
else:
group_reduce_idx.append(group['group'])
group_reduce.append(group)
return group_reduce
| 1.25 | 1 |
Part_1_beginner/07_type_dictionary/rozwiazania/exercise_2.py | Mikma03/InfoShareacademy_Python_Courses | 0 | 98436 |
# Create a variable my_family containing your family tree.
# Start with yourself - describing the first name, last name and birth date
# of each person, as well as their parents.
# Hint: parents will be lists, containing further dictionaries inside.
my_family = {
"first_name": "Mikołaj",
"last_name": "Lewandowski",
"birth_date": "23-11-1991",
"parents": [
{
"first_name": "Jan",
"last_name": "Lewandowski",
"birth_date": "15-11-1961",
"parents": [
{
"first_name": "Piotr",
"last_name": "Lewandowski",
"birth_date": "15-11-1931",
"parents": [],
},
{
"first_name": "Beata",
"last_name": "Lewandowska",
"birth_date": "15-11-1931",
"parents": [],
},
]
},
{
"first_name": "Alicja",
"last_name": "Lewandowski",
"birth_date": "15-11-1961",
"parents": [
{
"first_name": "Paweł",
"last_name": "Kowalski",
"birth_date": "15-11-1931",
"parents": [],
},
{
"first_name": "Anna",
"last_name": "Kowalska",
"birth_date": "15-11-1931",
"parents": [],
},
]
}
],
}
print("Drzewo genealogiczne", my_family)
| 1.289063 | 1 |
opencv_project_python-master/opencv_project_python-master/06.filter/edge_roberts.py | dongrami0425/Python_OpenCV-Study | 0 | 98564 | <filename>opencv_project_python-master/opencv_project_python-master/06.filter/edge_roberts.py<gh_stars>0
import cv2
import numpy as np
img = cv2.imread("../img/sudoku.jpg")
# Create the Roberts kernels ---①
gx_kernel = np.array([[1,0], [0,-1]])
gy_kernel = np.array([[0, 1],[-1,0]])
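# The Roberts cross operator approximates the image gradient along the two
# diagonals: gx responds to one diagonal direction, gy to the other.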
# Apply the kernels ---②
edge_gx = cv2.filter2D(img, -1, gx_kernel)
edge_gy = cv2.filter2D(img, -1, gy_kernel)
# Show the result
merged = np.hstack((img, edge_gx, edge_gy, edge_gx+edge_gy))
cv2.imshow('<NAME>', merged)
cv2.waitKey(0)
cv2.destroyAllWindows() | 1.601563 | 2 |
src/binary_app/view.py | Stupnitskiy/BinaryAPI | 0 | 98692 | from flask import render_template, request, Blueprint
from src import app
from src.binary_app import serialize
from src.binary_app import spec
from src.lib.validate import wrap_validate
from src.lib.services import dropbox
binary_bp = Blueprint('binary_bp', __name__, url_prefix='/api/binary')
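
# Example requests (relative to the application root):
#   GET    /api/binary/get           -> list stored files
#   GET    /api/binary/get/<key>     -> download one file
#   PUT    /api/binary/put           (form fields: key, data)
#   DELETE /api/binary/delete/<key>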
@binary_bp.route("/get", methods=['GET'])
def get_list():
files = dropbox.list_folder()
return serialize.get_list(files)
@binary_bp.route("/get/<string:key>", methods=['GET'])
@wrap_validate(spec.get())
def get(key):
return dropbox.download(key)
@binary_bp.route("/put", methods=['PUT'])
@wrap_validate(spec.put())
def put():
form = request.form
key = form['key']
data = form['data']
encoded_data = str.encode(data)
result = dropbox.upload(encoded_data, key)
return serialize.put(result)
@binary_bp.route("/delete/<string:key>", methods=['DELETE'])
@wrap_validate(spec.delete())
def delete(key):
file = dropbox.delete(key)
return serialize.delete(file) | 1.265625 | 1 |
pynet/datasets/dsprites.py | neurospin/pynet | 8 | 98820 | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides functions to prepare the DSprites dataset.
beta-vae: Learning basic visual concepts with a constrained variational
framework, Higgins, International Conference on Learning Representations, 2017.
Code: https://github.com/YannDubs/disentangling-vae
"""
# Imports
import os
import logging
import subprocess
import numpy as np
from pynet.datasets.core import DataItem
from torch.utils.data import Dataset
# Global parameters
logger = logging.getLogger("pynet")
class DSprites(Dataset):
""" Disentanglement test Sprites dataset.
Procedurally generated 2D shapes, from 6 disentangled latent factors.
This dataset uses 6 latents, controlling the color, shape, scale,
rotation and position of a sprite.
All possible variations of the latents are present. Ordering along
dimension 1 is fixed and can be mapped back to the exact latent values
that generated that image. Pixel outputs are different. No noise added.
Notes
-----
- Link : https://github.com/deepmind/dsprites-dataset/
- hard coded metadata because issue with python 3 loading of python 2
"""
urls = {
"train":
"https://github.com/deepmind/dsprites-dataset/blob/master/"
"dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz?raw=true"}
files = {"train": "dsprite_train.npz"}
lat_names = ("shape", "scale", "orientation", "posX", "posY")
img_size = (64, 64)
def __init__(self, datasetdir, size=None, **kwargs):
""" Init class.
Latent values of length 6, that gives the value of each factor of
variation.
Parameters
----------
datasetdir: string
the dataset destination folder.
size: int, default None
the size of the dataset, default use all images available.
"""
super(DSprites, self).__init__(**kwargs)
self.datasetdir = datasetdir
self.dsprites_file = os.path.join(
self.datasetdir, DSprites.files["train"])
self.download()
dataset = np.load(self.dsprites_file)
if size is None:
size = len(dataset["imgs"])
size = min(size, len(dataset["imgs"]))
index = np.arange(size)
np.random.shuffle(index)
self.imgs = dataset["imgs"][index]
self.lat_values = dataset["latents_values"][index]
self.n_samples = len(self.imgs)
def download(self):
""" Download the dataset.
"""
if not os.path.isdir(self.datasetdir):
os.makedirs(self.datasetdir)
if not os.path.isfile(self.dsprites_file):
subprocess.check_call(["curl", "-L", DSprites.urls["train"],
"--output", self.dsprites_file])
def __len__(self):
return self.n_samples
def __getitem__(self, idx):
""" Get the image at position 'idx'.
Returns
-------
out: DataItem
input/output tensor in [0, 1] of shape 'img_size'.
"""
data = np.expand_dims(self.imgs[idx], axis=0)
return DataItem(inputs=data, outputs=data, labels=None)
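

# Minimal usage sketch (the npz archive is downloaded on first use; the
# destination directory below is hypothetical):
#
#     dataset = DSprites("/tmp/dsprites", size=1000)
#     item = dataset[0]
#     print(item.inputs.shape)  # (1, 64, 64)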
| 1.945313 | 2 |
String Handling/2804.py | kjh9267/BOJ_Python | 0 | 98948 | <filename>String Handling/2804.py
import sys
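
# BOJ 2804 (crossword): print word `a` horizontally and word `b` vertically,
# crossing at the first letter of `a` that also occurs in `b`.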
a, b = sys.stdin.readline().strip().split()
graph = [['.' for j in range(len(a))] for i in range(len(b))]
for i, char in enumerate(a):
if char in b:
same = i
        start = b.index(char)
break
graph[start] = list(a)
for i, strings in enumerate(graph):
    strings[same] = b[i]
print("\n".join(map("".join,graph))) | 1.890625 | 2 |
platon/middleware/pythonic.py | shinnng/platon.py | 0 | 99076 | from platon._utils.method_formatters import (
PYTHONIC_REQUEST_FORMATTERS,
PYTHONIC_RESULT_FORMATTERS,
)
from platon.middleware.formatting import (
construct_formatting_middleware,
)
pythonic_middleware = construct_formatting_middleware(
request_formatters=PYTHONIC_REQUEST_FORMATTERS,
result_formatters=PYTHONIC_RESULT_FORMATTERS,
)
| 0.542969 | 1 |
contrib/convertConditions/ConvertConditions.py | Ambal/mangos | 0 | 99204 | #
# This file is part of the CMaNGOS Project. See AUTHORS file for Copyright information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import MySQLdb as mdb
import sys
#global Variables (change as required)
host = "localhost"
user = "mangos"
passw = "<PASSWORD>"
# databases format: list of [name, expansion]
databases = [ ["mangos", 2] ]
#databases = [ ["zero_db", 0], ["tbcdb", 1], ["udb_clean", 2], ["ytdb", 2] ]
# Should the current conditions table be loaded? (useful for appending custom content)
loadOldConditions = 0
# database, from which the conditions table will be loaded
database = databases[0][0]
#database = "mangos_custom"
# be very chatty with debug output
debug = 0
# global variables for internal use
false = 0
true = 1
processNumConditions = 0
fUpdates = 0
# Some Helper functions, main code at the bottom
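# Conditions are kept in linkedList as [entry, type, value1, value2, comment].
# A type of -1 denotes a combined condition whose value1 and value2 reference
# two other condition entries (logical AND); this is how rows with 2 or 3
# conditions are reduced to a single condition_id.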
def isSameCondition(c1, v11, v12, c2, v21, v22):
return (c1 == c2) and (v11 == v21) and (v12 == v22)
#
def compareCondition(c1, v11, v12, c2, v21, v22):
if (c1 > c2):
return true
if (c1 == c2):
if (v11 > v21):
return true
if (v11 == v21):
if (v12 > v22):
return true
return false
#
def insertCondition(c, v1, v2):
global old_max
old_max = old_max + 1
linkedList.append( [old_max, c, v1, v2, database] )
if (debug):
print "Inserted: [%d, %d, %d, %d], (%s)" % (old_max, c, v1, v2, database)
#
def findCondition(c, v1, v2):
for entry in linkedList:
if (isSameCondition(c, v1, v2, entry[1], entry[2], entry[3])):
return entry[0]
return 0
#
# Function that processes table tableName for keys keyName1, keyName2, parses the conditions of conditionString, which must select numberOfConditions conditions
def progressTable(tableName, keyName1, keyName2, conditionString, numberOfConditions):
global old_max
global processNumConditions
global fUpdates
try:
con = mdb.connect(host, user, passw, database);
cur = con.cursor()
cur.execute('SELECT %s, %s, %s FROM %s; ' % (keyName1, keyName2, conditionString, tableName))
result = cur.fetchall()
if (debug):
print 'ProgressTable %s in database %s' % (tableName, database)
for row in result:
key1 = row[0]
key2 = row[1]
c1 = v11 = v12 = c2 = v21 = v22 = c3= v31 =v32 = 0
c1 = row[2]
v11 = row[3]
v12 = row[4]
if (numberOfConditions >= 2):
c2 = row[5]
v21 = row[6]
v22 = row[7]
if (numberOfConditions >= 3):
c3 = row[8]
v31 = row[9]
v32 = row[10]
# Order the conditions of one row from big to slow
if (numberOfConditions >= 2) and (compareCondition(c2, v21, v22, c1, v11, v12)):
c1, v11, v12, c2, v21, v22 = c2, v21, v22, c1, v11, v12
if (numberOfConditions >= 3):
if (compareCondition(c3, v31, v32, c2, v21, v22)):
c2, v21, v22, c3, v31, v32 = c3, v31, v32, c2, v21, v22
if (compareCondition(c2, v21, v22, c1, v11, v12)):
c1, v11, v12, c2, v21, v22 = c2, v21, v22, c1, v11, v12
# How many conditions do we have?
rowConditionNumber = 0
if (c1 > 0):
rowConditionNumber = rowConditionNumber + 1
if (c2 > 0):
rowConditionNumber = rowConditionNumber + 1
if (c3 > 0):
rowConditionNumber = rowConditionNumber + 1
if (rowConditionNumber == 0): #nothing to do
continue;
if (debug):
print "Condition(s) for Key (%d, %d): %d, %d, %d -- %d, %d, %d -- %d, %d, %d" % (key1, key2, c1, v11, v12, c2, v21, v22, c3, v31, v32)
# Just insert
if (processNumConditions == 0):
if (rowConditionNumber >= 1 and findCondition(c1, v11, v12) == 0):
insertCondition(c1, v11, v12)
if (rowConditionNumber >= 2 and findCondition(c2, v21, v22) == 0):
insertCondition(c2, v21, v22)
if (rowConditionNumber >= 3 and findCondition(c3, v31, v32) == 0):
insertCondition(c3, v31, v32)
continue
#
# Currently processing?
if (processNumConditions != rowConditionNumber):
continue
founds = [0, 0, 0]
countFound = 0 # helper for error
if (rowConditionNumber >= 1):
founds[0] = findCondition(c1, v11, v12)
if (founds[0] > 0):
countFound = countFound + 1
if (rowConditionNumber >= 2):
founds[1] = findCondition(c2, v21, v22)
if (founds[1] > 0):
countFound = countFound + 1
if (rowConditionNumber >= 3):
founds[2] = findCondition(c3, v31, v32)
if (founds[2] > 0):
countFound = countFound + 1
if (countFound != rowConditionNumber):
print 'An error happened for: Condition(s) for Key (%d, %d): %d, %d, %d -- %d, %d, %d -- %d, %d, %d' % (key1, key2, c1, v11, v12, c2, v21, v22, c3, v31, v32)
continue
last_point = 0
#3-vector condition
if (rowConditionNumber == 3):
# search for 2 match
notSearched = [0, 0, 0]
notSearched[2] = findCondition(-1, founds[0], founds[1])
if (notSearched[2] == 0):
notSearched[2] = findCondition(-1, founds[1], founds[0])
notSearched[1] = findCondition(-1, founds[0], founds[2])
if (notSearched[1] == 0):
notSearched[1] = findCondition(-1, founds[2], founds[0])
notSearched[0] = findCondition(-1, founds[1], founds[2])
if (notSearched[0] == 0):
notSearched[0] = findCondition(-1, founds[2], founds[1])
if (notSearched == [0, 0, 0]): # nothing found
insertCondition(-1, founds[1], founds[2])
notSearched[0] = old_max
for i in range(0, 3):
if (notSearched[i] > 0):
last_point = findCondition(-1, notSearched[i], founds[i])
if (last_point == 0):
last_point = findCondition(-1, founds[i], notSearched[i])
if (last_point > 0):
break
if (last_point == 0):
for i in range(0, 3):
if (notSearched[i] > 0):
insertCondition(-1, founds[i], notSearched[i])
last_point = old_max
break
#2-vector condition
if (rowConditionNumber == 2):
# search for 2 match
last_point = findCondition(-1, founds[1], founds[0])
if (last_point == 0):
last_point = findCondition(-1, founds[0], founds[1])
if (last_point == 0):
#Not found, insert list
insertCondition(-1, founds[1], founds[0])
last_point = old_max
#1-vector condition
if (rowConditionNumber == 1):
last_point = founds[0]
# Now we must have last_point > 0 (for a condition), and linking to proper place
if (last_point > 0 and processNumConditions > 0):
#cur.execute('UPDATE %s SET condition_id=%d WHERE %s=%d AND %s=%d; ' % (tableName, last_point, keyName1, key1, keyName2, key2))
print >> fUpdates, 'UPDATE %s SET condition_id=%d WHERE %s=%d AND %s=%d;' % (tableName, last_point, keyName1, key1, keyName2, key2)
except mdb.Error, e:
print 'Error %d, %s' % (e.args[0], e.args[1])
sys.exit(1)
finally:
if con:
con.close()
## End of Helper function
linkedList = []
old_max = 0
linkedList.append( [0, 0, 0, 0, 'initial fill'] )
# Extract old conditions
if (loadOldConditions):
try:
con = mdb.connect(host, user, passw, database);
cur = con.cursor()
cur.execute('SELECT condition_entry, type, value1, value2 FROM conditions')
for row in cur:
linkedList.append( [row[0], row[1], row[2], row[3], 'reloaded from %s' % database ] )
old_max = old_max + 1
if (row[0] != old_max):
print 'An error happened at old_max=%d, entry=%d' % (old_max, row[0])
print 'Loaded %d values from %s conditions table' % (old_max, database)
except mdb.Error, e:
print 'Error %d, %s' % (e.args[0], e.args[1])
sys.exit(1)
finally:
if con:
con.close()
#
start_entry=old_max
def doTables(db):
global processNumConditions
global fUpdates
global database
database = db[0]
print 'Processing database %s (%d vector conditions)' % (database, processNumConditions)
try:
if (processNumConditions == 0):
fUpdates = open("%s_updates.sql" % database, "w")
else:
fUpdates = open("%s_updates.sql" % database, "a")
if (processNumConditions <= 1):
progressTable("reference_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("creature_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("gameobject_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("pickpocketing_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("item_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("fishing_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("skinning_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("disenchant_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("mail_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
# Not all expansions have all tables
if (db[1] >= 1):
progressTable("prospecting_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
if (db[1] >= 2):
progressTable("spell_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("milling_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
if (processNumConditions < 3):
progressTable("gossip_menu", "entry", "text_id", "cond_1, cond_1_val_1, cond_1_val_2, cond_2, cond_2_val_1, cond_2_val_2", 2)
progressTable("gossip_menu_option", "menu_id", "id", "cond_1, cond_1_val_1, cond_1_val_2, cond_2, cond_2_val_1, cond_2_val_2, cond_3, cond_3_val_1, cond_3_val_2", 3)
except:
print "An error happened here"
sys.exit(1)
finally:
fUpdates.close()
# end of helper function doTables
try:
fConditions = open("conditions_dump.sql", "w")
if (debug):
print 'Opened conditions_dump.sql successfully'
for i in range (0, 4):
processNumConditions = i
for db in databases:
doTables(db)
print 'Inserted %d rows for database %s' % (old_max - start_entry, database)
start_entry = old_max
print 'Processed database(s): %s' % databases
#create dump
print >> fConditions, 'TRUNCATE conditions;'
print >> fConditions, 'INSERT INTO conditions VALUES'
for i in range(1, old_max):
if (linkedList[i][0] != i):
print 'AN ERROR HAPPENED for i=%d, liLi[i].entry=%d' % (i, linkedList[i][0])
print >> fConditions, '(%d, %d, %d, %d), -- %s' % (linkedList[i][0], linkedList[i][1], linkedList[i][2], linkedList[i][3], linkedList[i][4])
i = old_max
print >> fConditions, '(%d, %d, %d, %d); -- %s' % (linkedList[i][0], linkedList[i][1], linkedList[i][2], linkedList[i][3], linkedList[i][4])
except:
print "An error happened"
sys.exit(1)
finally:
fConditions.close()
| 1.742188 | 2 |
lib/libgreader.py | hitsmaxft/kindlereader | 2 | 99332 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
libG(oogle)Reader
Copyright (C) 2010 <NAME> <<EMAIL>> http://asktherelic.com
Python library for working with the unofficial Google Reader API.
Google may break this at anytime, I am not responsible for damages from that
breakage, but I will try my best to fix it.
Uses HTTPS for all requests to and from Google.
Licensing included in LICENSE.txt
"""
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.4"
__credits__ = "<NAME> <<EMAIL>>, <NAME> Twidi <<EMAIL>>"
import sys
import urllib
import urllib2
import urlparse
import time
try:
import json
except:
import simplejson as json
try:
import lib.oauth2 as oauth
has_oauth = True
except:
has_oauth = False
reload(sys)
sys.setdefaultencoding("utf-8")
def urlquote(string):
"""Encode a string to utf-8 and encode it for urllib"""
return urllib.quote(string.encode("utf-8"))
class ItemsContainer(object):
"""
A base class used for all classes aimed to have items (Categories and Feeds)
"""
def __init__(self):
self.items = []
self.itemsById = {}
self.lastLoadOk = False
self.lastLoadLength = 0
self.lastUpdated = None
self.unread = 0
self.continuation = None
def _getContent(self, excludeRead=False, continuation=None):
"""
Get content from google reader with specified parameters.
        Must be overridden in inherited classes
"""
return None
def loadItems(self, excludeRead=False):
"""
Load items and call itemsLoadedDone to transform data in objects
"""
self.clearItems()
        self.lastLoadOk = False
self.lastLoadLength = 0
self._itemsLoadedDone(self._getContent(excludeRead, None))
def loadMoreItems(self, excludeRead=False, continuation=None):
"""
Load more items using the continuation parameters of previously loaded items.
"""
self.lastLoadOk = False
self.lastLoadLength = 0
if not continuation and not self.continuation:
return
self._itemsLoadedDone(self._getContent(excludeRead, continuation or self.continuation))
def _itemsLoadedDone(self, data):
"""
Called when all items are loaded
"""
if data is None:
return
self.continuation = data.get('continuation', None)
self.lastUpdated = data.get('updated', None)
self.lastLoadLength = len(data.get('items', []))
self.googleReader.itemsToObjects(self, data.get('items', []))
self.lastLoadOk = True
def _addItem(self, item):
self.items.append(item)
self.itemsById[item.id] = item
def getItem(self, id):
return self.itemsById[id]
def clearItems(self):
self.items = []
self.itemsById = {}
self.continuation = None
def getItems(self):
return self.items
def countItems(self, excludeRead=False):
        if excludeRead:
            return sum([1 for item in self.items if item.isUnread()])
        else:
            return len(self.items)
def markItemRead(self, item, read):
if read and item.isUnread():
self.unread -= 1
elif not read and item.isRead():
self.unread += 1
def markAllRead(self):
self.unread = 0
for item in self.items:
item.read = True
item.canUnread = False
result = self.googleReader.markFeedAsRead(self)
return result.upper() == 'OK'
def countUnread(self):
self.unread = self.countItems(excludeRead=True)
class Category(ItemsContainer):
"""
Class for representing a category
"""
def __str__(self):
return "<%s (%d), %s>" % (self.label, self.unread, self.id)
def __init__(self, googleReader, label, id):
"""
Key args:
- label (str)
- id (str)
"""
super(Category, self).__init__()
self.googleReader = googleReader
self.label = label
self.id = id
self.feeds = []
        if id != GoogleReader.UNCATEGORIZED_ID:
self.fetchUrl = GoogleReader.CATEGORY_URL + urlquote(self.label)
else:
self.fetchUrl = None
def _addFeed(self, feed):
if not feed in self.feeds:
self.feeds.append(feed)
try:
self.unread += feed.unread
except:
pass
def getFeeds(self):
return self.feeds
def _getContent(self, excludeRead=False, continuation=None):
return self.googleReader.getCategoryContent(self, excludeRead, continuation)
def countUnread(self):
self.unread = sum([feed.unread for feed in self.feeds])
def toArray(self):
pass
def toJSON(self):
pass
class BaseFeed(ItemsContainer):
"""
Class for representing a special feed.
"""
def __str__(self):
return "<%s, %s>" % (self.title, self.id)
def __init__(self, googleReader, title, id, unread, categories=[]):
"""
Key args:
- title (str, name of the feed)
- id (str, id for google reader)
- unread (int, number of unread items, 0 by default)
- categories (list) - list of all categories a feed belongs to, can be empty
"""
super(BaseFeed, self).__init__()
self.googleReader = googleReader
self.id = id
self.title = title
self.unread = unread
self.item_count = 0
self.categories = []
for category in categories:
self.addCategory(category)
self.continuation = None
def addCategory(self, category):
if not category in self.categories:
self.categories.append(category)
category._addFeed(self)
def getCategories(self):
return self.categories
def _getContent(self, excludeRead=False, continuation=None):
return self.googleReader.getFeedContent(self, excludeRead, continuation)
def markItemRead(self, item, read):
super(BaseFeed, self).markItemRead(item, read)
for category in self.categories:
category.countUnread()
def markAllRead(self):
self.unread = 0
for category in self.categories:
category.countUnread()
return super(BaseFeed, self).markAllRead()
def toArray(self):
pass
def toJSON(self):
pass
class SpecialFeed(BaseFeed):
"""
Class for representing specials feeds (starred, shared, friends...)
"""
def __init__(self, googleReader, type):
"""
type is one of GoogleReader.SPECIAL_FEEDS
"""
super(SpecialFeed, self).__init__(
googleReader,
title = type,
id = GoogleReader.SPECIAL_FEEDS_PART_URL+type,
unread = 0,
categories = [],
)
self.type = type
self.fetchUrl = GoogleReader.CONTENT_BASE_URL + urlquote(self.id)
class Feed(BaseFeed):
"""
Class for representing a normal feed.
"""
def __init__(self, googleReader, title, id, siteUrl=None, unread=0, categories=[]):
"""
Key args:
- title (str, name of the feed)
- id (str, id for google reader)
- siteUrl (str, can be empty)
- unread (int, number of unread items, 0 by default)
- categories (list) - list of all categories a feed belongs to, can be empty
"""
super(Feed, self).__init__(googleReader, title, id, unread, categories)
        # str.lstrip removes characters, not a prefix; strip the literal 'feed/' prefix
        self.feedUrl = self.id[len('feed/'):] if self.id.startswith('feed/') else self.id
self.siteUrl = siteUrl
self.fetchUrl = GoogleReader.FEED_URL + urlquote(self.id)
class Item(object):
"""
Class for representing an individual item (an entry of a feed)
"""
def __str__(self):
return '<"%s" by %s, %s>' % (self.title, self.author, self.id)
def __init__(self, googleReader, item, parent):
"""
item : An item loaded from json
parent : the object (Feed of Category) containing the Item
"""
self.googleReader = googleReader
self.parent = parent
self.data = item # save original data for accessing other fields
self.id = item['id']
self.title = item.get('title', '(no title)')
self.author = item.get('author', None)
if item.has_key('content'):
self.content = item['content']
else:
self.content = item.get('content', item.get('summary', {})).get('content', '')
self.origin = { 'title': '', 'url': ''}
self.published = item.get('published', '')
self.idx = item.get('idx', 0)
if self.published and self.published != '':
self.published = time.strftime('%m/%d %H:%M', time.localtime(self.published))
# check original url
self.url = None
for alternate in item.get('alternate', []):
if alternate.get('type', '') == 'text/html':
self.url = alternate['href']
break
# check status
self.read = False
self.starred = False
self.shared = False
for category in item.get('categories', []):
if category.endswith('/state/com.google/read'):
self.read = True
elif category.endswith('/state/com.google/starred'):
self.starred = True
elif category in ('user/-/state/com.google/broadcast',
'user/%s/state/com.google/broadcast' % self.googleReader.userId):
self.shared = True
self.canUnread = item.get('isReadStateLocked', 'false') != 'true'
#keep feed, can be used when item is fetched from a special feed, then it's the original one
# try:
# f = item['origin']
# self.origin = {
# 'title': f.get('title', ''),
# 'url': f.get('htmlUrl', ''),
# }
# self.feed = self.googleReader.getFeed(f['streamId'])
# if not self.feed:
# raise
# if not self.feed.title and 'title' in f:
# self.feed.title = f['title']
# except:
# try:
# self.feed = Feed(self, f.get('title', ''), f['streamId'], f.get('htmlUrl', None), 0, [])
# try:
# self.googleReader._addFeed(self.feed)
# except:
# pass
# except:
# self.feed = None
self.feed = None
self.parent._addItem(self)
def isUnread(self):
return not self.read
def isRead(self):
return self.read
def markRead(self, read=True):
self.parent.markItemRead(self, read)
self.read = read
if read:
result = self.googleReader.addItemTag(self, GoogleReader.TAG_READ)
else:
result = self.googleReader.removeItemTag(self, GoogleReader.TAG_READ)
return result.upper() == 'OK'
def markUnread(self, unread=True):
return self.markRead(not unread)
def isShared(self):
return self.shared
def markShared(self, shared=True):
self.shared = shared
if shared:
result = self.googleReader.addItemTag(self, GoogleReader.TAG_SHARED)
else:
result = self.googleReader.removeItemTag(self, GoogleReader.TAG_SHARED)
return result.upper() == 'OK'
def share(self):
return self.markShared()
def unShare(self):
return self.markShared(False)
def isStarred(self):
return self.starred
def markStarred(self, starred=True):
self.starred = starred
if starred:
result = self.googleReader.addItemTag(self, GoogleReader.TAG_STARRED)
else:
result = self.googleReader.removeItemTag(self, GoogleReader.TAG_STARRED)
return result.upper() == 'OK'
def star(self):
return self.markStarred()
def unStar(self):
return self.markStarred(False)
class GoogleReader(object):
"""
Class for using the unofficial Google Reader API and working with
the data it returns.
Requires valid google username and password.
"""
READER_BASE_URL = 'https://www.google.com/reader/api'
API_URL = READER_BASE_URL + '/0/'
USER_INFO_URL = API_URL + 'user-info'
SUBSCRIPTION_LIST_URL = API_URL + 'subscription/list'
UNREAD_COUNT_URL = API_URL + 'unread-count'
TOKEN_URL = API_URL + 'token'
CONTENT_PART_URL = 'stream/contents/'
CONTENT_BASE_URL = API_URL + CONTENT_PART_URL
SPECIAL_FEEDS_PART_URL = 'user/-/state/com.google/'
READING_LIST = 'reading-list'
READ_LIST = 'read'
KEPTUNREAD_LIST = 'kept-unread'
STARRED_LIST = 'starred'
SHARED_LIST = 'broadcast'
NOTES_LIST = 'created'
FRIENDS_LIST = 'broadcast-friends'
SPECIAL_FEEDS = (READING_LIST, READ_LIST, KEPTUNREAD_LIST, STARRED_LIST, \
SHARED_LIST, FRIENDS_LIST, NOTES_LIST, )
FEED_URL = CONTENT_BASE_URL
CATEGORY_URL = CONTENT_BASE_URL + 'user/-/label/'
TAG_LIST = API_URL + 'tag/list'
EDIT_TAG_URL = API_URL + 'edit-tag'
TAG_READ = 'user/-/state/com.google/read'
TAG_STARRED = 'user/-/state/com.google/starred'
TAG_SHARED = 'user/-/state/com.google/broadcast'
MARK_ALL_READ_URL = API_URL + 'mark-all-as-read'
UNCATEGORIZED_ID = 'uncategorized'
    UNCATEGORIZED_LABEL = u'未分类'  # "Uncategorized" (Chinese)
def __str__(self):
return "<Google Reader object: %s>" % self.username
def __init__(self, auth, userId=None):
self.auth = auth
self.feeds = []
self.categories = []
self.feedsById = {}
self.categoriesById = {}
self.specialFeeds = {}
self.orphanFeeds = []
self.userId = userId
self.token = None
def toJSON(self):
"""
TODO: build a json object to return via ajax
"""
pass
def getToken(self):
self.token = self.httpGet(self.TOKEN_URL)
return self.token
def getFeeds(self):
"""
Returns a list of Feed objects containing all of a users subscriptions
or None if buildSubscriptionList has not been called, to get the Feeds
"""
return self.feeds
def getCategories(self):
"""
Returns a list of all the categories or None if buildSubscriptionList
has not been called, to get the Feeds
"""
return self.categories
def getTaglist(self):
"""return tag list"""
contentJson = self.httpGet(self.TAG_LIST, { 'output': 'json', })
return json.loads(contentJson, strict=False)
def makeSpecialFeeds(self):
for type in self.SPECIAL_FEEDS:
self.specialFeeds[type] = SpecialFeed(self, type)
def getSpecialFeed(self, type):
return self.specialFeeds[type]
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
if not self.token:
self.getToken()
unreadJson = self.httpGet(GoogleReader.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(GoogleReader.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories, uncategorized = [], 0
hUncategorized = {
'id':GoogleReader.UNCATEGORIZED_ID,
'label':GoogleReader.UNCATEGORIZED_LABEL
}
if 'categories' in sub and len(sub['categories']) > 0:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
else:
sub['categories'] = []
sub['categories'].append(hUncategorized)
uncategorized = uncategorized + 1
if uncategorized > 0:
if not hUncategorized['id'] in self.categoriesById:
category = Category(self, hUncategorized['label'], hUncategorized['id'])
self._addCategory(category)
categories.append(self.categoriesById[hUncategorized['id']])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True
def _getFeedContent(self, url, excludeRead=False, continuation=None, number=None):
"""
A list of items (from a feed, a category or from URLs made with SPECIAL_ITEMS_URL)
Returns a dict with
- id (str, feed's id)
- continuation (str, to be used to fetch more items)
- items, array of dits with :
- update (update timestamp)
- author (str, username)
- title (str, page title)
- id (str)
- content (dict with content and direction)
- categories (list of categories including states or ones provided by the feed owner)
"""
parameters = {}
if excludeRead:
parameters['xt'] = 'user/-/state/com.google/read'
if continuation:
parameters['c'] = continuation
if number:
parameters['n'] = number
contentJson = self.httpGet(url, parameters)
return json.loads(contentJson, strict=False)
def itemsToObjects(self, parent, items):
objects = []
for item in items:
objects.append(Item(self, item, parent))
return objects
def getFeedContent(self, feed, excludeRead=False, continuation=None, number=None):
"""
Return items for a particular feed
"""
return self._getFeedContent(feed.fetchUrl, excludeRead, continuation, number)
def getCategoryContent(self, category, excludeRead=False, continuation=None, number=None):
"""
Return items for a particular category
"""
        if category.id == GoogleReader.UNCATEGORIZED_ID:
return None
return self._getFeedContent(category.fetchUrl, excludeRead, continuation, number)
def removeItemTag(self, item, tag):
return self.httpPost(GoogleReader.EDIT_TAG_URL, {'i': item.id, 'r': tag, 'ac': 'edit-tags', 'T':self.token })
def addItemTag(self, item, tag):
return self.httpPost(GoogleReader.EDIT_TAG_URL, {'i': item.id, 'a': tag, 'ac': 'edit-tags', 'T':self.token })
def markFeedAsRead(self, feed):
return self.httpPost(GoogleReader.MARK_ALL_READ_URL, {'s': feed.id, 'T':self.token })
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(GoogleReader.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result
def getUserSignupDate(self):
"""
Returns the human readable date of when the user signed up for google reader.
"""
userinfo = self.getUserInfo()
timestamp = int(float(userinfo["signupTimeSec"]))
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(timestamp))
def httpGet(self, url, parameters=None):
"""
Wrapper around AuthenticationMethod get()
"""
return self.auth.get(url, parameters)
def httpPost(self, url, post_parameters=None):
"""
Wrapper around AuthenticationMethod post()
"""
return self.auth.post(url, post_parameters)
def _addFeed(self, feed):
if feed.id not in self.feedsById:
self.feedsById[feed.id] = feed
self.feeds.append(feed)
def _addCategory (self, category):
if category.id not in self.categoriesById:
self.categoriesById[category.id] = category
self.categories.append(category)
def getFeed(self, id):
return self.feedsById.get(id, None)
def getCategory(self, id):
return self.categoriesById.get(id, None)
def _clearLists(self):
"""
Clear all list before sync : feeds and categories
"""
self.feedsById = {}
self.feeds = []
self.categoriesById = {}
self.categories = []
self.orphanFeeds = []
class AuthenticationMethod(object):
"""
Defines an interface for authentication methods, must have a get method
make this abstract?
1. auth on setup
2. need to have GET method
"""
def __init__(self):
self.client = "libgreader" #@todo: is this needed?
def getParameters(self, extraargs=None):
#ck is a timecode to help google with caching
parameters = {'ck':time.time(), 'client':self.client}
if extraargs:
parameters.update(extraargs)
return urllib.urlencode(parameters)
def postParameters(self, post=None):
if post is not None:
post_string = urllib.urlencode(post)
else:
post_string = None
return post_string
class ClientAuth(AuthenticationMethod):
"""
Auth type which requires a valid Google Reader username and password
"""
CLIENT_URL = 'https://www.google.com/accounts/ClientLogin'
def __init__(self, username, password):
super(ClientAuth, self).__init__()
self.username = username
self.password = password
self.auth_token = self._getAuth()
self.token = self._getToken()
def postParameters(self, post=None):
post.update({'T': self.token})
return super(ClientAuth, self).postParameters(post)
def get(self, url, parameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
getString = self.getParameters(parameters)
req = urllib2.Request(url + "?" + getString)
req.add_header('Authorization','GoogleLogin auth=%s' % self.auth_token)
r = urllib2.urlopen(req)
data = r.read()
r.close()
return data
def post(self, url, postParameters=None, urlParameters=None):
if urlParameters:
getString = self.getParameters(urlParameters)
req = urllib2.Request(url + "?" + getString)
else:
req = urllib2.Request(url)
req.add_header('Authorization','GoogleLogin auth=%s' % self.auth_token)
postString = self.postParameters(postParameters)
r = urllib2.urlopen(req, data=postString)
data = r.read()
r.close()
return data
def _getAuth(self):
"""
Main step in authorizing with Reader.
Sends request to Google ClientAuth URL which returns an Auth token.
Returns Auth token or raises IOError on error.
"""
parameters = urllib.urlencode({
'service':'reader',
'Email':self.username,
'Passwd':self.password,
'accountType':'GOOGLE'})
try:
conn = urllib2.urlopen(ClientAuth.CLIENT_URL,parameters)
data = conn.read()
conn.close()
except urllib2.HTTPError:
raise IOError("Error getting the Auth token, have you entered a"
"correct username and password?")
#Strip newline and non token text.
        # split only on the first '=': token values may themselves contain '='
        token_dict = dict(x.split('=', 1) for x in data.split('\n') if x)
return token_dict["Auth"]
def _getToken(self):
"""
Second step in authorizing with Reader.
Sends authorized request to Reader token URL and returns a token value.
Returns token or raises IOError on error.
"""
req = urllib2.Request(GoogleReader.API_URL + 'token')
req.add_header('Authorization','GoogleLogin auth=%s' % self.auth_token)
try:
conn = urllib2.urlopen(req)
token = conn.read()
conn.close()
except urllib2.HTTPError:
raise IOError("Error getting the Reader token.")
return token
class OAuthMethod(AuthenticationMethod):
"""
Loose wrapper around OAuth2 lib. Kinda awkward.
"""
GOOGLE_URL = 'https://www.google.com/accounts/'
REQUEST_TOKEN_URL = (GOOGLE_URL + 'OAuthGetRequestToken?scope=%s'
% GoogleReader.READER_BASE_URL)
AUTHORIZE_URL = GOOGLE_URL + 'OAuthAuthorizeToken'
ACCESS_TOKEN_URL = GOOGLE_URL + 'OAuthGetAccessToken'
def __init__(self, consumer_key, consumer_secret):
if not has_oauth:
raise ImportError("No module named oauth2")
super(OAuthMethod, self).__init__()
self.oauth_key = consumer_key
self.oauth_secret = consumer_secret
self.consumer = oauth.Consumer(self.oauth_key, self.oauth_secret)
self.authorized_client = None
self.token_key = None
self.token_secret = None
self.callback = None
def setCallback(self, callback_url):
self.callback = '&oauth_callback=%s' % callback_url
def setRequestToken(self):
# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.
client = oauth.Client(self.consumer)
if not self.callback:
resp, content = client.request(OAuthMethod.REQUEST_TOKEN_URL)
else:
resp, content = client.request(OAuthMethod.REQUEST_TOKEN_URL + self.callback)
if int(resp['status']) != 200:
raise IOError("Error setting Request Token")
token_dict = dict(urlparse.parse_qsl(content))
self.token_key = token_dict['oauth_token']
self.token_secret = token_dict['oauth_token_secret']
def setAndGetRequestToken(self):
self.setRequestToken()
return (self.token_key, self.token_secret)
def buildAuthUrl(self, token_key=None):
if not token_key:
token_key = self.token_key
#return auth url for user to click or redirect to
return "%s?oauth_token=%s" % (OAuthMethod.AUTHORIZE_URL, token_key)
def setAccessToken(self):
self.setAccessTokenFromCallback(self.token_key, self.token_secret, None)
def setAccessTokenFromCallback(self, token_key, token_secret, verifier):
token = oauth.Token(token_key, token_secret)
#step 2 depends on callback
if verifier:
token.set_verifier(verifier)
client = oauth.Client(self.consumer, token)
resp, content = client.request(OAuthMethod.ACCESS_TOKEN_URL, "POST")
if int(resp['status']) != 200:
raise IOError("Error setting Access Token")
access_token = dict(urlparse.parse_qsl(content))
#created Authorized client using access tokens
self.authFromAccessToken(access_token['oauth_token'],
access_token['oauth_token_secret'])
def authFromAccessToken(self, oauth_token, oauth_token_secret):
self.token_key = oauth_token
self.token_key_secret = oauth_token_secret
token = oauth.Token(oauth_token,oauth_token_secret)
self.authorized_client = oauth.Client(self.consumer, token)
def getAccessToken(self):
return (self.token_key, self.token_secret)
def get(self, url, parameters=None):
if self.authorized_client:
getString = self.getParameters(parameters)
#can't pass in urllib2 Request object here?
resp, content = self.authorized_client.request(url + "?" + getString)
return content
else:
raise IOError("No authorized client available.")
def post(self, url, postParameters=None, urlParameters=None):
if self.authorized_client:
if urlParameters:
getString = self.getParameters(urlParameters)
req = urllib2.Request(url + "?" + getString)
else:
req = urllib2.Request(url)
postString = self.postParameters(postParameters)
resp,content = self.authorized_client.request(req, method="POST", body=postString)
return content
else:
raise IOError("No authorized client available.")
class OAuth2Method(AuthenticationMethod):
def __init__(self, consumer_key, consumer_secret, token_key, token_secret):
if not has_oauth:
raise ImportError("No module named oauth2")
super(OAuth2Method, self).__init__()
consumer = oauth.Consumer(consumer_key, consumer_secret)
token = oauth.Token(token_key, token_secret)
self.authorized_client = oauth.Client(consumer, token)
def get(self, url, parameters=None):
if self.authorized_client:
getString = self.getParameters(parameters)
req = url + "?" + getString
resp,content = self.authorized_client.request(req, 'GET')
return content
else:
raise IOError("No authorized client available.")
def post(self, url, postParameters=None, urlParameters=None):
if self.authorized_client:
getString = self.getParameters(urlParameters)
req = url + "?" + getString
postString = self.postParameters(postParameters)
resp,content = self.authorized_client.request(req, "POST", body=postString)
return content
else:
raise IOError("No authorized client available.") | 1.914063 | 2 |
ml_train_service/app/api/config.py | PSUCompBio/ML-Training-v1 | 0 | 99460 |
import boto3
import os
# from dotenv import load_dotenv
# # # OR, explicitly providing path to '.env'
# from pathlib import Path # Python 3.6+ only
# env_path = Path('.') / '.env'
# load_dotenv(dotenv_path=env_path)
PATH_PKG = os.path.dirname(os.path.abspath(__file__))
# PATH_PKG = "."
FEATURE_PATH = os.path.join(PATH_PKG, "resources/features.json")
DATA_PATH = os.path.join(PATH_PKG, "data")
MODEL_PATH = os.path.join(PATH_PKG, "model")
RESULT_PATH = os.path.join(PATH_PKG, "result")
TRAIN_FILE_PATH = os.path.join(PATH_PKG,"train.py")
TRAIN_REQUIREMENTS = os.path.join(PATH_PKG,"requirements.txt")
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
if not os.path.exists(MODEL_PATH):
os.makedirs(MODEL_PATH)
if not os.path.exists(RESULT_PATH):
os.makedirs(RESULT_PATH)
# Required environment variables (values supplied at deploy time):
#   aws_access_key_id, aws_secret_access_key, region,
#   bucket_name, sagemaker_role (and optionally folder)
session = boto3.Session(
aws_access_key_id=os.environ["aws_access_key_id"],
aws_secret_access_key=os.environ["aws_secret_access_key"],
region_name=os.environ["region"])
s3_resource = session.resource('s3')
s3_client = session.client('s3')
bucket_name = os.environ["bucket_name"]
sagemaker_role = os.environ['sagemaker_role']
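# Usage sketch (illustrative; the import path below is an assumption):
#
#   from ml_train_service.app.api.config import s3_client, bucket_name
#   s3_client.upload_file("model.tar.gz", bucket_name, "model/model.tar.gz")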
test/test_language_models/test_lstm.py | entn-at/BrnoLM | 0 | 99588 | from test.common import TestCase
import torch
from brnolm.language_models.lstm_model import LSTMLanguageModel
class OutputExtractionTests(TestCase):
def test_multilayer(self):
model = LSTMLanguageModel(4, ninp=10, nhid=10, nlayers=2, dropout=0.0)
h0 = model.init_hidden(3)
o, h1 = model(torch.tensor([[1], [2], [3]]), h0)
self.assertEqual(model.extract_output_from_h(h1).unsqueeze(1), o)
bases/vision/transforms.py | yeshwanthv5/PruneFL | 6 | 99716 |
import torch
from torch.nn.functional import one_hot
class Flatten:
def __call__(self, img: torch.FloatTensor):
return img.reshape((-1))
class OneHot:
def __init__(self, n_classes, to_float: bool = False):
self.n_classes = n_classes
self.to_float = to_float
def __call__(self, label: torch.Tensor):
return one_hot(label, self.n_classes).float() if self.to_float else one_hot(label, self.n_classes)
class DataToTensor:
def __init__(self, dtype=None):
if dtype is None:
dtype = torch.float
self.dtype = dtype
def __call__(self, data):
return torch.tensor(data, dtype=self.dtype)
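if __name__ == "__main__":
    # Illustrative smoke test added for clarity; not part of the original module.
    img = torch.rand(1, 28, 28)
    assert Flatten()(img).shape == (28 * 28,)            # image flattened to a vector
    label = torch.tensor(3)
    print(OneHot(n_classes=10, to_float=True)(label))    # float one-hot of length 10
    print(DataToTensor()([1.0, 2.0, 3.0]))               # list -> float32 tensor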
Python_Exercise/py_exercise_thirty_four.py | kindyluv/My_Personal_Python_Exercises | 0 | 99844 |
import random
def just_wasting_time():
for i in range(5):
num = random.randint(0, 10)
print(num)
just_wasting_time()
def just_wasting_time_two():
total = 0
while total < 102:
        print(total)
total += 1
just_wasting_time_two()
def getAnswer(answerNumber):
if answerNumber == 1:
return 'It is certain'
elif answerNumber == 2:
return 'It is decidedly so'
elif answerNumber == 3:
return 'Yes'
elif answerNumber == 4:
return 'Reply hazy try again'
elif answerNumber == 5:
return 'Ask again later'
elif answerNumber == 6:
return 'Concentrate and ask again'
elif answerNumber == 7:
return 'My reply is no'
elif answerNumber == 8:
return 'Outlook not so good'
elif answerNumber == 9:
return 'Very doubtful'
r = random.randint(1, 9)
fortune = getAnswer(r)
print(fortune)
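# An equivalent lookup-based version (illustrative alternative to the if/elif chain):
# answers = ['It is certain', 'It is decidedly so', 'Yes', 'Reply hazy try again',
#            'Ask again later', 'Concentrate and ask again', 'My reply is no',
#            'Outlook not so good', 'Very doubtful']
# print(answers[random.randint(1, 9) - 1])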
def spam(divideBy):
try:
return 42 / divideBy
except ZeroDivisionError:
print('can not divide by zero please enter a number greater than zero.')
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
salt/modules/aixpkg.py | tomdoherty/salt | 1 | 99972 | """
Package support for AIX
.. important::
If you feel that Salt should be using this module to manage filesets or
rpm packages on a minion, and it is using a different module (or gives an
error similar to *'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
"""
import copy
import logging
import os
import pathlib
import salt.utils.data
import salt.utils.functools
import salt.utils.path
import salt.utils.pkg
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "pkg"
def __virtual__():
"""
Set the virtual pkg module if the os is AIX
"""
if __grains__["os_family"] == "AIX":
return __virtualname__
return (False, "Did not load AIX module on non-AIX OS.")
def _check_pkg(target):
"""
Return name, version and if rpm package for specified target
"""
ret = {}
cmd = ["/usr/bin/lslpp", "-Lc", target]
result = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 == result["retcode"]:
name = ""
version_num = ""
rpmpkg = False
lines = result["stdout"].splitlines()
for line in lines:
if line.startswith("#"):
continue
comps = line.split(":")
if len(comps) < 7:
raise CommandExecutionError(
"Error occurred finding fileset/package",
info={"errors": comps[1].strip()},
)
# handle first matching line
if "R" in comps[6]:
name = comps[0]
rpmpkg = True
else:
name = comps[1] # use fileset rather than rpm package
version_num = comps[2]
break
return name, version_num, rpmpkg
else:
raise CommandExecutionError(
"Error occurred finding fileset/package",
info={"errors": result["stderr"].strip()},
)
def _is_installed_rpm(name):
"""
Returns True if the rpm package is installed. Otherwise returns False.
"""
cmd = ["/usr/bin/rpm", "-q", name]
return __salt__["cmd.retcode"](cmd) == 0
def _list_pkgs_from_context(versions_as_list):
"""
Use pkg list from __context__
"""
if versions_as_list:
return __context__["pkg.list_pkgs"]
else:
ret = copy.deepcopy(__context__["pkg.list_pkgs"])
__salt__["pkg_resource.stringify"](ret)
return ret
def list_pkgs(versions_as_list=False, **kwargs):
"""
List the filesets/rpm packages currently installed as a dict:
.. code-block:: python
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
"""
ret = {}
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
if any(
[salt.utils.data.is_true(kwargs.get(x)) for x in ("removed", "purge_desired")]
):
return ret
if "pkg.list_pkgs" in __context__ and kwargs.get("use_context", True):
return _list_pkgs_from_context(versions_as_list)
    # cmd returns information colon delimited in a single line, format
# Package Name:Fileset:Level:State:PTF Id:Fix State:Type:Description:
# Destination Dir.:Uninstaller:Message Catalog:Message Set:
# Message Number:Parent:Automatic:EFIX Locked:Install Path:Build Date
# Example:
# xcursor:xcursor-1.1.7-3:1.1.7-3: : :C:R:X Cursor library: :\
# /bin/rpm -e xcursor: : : : :0: :(none):Mon May 8 15:18:35 CDT 2017
# bos:bos.rte.libcur:7.1.5.0: : :C:F:libcurses Library: : : : : : :0:0:/:1731
#
# where Type codes: F -- Installp Fileset, P -- Product, C -- Component,
# T -- Feature, R -- RPM Package
cmd = "/usr/bin/lslpp -Lc"
lines = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
for line in lines:
if line.startswith("#"):
continue
comps = line.split(":")
if len(comps) < 7:
continue
if "R" in comps[6]:
name = comps[0]
else:
name = comps[1] # use fileset rather than rpm package
version_num = comps[2]
__salt__["pkg_resource.add_pkg"](ret, name, version_num)
__salt__["pkg_resource.sort_pkglist"](ret)
__context__["pkg.list_pkgs"] = copy.deepcopy(ret)
if not versions_as_list:
__salt__["pkg_resource.stringify"](ret)
return ret
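# Example return value for list_pkgs (illustrative, based on the lslpp sample above):
#   {'bos.rte.libcur': '7.1.5.0', 'xcursor': '1.1.7-3'}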
def version(*names, **kwargs):
"""
Return the current installed version of the named fileset/rpm package
If more than one fileset/rpm package name is specified a dict of
name/version pairs is returned.
.. versionchanged:: 3005
CLI Example:
.. code-block:: bash
        salt '*' pkg.version <package name>
        salt '*' pkg.version <package1> <package2> <package3> ...
"""
kwargs.pop("refresh", True)
ret = {}
if not names:
return ""
for name in names:
# AIX packaging includes info on filesets and rpms
version_found = ""
cmd = "lslpp -Lq {}".format(name)
aix_info = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 == aix_info["retcode"]:
aix_info_list = aix_info["stdout"].split("\n")
log.debug(
"Returned AIX packaging information aix_info_list %s for name %s",
aix_info_list,
name,
)
for aix_line in aix_info_list:
if name in aix_line:
aix_ver_list = aix_line.split()
log.debug(
"Processing name %s with AIX packaging version information %s",
name,
aix_ver_list,
)
version_found = aix_ver_list[1]
if version_found:
log.debug(
"Found name %s in AIX packaging information, version %s",
name,
version_found,
)
break
else:
log.debug("Could not find name %s in AIX packaging information", name)
ret[name] = version_found
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
def _is_installed(name, **kwargs):
"""
Returns True if the fileset/rpm package is installed. Otherwise returns False.
CLI Example:
.. code-block:: bash
salt '*' pkg._is_installed bash
"""
cmd = ["/usr/bin/lslpp", "-Lc", name]
return __salt__["cmd.retcode"](cmd) == 0
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
"""
Install the named fileset(s)/rpm package(s).
.. versionchanged:: 3005
        rpm packages are installed preferring the first available of the following tools, in order:
/opt/freeware/bin/dnf
/opt/freeware/bin/yum
/usr/bin/yum
/usr/bin/rpm
.. note:
use of rpm to install implies that rpm's dependencies must have been previously installed.
dnf and yum automatically install rpm's dependencies as part of the install process
    Algorithm to install filesets or rpms is as follows:
if ends with '.rte' or '.bff'
process as fileset
if ends with '.rpm'
process as rpm
if unrecognised or no file extension
attempt process with dnf | yum
failure implies attempt process as fileset
Fileset needs to be available as a single path and filename
compound filesets are not handled and are not supported.
An example is bos.adt.insttools which is part of bos.adt.other and is installed as follows
/usr/bin/installp -acXYg /cecc/repos/aix72/TL4/BASE/installp/ppc/bos.adt.other bos.adt.insttools
name
The name of the fileset or rpm package to be installed.
refresh
Whether or not to update the yum database before executing.
pkgs
A list of filesets and/or rpm packages to install.
Must be passed as a python list. The ``name`` parameter will be
ignored if this option is passed.
version
Install a specific version of a fileset/rpm package.
(Unused at present).
test
Verify that command functions correctly.
Returns a dict containing the new fileset(s)/rpm package(s) names and versions:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True
salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff
salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte
salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.net
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install libxml2
"""
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug("Installing these fileset(s)/rpm package(s) %s: %s", name, targets)
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset (normally ends with bff or rte) or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
flag_fileset = False
flag_actual_rpm = False
flag_try_rpm_failed = False
cmd = ""
out = {}
if filename.endswith(".bff") or filename.endswith(".rte"):
flag_fileset = True
log.debug("install identified %s as fileset", filename)
else:
if filename.endswith(".rpm"):
flag_actual_rpm = True
log.debug("install identified %s as rpm", filename)
else:
log.debug("install filename %s trying install as rpm", filename)
# assume use dnf or yum
cmdflags = "install "
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/dnf"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/usr/bin/yum").is_file():
# check for old yum first, removed if new dnf or yum
cmdexe = "/usr/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
else:
cmdexe = "/usr/bin/rpm"
cmdflags = "-Uivh "
if test:
cmdflags += "--test"
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if "retcode" in out and not (0 == out["retcode"] or 100 == out["retcode"]):
if not flag_actual_rpm:
flag_try_rpm_failed = True
log.debug(
"install tried filename %s as rpm and failed, trying as fileset",
filename,
)
else:
errors.append(out["stderr"])
log.debug(
"install error rpm path, returned result %s, resultant errors %s",
out,
errors,
)
if flag_fileset or flag_try_rpm_failed:
# either identified as fileset, or failed trying install as rpm, try as fileset
cmd = "/usr/sbin/installp -acYXg"
if test:
cmd += "p"
cmd += " -d "
dirpath = os.path.dirname(target)
cmd += dirpath + " " + filename
log.debug("install fileset commanda to attempt %s", cmd)
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 != out["retcode"]:
errors.append(out["stderr"])
log.debug(
"install error fileset path, returned result %s, resultant errors %s",
out,
errors,
)
    # Get a list of the packages after the install
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problems encountered installing filesets(s)/package(s)",
info={"changes": ret, "errors": errors},
)
# No error occurred
if test:
return "Test succeeded."
return ret
def remove(name=None, pkgs=None, **kwargs):
"""
Remove specified fileset(s)/rpm package(s).
name
The name of the fileset or rpm package to be deleted.
.. versionchanged:: 3005
        rpm packages are removed preferring the first available of the following tools, in order:
/opt/freeware/bin/dnf
/opt/freeware/bin/yum
/usr/bin/yum
/usr/bin/rpm
pkgs
A list of filesets and/or rpm packages to delete.
Must be passed as a python list. The ``name`` parameter will be
ignored if this option is passed.
Returns a list containing the removed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <fileset/rpm package name>
salt '*' pkg.remove tcsh
salt '*' pkg.remove xlC.rte
salt '*' pkg.remove Firefox.base.adt
salt '*' pkg.remove pkgs='["foo", "bar"]'
"""
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets)
errors = []
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Remove the fileset or rpm package(s)
for target in targets:
cmd = ""
out = {}
try:
named, versionpkg, rpmpkg = _check_pkg(target)
except CommandExecutionError as exc:
if exc.info:
errors.append(exc.info["errors"])
continue
if rpmpkg:
# assume use dnf or yum
cmdflags = "-y remove"
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdexe = "/opt/freeware/bin/dnf"
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdexe = "/opt/freeware/bin/yum"
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/usr/bin/yum").is_file():
cmdexe = "/usr/bin/yum"
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
else:
cmdexe = "/usr/bin/rpm"
cmdflags = "-e"
cmd = "{} {} {}".format(cmdexe, cmdflags, target)
out = __salt__["cmd.run_all"](cmd, python_shell=False)
else:
cmd = ["/usr/sbin/installp", "-u", named]
out = __salt__["cmd.run_all"](cmd, python_shell=False)
log.debug("result of removal command %s, returned result %s", cmd, out)
# Get a list of the packages after the uninstall
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problems encountered removing filesets(s)/package(s)",
info={"changes": ret, "errors": errors},
)
return ret
def latest_version(*names, **kwargs):
"""
Return the latest available version of the named fileset/rpm package available for
upgrade or installation. If more than one fileset/rpm package name is
specified, a dict of name/version pairs is returned.
If the latest version of a given fileset/rpm package is already installed,
an empty string will be returned for that package.
.. versionchanged:: 3005
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
    Note: currently only functional for rpm packages, because filesets do not have a specific repository location to check.
    Requires yum or dnf to be available in order to query a repository.
    This function will always return an empty string for an unfound fileset/rpm package.
"""
kwargs.pop("refresh", True)
ret = {}
if not names:
return ""
for name in names:
# AIX packaging includes info on filesets and rpms
version_found = ""
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdexe = "/opt/freeware/bin/dnf"
cmd = "{} check-update {}".format(cmdexe, name)
available_info = __salt__["cmd.run_all"](
cmd, python_shell=False, env=libpathenv, ignore_retcode=True
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdexe = "/opt/freeware/bin/yum"
cmd = "{} check-update {}".format(cmdexe, name)
available_info = __salt__["cmd.run_all"](
cmd, python_shell=False, env=libpathenv, ignore_retcode=True
)
elif pathlib.Path("/usr/bin/yum").is_file():
cmdexe = "/usr/bin/yum"
cmd = "{} check-update {}".format(cmdexe, name)
available_info = __salt__["cmd.run_all"](
cmd, python_shell=False, env=libpathenv, ignore_retcode=True
)
else:
# no yum found implies no repository support
available_info = None
log.debug(
"latest_version dnf|yum check-update command returned information %s",
available_info,
)
if available_info and (
0 == available_info["retcode"] or 100 == available_info["retcode"]
):
available_output = available_info["stdout"]
if available_output:
available_list = available_output.split()
flag_found = False
for name_chk in available_list:
# have viable check, note .ppc or .noarch
if name_chk.startswith(name):
# check full name
pkg_label = name_chk.split(".")
if name == pkg_label[0]:
flag_found = True
elif flag_found:
# version comes after name found
version_found = name_chk
break
if version_found:
log.debug(
"latest_version result for name %s found version %s",
name,
version_found,
)
else:
log.debug("Could not find AIX / RPM packaging version for %s", name)
ret[name] = version_found
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(
latest_version, "available_version"
)
def upgrade_available(name, **kwargs):
"""
Check whether or not an upgrade is available for a given package
.. versionchanged:: 3005
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
    Note: currently only functional for rpm packages, because filesets do not have a specific repository location to check.
    Requires yum or dnf to be available in order to query a repository.
"""
# AIX packaging includes info on filesets and rpms
rpm_found = False
version_found = ""
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdexe = "/opt/freeware/bin/dnf"
cmd = "{} check-update {}".format(cmdexe, name)
available_info = __salt__["cmd.run_all"](
cmd, python_shell=False, env=libpathenv, ignore_retcode=True
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdexe = "/opt/freeware/bin/yum"
cmd = "{} check-update {}".format(cmdexe, name)
available_info = __salt__["cmd.run_all"](
cmd, python_shell=False, env=libpathenv, ignore_retcode=True
)
elif pathlib.Path("/usr/bin/yum").is_file():
cmdexe = "/usr/bin/yum"
cmd = "{} check-update {}".format(cmdexe, name)
available_info = __salt__["cmd.run_all"](
cmd, python_shell=False, env=libpathenv, ignore_retcode=True
)
else:
# no yum found implies no repository support
return False
log.debug(
"upgrade_available yum check-update command %s, returned information %s",
cmd,
available_info,
)
if 0 == available_info["retcode"] or 100 == available_info["retcode"]:
available_output = available_info["stdout"]
if available_output:
available_list = available_output.split()
flag_found = False
for name_chk in available_list:
# have viable check, note .ppc or .noarch
if name_chk.startswith(name):
# check full name
pkg_label = name_chk.split(".")
if name == pkg_label[0]:
flag_found = True
elif flag_found:
# version comes after name found
version_found = name_chk
break
current_version = version(name)
log.debug(
"upgrade_available result for name %s, found current version %s, available version %s",
name,
current_version,
version_found,
)
if version_found:
return current_version != version_found
else:
log.debug("upgrade_available information for name %s was not found", name)
return False
servicecatalog_factory/workflow/portfolios/associate_product_with_portfolio_task.py | RobBrazier/aws-service-catalog-factory | 116 | 100100 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import luigi
from servicecatalog_factory import aws
from servicecatalog_factory.workflow.portfolios.create_portfolio_task import (
CreatePortfolioTask,
)
from servicecatalog_factory.workflow.portfolios.create_product_task import (
CreateProductTask,
)
from servicecatalog_factory.workflow.tasks import FactoryTask, logger
class AssociateProductWithPortfolioTask(FactoryTask):
region = luigi.Parameter()
portfolio_args = luigi.DictParameter()
product_args = luigi.DictParameter()
def params_for_results_display(self):
return {
"region": self.region,
"portfolio": f"{self.portfolio_args.get('portfolio_group_name')}-{self.portfolio_args.get('display_name')}",
"product": self.product_args.get("name"),
}
def output(self):
return luigi.LocalTarget(
f"output/AssociateProductWithPortfolioTask/"
f"{self.region}"
f"{self.product_args.get('name')}"
f"_{self.portfolio_args.get('portfolio_group_name')}"
f"_{self.portfolio_args.get('display_name')}.json"
)
def requires(self):
return {
"create_portfolio_task": CreatePortfolioTask(**self.portfolio_args),
"create_product_task": CreateProductTask(**self.product_args),
}
def run(self):
logger_prefix = f"{self.region}-{self.portfolio_args.get('portfolio_group_name')}-{self.portfolio_args.get('display_name')}"
portfolio = json.loads(
self.input().get("create_portfolio_task").open("r").read()
)
portfolio_id = portfolio.get("Id")
product = json.loads(self.input().get("create_product_task").open("r").read())
product_id = product.get("ProductId")
with self.regional_client("servicecatalog") as service_catalog:
logger.info(f"{logger_prefix}: Searching for existing association")
aws.ensure_portfolio_association_for_product(
portfolio_id, product_id, service_catalog
)
with self.output().open("w") as f:
logger.info(f"{logger_prefix}: about to write!")
f.write("{}")
src/comotion/dash.py | ComotionLabs/dash-sdk | 1 | 100228 | import io
import requests
import csv
import time
from typing import Union, Callable
from os.path import join
import pandas as pd
from comotion import Auth
from comotion import comodash_api_client_lowlevel
from comodash_api_client_lowlevel.comodash_api import queries_api
from comodash_api_client_lowlevel.model.query_text import QueryText
from urllib3.exceptions import IncompleteRead
from urllib3.response import HTTPResponse
from comodash_api_client_lowlevel.model.query import Query as QueryInfo
class DashConfig(comodash_api_client_lowlevel.Configuration):
"""
Object containing configuration information for Dash API
Attributes
----------
auth : comotion.Auth
comotion.Auth object holding information about authentication
"""
def __init__(self, auth: Auth):
if not(isinstance(auth, Auth)):
raise TypeError("auth must be of type comotion.Auth")
self.auth = auth
super().__init__(
host='https://%s.api.comodash.io/v2' % (auth.orgname),
access_token=auth.get_access_token()
)
# comodash_api_client_lowlevel.Configuration.set_default(config)
class Query():
"""
The query object starts and tracks a query on Comotion Dash.
Initialising this class runs a query on Comotion Dash and stores the
resulting query id in `query_id`
"""
COMPLETED_STATES = ['SUCCEEDED', 'CANCELLED', 'FAILED']
def __init__(
self,
config: DashConfig,
query_text: str = None,
query_id: str = None
):
"""
Parameters
----------
query_text : str
sql of the query to run
config : DashConfig
Object of type DashConfig including configuration details
query_id : str, optional
Query id of existing query. If not provided, then a new query will be started on Dash
Raises
------
TypeError
If config is not of type DashConfig
ValueError
if one of query_id or query_text is not provided
"""
if not(isinstance(config, DashConfig)):
raise TypeError("config must be of type comotion.dash.DashConfig")
with comodash_api_client_lowlevel.ApiClient(config) as api_client:
self.query_api_instance = queries_api.QueriesApi(api_client)
if query_id:
query_info = self.query_api_instance.get_query(query_id)
self.query_id = query_id
self.query_text = query_info.query
elif query_text:
self.query_text = query_text
query_text_model = QueryText(query=query_text)
query_id_model = self.query_api_instance.run_query(query_text_model) # noqa
self.query_id = query_id_model['query_id']
else:
raise ValueError("One of query_id or query_text must be provided")
def get_query_info(self) -> QueryInfo:
"""Gets the state of the query.
Returns
-------
QueryInfo
Model containing all query info, with the following attributes
`query`
query sql
`query_id`
query_id of query
`status`
`completion_date_time`
GMT Completion Time
`state`
Current state of query. One of QUEUED,RUNNING,SUCCEEDED,FAILED,CANCELLED
`stateChangeReason`
info about reason for state change (generally failure)
        `submission_date_time`
GMT submission time
"""
return self.query_api_instance.get_query(self.query_id)
def state(self) -> str:
"""Gets the state of the query.
Returns
-------
str
One of QUEUED,RUNNING,SUCCEEDED,FAILED,CANCELLED
"""
return self.get_query_info().status.state
def is_complete(self) -> bool:
"""Indicates whether the query is in a final state.
This means it has either succeeded, failed or been cancelled.
Returns
-------
bool
Whether query complete
"""
return self.state() in Query.COMPLETED_STATES
    def wait_to_complete(self) -> QueryInfo:
        """Blocks until query is in a complete state
        Returns
        -------
        QueryInfo
            Query info model once the query reaches one of the final states
            'SUCCEEDED', 'CANCELLED' or 'FAILED'
        """
        while True:
            query_info = self.get_query_info()
            print(query_info.status.state)
            if query_info.status.state in Query.COMPLETED_STATES:
                return query_info
            time.sleep(5)
    def get_query_id(self) -> str:
        """Returns query id for this query.
        Named ``get_query_id`` because the ``query_id`` instance attribute
        set in ``__init__`` shadows any method of the same name.
        """
        return self.query_id
def get_csv_for_streaming(self) -> HTTPResponse:
""" Returns a ``urllib3.response.HTTPResponse`` object that can be used for streaming
This allows use of the downloaded file without having to save
it to local storage.
Be sure to use ``.release_conn()`` when completed to ensure that the
connection is released
This can be achieved using the `with` notation e.g.::
with query.get_csv_for_streaming().stream() as stream:
for chunk in stream:
                    # do something with chunk
                    # chunk is a byte array
"""
response = self.query_api_instance.download_csv(
query_id=self.query_id,
_preload_content=False)
response.autoclose = False
return response
def download_csv(self, output_file_path, fail_if_exists=False):
"""Download csv of results and check that the total file size is correct
Parameters
----------
output_file_path : File path
Path of the file to output to
fail_if_exists : bool, optional
If true, then will fail if the target file name already/
Defaults to false.
Raises
------
IncompleteRead
If only part of the file is downloaded, this is raised
"""
with self.get_csv_for_streaming() as response:
write_mode = "wb"
if fail_if_exists:
write_mode = "xb"
with io.open(output_file_path, write_mode) as f:
size = 0
content_length = (response.getheader('Content-Length'))
for chunk in response.stream(1048576):
size = size + len(chunk)
f.write(chunk)
if (response.tell() != int(content_length)):
raise IncompleteRead(
response.tell(),
int(content_length) - response.tell()
)
def stop(self):
""" Stop the query"""
return self.query_api_instance.stop_query(self.query_id)
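# Usage sketch (illustrative; Auth construction and the SQL are assumptions):
#
#   auth = Auth(...)                      # see comotion.Auth for arguments
#   config = DashConfig(auth)
#   query = Query(config, query_text="select 1")
#   query.wait_to_complete()
#   query.download_csv("/tmp/result.csv")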
def upload_csv_to_dash(
dash_orgname: str, # noqa
dash_api_key: str,
dash_table: str,
csv_gz_stream: io.FileIO
) -> requests.Response:
"""Uploads csv gzipped stream to Dash
Expects a csv gzipped stream to upload to dash.
Args:
dash_orgname (str): Dash organisation name for dash instance
dash_api_key (str): Valid API key for the organisation instance
dash_table (str): Table name to upload to
csv_gz_stream (io.FileIO): Description
Returns:
requests.Response: response from dash api
Raises:
HTTPError: If one is raised by the call
"""
url = "https://api.comodash.io/v1/data-input-file"
headers = {
'Content-Type': 'application/gzip',
'service_client_id': '0',
'x-api-key': dash_api_key,
'org-name': dash_orgname,
'table-name': dash_table
}
dash_response = requests.request(
"POST",
url,
headers=headers,
data=csv_gz_stream.getbuffer()
)
dash_response.raise_for_status()
return dash_response
def create_gzipped_csv_stream_from_df(df: pd.DataFrame) -> io.BytesIO:
"""Returns a gzipped, utf-8 csv file bytestream from a pandas dataframe
    Useful to help upload dataframes to dash.
    It does not break the file up, so be sure to apply a maximum chunksize
    to the dataframe before applying - otherwise Dash's maximum file size
    limits will cause an error.
Parameters
----------
df : pd.DataFrame
Dateframe to be turned into bytestream
Returns
-------
io.BytesIO
The Bytestream
"""
csv_stream = io.BytesIO()
df.to_csv(
csv_stream,
compression="gzip",
encoding="utf-8",
index=False,
quoting=csv.QUOTE_NONNUMERIC
)
return csv_stream
def read_and_upload_file_to_dash(
file: Union[str, io.FileIO],
dash_table: str,
dash_orgname: str,
dash_api_key: str,
encoding: str = 'utf-8',
chunksize: int = 30000,
modify_lambda: Callable = None,
path_to_output_for_dryrun: str = None
):
"""Reads a file and uploads to dash.
This function will:
- Read a csv file
- Break it up into multiple csv's
- each with a maximum number of lines defined by chunksize
- upload them to dash
Parameters
----------
file : Union[str, io.FileIO]
Either a path to the file to be uploaded,
or a FileIO stream representing the file to be uploaded
Should be an unencrypted, uncompressed CSV file
dash_table: str
name of Dash table to upload the file to
dash_orgname: str
orgname of your Dash instance
dash_api_key: str
valid api key for Dash API
encoding: str
the encoding of the source file. defaults to utf-8.
chunksize: int
(optional)
the maximum number of lines to be included in each file.
Note that this should be low enough that the zipped file is less
than Dash maximum gzipped file size. Defaults to 30000.
modify_lambda:
(optional)
a callable that recieves the pandas dataframe read from the
csv. Gives the opportunity to modify - such as adding a timestamp
column.
Is not required.
path_to_output_for_dryrun: str
(optional)
if specified, no upload will be made to dash, but files
will be saved to the location specified. This is useful for
testing.
multiple files will be created: [table_name].[i].csv.gz where i
represents multiple file parts
Returns
-------
List
List of http responses
"""
file_reader = pd.read_csv(
file,
chunksize=chunksize,
encoding=encoding
)
i = 1
responses = []
for file_df in file_reader:
if modify_lambda is not None:
modify_lambda(file_df)
csv_stream = create_gzipped_csv_stream_from_df(file_df)
if path_to_output_for_dryrun is None:
response = upload_csv_to_dash(
dash_orgname=dash_orgname,
dash_api_key=dash_api_key,
dash_table=dash_table,
csv_gz_stream=csv_stream
)
responses.append(response.text)
else:
with open(
join(
path_to_output_for_dryrun,
dash_table + "." + str(i) + ".csv.gz"
),
"wb"
) as f:
f.write(csv_stream.getvalue())
i = i + 1
return responses
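# Example call (illustrative; credentials and names are placeholders):
#
#   responses = read_and_upload_file_to_dash(
#       "data.csv",
#       dash_table="my_table",
#       dash_orgname="example-org",
#       dash_api_key="XXXX",
#       chunksize=30000,
#   )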
drivers/nidaq/syncAIAO.py | mv20100/phd_code | 0 | 100356 |
from PyDAQmx import *
import numpy as np
import ctypes, time
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from collections import deque
class SyncAIAO(object):
compatibility_mode = 0 # Set this to 1 on some PC (Mouss)
trigName = "ai/StartTrigger"
timeout = 10.0
mean = 64
sampling_rate = 1e5
numSamp=2000
nbSampCroppedFactor=0.5
vpp=1.
offset = 0.
def __init__(self,device="Dev2",outChan="ao1",inChanList=["ai0"],inRange=(-10.,10.),outRange=(-10.,10.)):
self.device = device
self.outChan = outChan
self.inChanList = inChanList
self.inRange = inRange
self.outRange = outRange
self.running = False
self.initialize()
def initialize(self):
self._sampling_rate = self.sampling_rate
self._numSamp = self.numSamp
self.nbSampCropped = int(self.nbSampCroppedFactor * self._numSamp)
self.AImean = np.zeros(self._numSamp*len(self.inChanList),dtype=np.float64)
self.AIdata = np.zeros((self.mean,self._numSamp*len(self.inChanList)),dtype=np.float64)
self.ptr = 0
self.deque = deque([],self.mean)
self.AOdata = self.offset + np.hstack([np.linspace(-self.vpp/2.,self.vpp/2.,self._numSamp/2,dtype=np.float64,endpoint=False),
np.linspace(self.vpp/2.,-self.vpp/2.,self._numSamp/2,dtype=np.float64,endpoint=False)])
self.counter=0
self.totalAI=0
self.AItaskHandle = None
self.AOtaskHandle = None
def makeInputStr(self):
return ",".join([self.device+"/"+inChan for inChan in self.inChanList])
def makeOutputStr(self):
return self.device+"/"+self.outChan
def getNthFullChanName(self,index):
return self.device+"/"+self.inChanList[index]
def getNthChanAIdata(self,index):
return self.AOdata[0:self._numSamp-self.nbSampCropped],self.AIdata[self.ptr,index*self._numSamp:(index+1)*self._numSamp-self.nbSampCropped]
def getNthChanAImean(self,index):
return self.AOdata[0:self._numSamp-self.nbSampCropped],self.AImean[index*self._numSamp:(index+1)*self._numSamp-self.nbSampCropped]
def start(self):
assert not self.running
self.running = True
self.initialize()
def EveryNCallback(taskHandle, everyNsamplesEventType, nSamples, callbackData):
# global AItaskHandle, totalAI, AIdata, ptr
readAI = c_int32()
self.ptr=(self.ptr+1)%self.mean
self.deque.append(self.ptr)
DAQmxReadAnalogF64(self.AItaskHandle,self._numSamp,self.timeout,DAQmx_Val_GroupByChannel,self.AIdata[self.ptr],self._numSamp*len(self.inChanList),byref(readAI),None)
self.AImean=np.mean(self.AIdata[self.deque],axis=0)
self.totalAI = self.totalAI + readAI.value
self.counter=self.counter+1
# print self.totalAI
return int(0)
def DoneCallback(taskHandle, status, callbackData):
self.clearTasks()
return int(0)
self.AItaskHandle = TaskHandle()
self.AOtaskHandle = TaskHandle()
self.totalAI=0
DAQmxCreateTask(None,byref(self.AItaskHandle))
DAQmxCreateAIVoltageChan(self.AItaskHandle,self.makeInputStr(), None, DAQmx_Val_Cfg_Default, self.inRange[0],self.inRange[1], DAQmx_Val_Volts, None)
DAQmxCfgSampClkTiming(self.AItaskHandle,None, self._sampling_rate, DAQmx_Val_Rising, DAQmx_Val_ContSamps, self._numSamp)
DAQmxCreateTask(None,byref(self.AOtaskHandle))
DAQmxCreateAOVoltageChan(self.AOtaskHandle,self.makeOutputStr(),None,self.outRange[0],self.outRange[1],DAQmx_Val_Volts,None)
DAQmxCfgSampClkTiming(self.AOtaskHandle,None,self._sampling_rate,DAQmx_Val_Rising,DAQmx_Val_ContSamps,self._numSamp)
DAQmxCfgDigEdgeStartTrig(self.AOtaskHandle,self.trigName,DAQmx_Val_Rising)
if self.compatibility_mode == 0:
EveryNCallbackCWRAPPER = CFUNCTYPE(c_int32,c_void_p,c_int32,c_uint32,c_void_p)
else:
EveryNCallbackCWRAPPER = CFUNCTYPE(c_int32,c_ulong,c_int32,c_uint32,c_void_p)
self.everyNCallbackWrapped = EveryNCallbackCWRAPPER(EveryNCallback)
DAQmxRegisterEveryNSamplesEvent(self.AItaskHandle,DAQmx_Val_Acquired_Into_Buffer,self._numSamp,0,self.everyNCallbackWrapped,None)
if self.compatibility_mode == 0:
DoneCallbackCWRAPPER = CFUNCTYPE(c_int32,c_void_p,c_int32,c_void_p)
else:
DoneCallbackCWRAPPER = CFUNCTYPE(c_int32,c_ulong,c_int32,c_void_p)
self.doneCallbackWrapped = DoneCallbackCWRAPPER(DoneCallback)
DAQmxRegisterDoneEvent(self.AItaskHandle,0,self.doneCallbackWrapped,None)
DAQmxWriteAnalogF64(self.AOtaskHandle, self._numSamp, 0, self.timeout, DAQmx_Val_GroupByChannel, self.AOdata, None, None)
DAQmxStartTask(self.AOtaskHandle)
DAQmxStartTask(self.AItaskHandle)
print "Starting acquisition"
def clearTasks(self):
if self.AItaskHandle:
DAQmxStopTask(self.AItaskHandle)
DAQmxClearTask(self.AItaskHandle)
self.AItaskHandle = None
if self.AOtaskHandle:
DAQmxStopTask(self.AOtaskHandle)
DAQmxClearTask(self.AOtaskHandle)
self.AOtaskHandle = None
def stop(self):
if self.running:
self.clearTasks()
self.setZero()
self.running = False
def setZero(self):
print "Setting output to 0 V"
clearTaskHandle = TaskHandle()
DAQmxCreateTask("", byref(clearTaskHandle))
DAQmxCreateAOVoltageChan(clearTaskHandle, self.makeOutputStr(), None, self.outRange[0],self.outRange[1], DAQmx_Val_Volts, None)
DAQmxWriteAnalogF64(clearTaskHandle,1,1,self.timeout,DAQmx_Val_GroupByChannel,np.array([0.]),None,None)
DAQmxStartTask(clearTaskHandle)
DAQmxClearTask(clearTaskHandle)
def __del__(self):
self.stop()
if __name__=="__main__":
app = QtGui.QApplication([])
win = pg.GraphicsWindow()
win.resize(1000,600)
win.setWindowTitle('Pyqtgraph : Live NIDAQmx data')
pg.setConfigOptions(antialias=True)
outChan="ao2"
inChanList=["ai20"]
syncAiAo = SyncAIAO(device = "Dev1", inChanList=inChanList,outChan=outChan)
p = win.addPlot(title="Live plot")
p.addLegend()
colors = ['m','y','c']
assert len(colors)>=len(inChanList)
curves = []
for idx,inChan in enumerate(inChanList):
curve = p.plot(pen=colors[idx],name=syncAiAo.getNthFullChanName(idx))
curves.append(curve)
def update():
for idx,curve in enumerate(curves):
x, y = syncAiAo.getNthChanAIdata(idx)
curve.setData(x=x, y=y)
if syncAiAo.counter == 1:
p.enableAutoRange('xy', False) ## stop auto-scaling after the first data set is plotted
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
syncAiAo.start()
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
ret = QtGui.QApplication.instance().exec_()
print "Closing"
syncAiAo.stop()
sys.exit(ret)
members/urls.py | Moha369/vector | 0 | 100484 |
from django.contrib import admin
from django.urls import path
from . import views
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', views.home, name = 'home'),
path('register/', views.register, name = 'register'),
path('profile/', views.profile, name = 'user_dashboard'),
path('login/', auth_views.LoginView.as_view(template_name = 'members/login.html'), name = 'login'),
path('logout/', auth_views.LogoutView.as_view(template_name = 'members/logout.html'), name = 'logout'),
path('contact/', views.contact, name = 'contact'),
path('members/', views.members, name = 'members'),
path('members/<username>', views.user_url, name = 'user_profile'),
path('about/', views.about, name = 'about')
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
model/modeling_etm.py | yyht/electra_electric | 0 | 100612 |
import tensorflow as tf
def check_tf_version():
version = tf.__version__
print("==tf version==", version)
if int(version.split(".")[0]) >= 2 or int(version.split(".")[1]) >= 15:
return True
else:
return False
if check_tf_version():
tf.disable_v2_behavior()
from model.vqvae_utils import tfidf_utils  # needed by tokenid2bow below
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class ETMConfig(object):
def __init__(self,
vocab_size,
topic_size=768,
hidden_size=768,
embedding_size=768,
num_hidden_layers=12,
hidden_act="gelu",
hidden_dropout_prob=0.1,
initializer_range=0.02,
apply_bn_vae_mean=True,
apply_bn_vae_var=True):
self.vocab_size = vocab_size
self.topic_size = topic_size
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.num_hidden_layers = num_hidden_layers
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.initializer_range = initializer_range
self.apply_bn_vae_mean = apply_bn_vae_mean
self.apply_bn_vae_var = apply_bn_vae_var
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = ETMConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class ETM(object):
def __init__(self,
etm_config,
input_term_count,
input_term_binary,
input_term_freq,
is_training=False,
embedding_matrix=None,
hidden_vector=None,
scope=None,
input_type="term_count"):
"""
https://github.com/linkstrife/NVDM-GSM/blob/master/GSM.py
https://github.com/adjidieng/ETM/blob/master/etm.py
compared to NVDM, GSM or ETM add topic-word-matrix-alignment
"""
etm_config = copy.deepcopy(etm_config)
if not is_training:
etm_config.hidden_dropout_prob = 0.0
if input_type == 'term_count':
tf.logging.info("*** model_input term_count ***")
model_input = tf.identity(input_term_count)
elif input_type == 'term_binary':
tf.logging.info("*** model_input term_binary ***")
model_input = tf.identity(input_term_binary)
elif input_type == 'term_freq':
tf.logging.info("*** model_input term_freq ***")
model_input = tf.identity(input_term_freq)
else:
tf.logging.info("*** model_input term_freq ***")
model_input = tf.identity(input_term_freq)
with tf.variable_scope("etm", scope):
with tf.variable_scope("encoder"):
# [batch_size, hidden_size]
self.q_theta = mlp(input_tensor=model_input,
num_hidden_layers=etm_config.num_hidden_layers,
hidden_size=etm_config.hidden_size,
is_training=is_training,
dropout_prob=etm_config.hidden_dropout_prob,
intermediate_act_fn=get_activation(etm_config.hidden_act),
initializer_range=etm_config.initializer_range,
scope="bow_mlp")
if hidden_vector is not None:
with tf.variable_scope("etm", scope):
if hidden_vector.shape[-1] != etm_config.hidden_size:
self.hidden_vector = tf.layers.dense(
hidden_vector, etm_config.hidden_size,
name="hidden_vector_project")
self.q_theta = tf.concat([self.q_theta, self.hidden_vector], axis=-1)
with tf.variable_scope("etm", scope):
with tf.variable_scope("bridge"):
# [batch_size, hidden_size]
# use bias is false since we will apply bn
self.mu_q_theta = mlp(
input_tensor=self.q_theta,
num_hidden_layers=1,
hidden_size=etm_config.topic_size,
is_training=is_training,
dropout_prob=etm_config.hidden_dropout_prob,
intermediate_act_fn=None,
initializer_range=etm_config.initializer_range,
scope="mu_theta_mlp",
matrix_start_zero=False,
use_bias=True if not etm_config.apply_bn_vae_mean else False
)
tf.logging.info("*** mu_q_theta ***")
tf.logging.info(self.mu_q_theta)
if etm_config.apply_bn_vae_mean:
with tf.variable_scope("vae_mu_bn"):
self.mu_q_theta = tf.layers.batch_normalization(
self.mu_q_theta,
training=is_training,
scale=False,
center=False,
epsilon=1e-8,
axis=-1
)
self.mu_q_theta = scalar_layer(self.mu_q_theta, tau=0.5,
mode='positive', initializer_range=0.02)
tf.logging.info("*** after bn mu_q_theta ***")
tf.logging.info(self.mu_q_theta)
# zero logsigma and simga is set 1
self.sigma_std_q_theta = mlp(
input_tensor=self.q_theta,
num_hidden_layers=1,
hidden_size=etm_config.topic_size,
is_training=is_training,
dropout_prob=etm_config.hidden_dropout_prob,
intermediate_act_fn=None,
initializer_range=etm_config.initializer_range,
scope="sigma_std_mlp",
matrix_start_zero=False,
use_bias=True if not etm_config.apply_bn_vae_var else False
)
tf.logging.info("*** sigma_std_q_theta ***")
tf.logging.info(self.sigma_std_q_theta)
if etm_config.apply_bn_vae_var:
with tf.variable_scope("vae_sigma_std_bn"):
self.sigma_std_q_theta = tf.layers.batch_normalization(
self.sigma_std_q_theta,
training=is_training,
scale=False,
center=False,
epsilon=1e-8,
axis=-1
)
self.sigma_std_q_theta = scalar_layer(self.sigma_std_q_theta, tau=0.5,
mode='negative', initializer_range=0.02)
tf.logging.info("*** after bn sigma_std_q_theta ***")
tf.logging.info(self.sigma_std_q_theta)
with tf.variable_scope("etm", scope):
with tf.variable_scope("reparameterize"):
self.z = reparameterize(
mu_q_theta=self.mu_q_theta,
sigma_std_q_theta=self.sigma_std_q_theta,
is_training=is_training)
tf.logging.info("*** reparameterize z ***")
tf.logging.info(self.z)
with tf.variable_scope("gsm"):
# [batch_size, hidden_size]
self.z_gsm = mlp(input_tensor=self.z,
num_hidden_layers=etm_config.num_hidden_layers,
hidden_size=etm_config.topic_size,
is_training=is_training,
dropout_prob=etm_config.hidden_dropout_prob,
intermediate_act_fn=None,
initializer_range=etm_config.initializer_range,
scope="decoder")
tf.logging.info("*** z_gsm ***")
tf.logging.info(self.z_gsm)
# [batch_size, topic_size]
        self.theta = tf.nn.softmax(self.z_gsm, axis=-1)
tf.logging.info("*** theta ***")
tf.logging.info(self.theta)
with tf.variable_scope("embeddings"):
if embedding_matrix is None:
self.embedding_table = tf.get_variable(
name="vocab_word_embeddings",
shape=[etm_config.vocab_size, etm_config.embedding_size],
initializer=create_initializer(etm_config.initializer_range))
else:
self.embedding_table = tf.get_variable(
name="vocab_word_embeddings",
shape=[etm_config.vocab_size, etm_config.embedding_size],
initializer=tf.constant_initializer(embedding_matrix, dtype=tf.float32),
trainable=False)
tf.logging.info("*** vocab_word_embeddings ***")
tf.logging.info(self.embedding_table)
with tf.variable_scope("embeddings"):
self.topic_embedding_table = tf.get_variable(
name="topic_word_embeddings",
shape=[etm_config.topic_size, etm_config.embedding_size],
initializer=create_initializer(etm_config.initializer_range))
tf.logging.info("*** topic_word_embeddings ***")
tf.logging.info(self.topic_embedding_table)
# topic_embedding_table: [topic_size, embedding_size]
# embedding_table: [vocab_size, embedding_size]
with tf.variable_scope("decoder"):
self.topic_word_align = tf.matmul(self.topic_embedding_table,
self.embedding_table,
transpose_b=True)
# self.topic_word_align = tf.multiply(self.topic_word_align,
# 1.0 / math.sqrt(float(etm_config.embedding_size)))
tf.logging.info("*** topic_word_align ***")
tf.logging.info(self.topic_word_align)
# [topic_size, vocab_size]
self.beta = tf.nn.softmax(self.topic_word_align, axis=-1)
tf.logging.info("*** beta ***")
tf.logging.info(self.beta)
# theta: [batch_size, topic_size]
# beta : [topic_size, vocab_size]
# preds: [batch_size, vocab_size]
# preds needs to be log-softmax that normalized on vocab-size dims
self.preds = tf.log(tf.matmul(self.theta, self.beta)+1e-10)
tf.logging.info("*** preds ***")
tf.logging.info(self.preds)
self.per_example_recon_loss = -tf.reduce_sum(self.preds * tf.stop_gradient(model_input), axis=-1)
self.recon_loss = tf.reduce_mean(self.per_example_recon_loss)
self.sigma_q_theta = tf.pow(self.sigma_std_q_theta, 2.0)
self.logsigma_q_theta = tf.log(self.sigma_q_theta+1e-10)
self.per_example_kl_theta_loss = -0.5 * tf.reduce_sum(1 + self.logsigma_q_theta - tf.pow(self.mu_q_theta, 2) - self.sigma_q_theta, axis=-1)
self.kl_theta_loss = tf.reduce_mean(self.per_example_kl_theta_loss)
def get_hidden_vector(self):
return self.z
def get_vocab_word_embeddings(self):
return self.embedding_table
def get_topic_embedding_table(self):
return self.topic_embedding_table
def get_recon_loss(self):
return self.recon_loss
def get_kl_loss(self):
return self.kl_theta_loss
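# Usage sketch (illustrative; shapes and hyper-parameters are assumptions):
#
#   etm_config = ETMConfig(vocab_size=30000, topic_size=50)
#   model = ETM(etm_config,
#               input_term_count=term_count,     # [batch, vocab_size] counts
#               input_term_binary=term_binary,   # [batch, vocab_size] 0/1
#               input_term_freq=term_freq,       # [batch, vocab_size] tf
#               is_training=True)
#   total_loss = model.get_recon_loss() + model.get_kl_loss()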
def scalar_layer(input_tensor, tau=0.5,
mode='positive', initializer_range=0.02):
with tf.variable_scope("vae_bn_scale"):
scale = tf.get_variable(
name="scale",
shape=[input_tensor.shape[-1]],
initializer=create_initializer(initializer_range))
if mode == 'positive':
scale = tau + (1 - tau) * tf.nn.sigmoid(scale)
else:
scale = (1 - tau) * tf.nn.sigmoid(-scale)
return input_tensor * tf.sqrt(scale)
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
return input_tensor * cdf
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def create_zero_initializer(initializer_range=0.02):
return tf.zeros_initializer()
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
  # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def tokenid2bow(input_ids, vocab_size):
[term_count,
term_binary,
term_freq] = tfidf_utils.tokenid2tf(input_ids, vocab_size)
return term_count, term_binary, term_freq
def mlp(input_tensor,
num_hidden_layers,
hidden_size,
is_training,
dropout_prob,
intermediate_act_fn,
initializer_range,
scope=None,
use_bias=True,
matrix_start_zero=False
):
prev_output = input_tensor
if matrix_start_zero:
kernel_initializer = create_zero_initializer(initializer_range)
tf.logging.info("** apply zero initializer **")
else:
kernel_initializer = create_initializer(initializer_range)
tf.logging.info("** apply truncated normal initializer **")
with tf.variable_scope(scope, default_name="mlp"):
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
layer_output = tf.layers.dense(
layer_input,
hidden_size,
kernel_initializer=kernel_initializer,
bias_initializer=tf.zeros_initializer(),
activation=intermediate_act_fn,
use_bias=use_bias)
prev_output = layer_output
final_outputs = prev_output
return final_outputs
def reparameterize(mu_q_theta, sigma_std_q_theta, is_training):
if is_training:
sigma_q_theta = sigma_std_q_theta
eps = tf.random.normal(get_shape_list(sigma_q_theta),
mean=0.0, stddev=1.0, dtype=tf.float32)
return eps*sigma_q_theta+mu_q_theta
else:
return mu_q_theta
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, and exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
shape = np.array(tensor).shape
if isinstance(expected_rank, six.integer_types):
assert len(shape) == expected_rank
elif expected_rank is not None:
assert len(shape) in expected_rank
return shape
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def get_assigment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
  for x in init_vars:
    # tf.train.list_variables returns (name, shape) pairs, so `var` below
    # is actually the checkpoint variable's shape list.
    (name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
if var != name_to_variable[name].shape.as_list():
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
| 1.414063 | 1 |
Python/1. Python Basics/mit-6.00.1-python solutions/lec5prob9-semordnilap.py | okara83/Becoming-a-Data-Scientist | 0 | 100740 | # lec5prob9-semordnilap.py
# edX MITx 6.00.1x
# Introduction to Computer Science and Programming Using Python
# Lecture 5, problem 9
# A semordnilap is a word or a phrase that spells a different word when read backwards
# ("semordnilap" is a semordnilap of "palindromes"). Here are some examples:
#
# nametag / gateman
# dog / god
# live / evil
# desserts / stressed
#
# Write a recursive program, semordnilap, that takes in two words and says if
# they are semordnilap.
def semordnilap(str1, str2):
'''
str1: a string
str2: a string
returns: True if str1 and str2 are semordnilap;
False otherwise.
'''
# Your code here
# Check to see if both strings are empty
if not (len(str1) or len(str2)): return True
# Check to see if only one string is empty
if not (len(str1) and len(str2)): return False
# Check to see if first char of str1 = last of str2
# If not, no further comparison needed, return False
if str1[0] != str2[-1]: return False
return semordnilap(str1[1:], str2[:-1])
# Performing a semordnilap comparison using slicing notation,
    # but this is not valid for this assignment
# elif str1 == str2[::-1]:
# return True
# Example of calling semordnilap()
theResult = semordnilap('may', 'yam')
print(theResult)
| 3.53125 | 4 |
tests/test_execute.py | facultyai/faculty-mill | 2 | 100868 | from io import StringIO
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from faculty_mill.execute import run_notebook
@pytest.fixture
def tempdir(tmpdir):
"""Fixture that wraps pytest fixture to return Path object"""
return Path(str(tmpdir))
@pytest.fixture
def tmpnotebook():
test_notebook = StringIO()
test_notebook.write("test notebook content")
test_notebook.seek(0)
return test_notebook
@pytest.fixture
def mock_click_context():
click_context = Mock()
click_context.args = ["arg 1", "arg 2"]
return click_context
def test_that_run_notebook_copies_content(
tempdir, tmpnotebook, mock_click_context
):
output_notebook = run_notebook(
tmpnotebook, tempdir, execute=False, click_context=mock_click_context
)
assert output_notebook.parent == tempdir
assert output_notebook.read_text() == "test notebook content"
@patch("faculty_mill.execute.papermill")
def test_that_run_notebook_calls_papermill(
mock_papermill, tempdir, tmpnotebook, mock_click_context
):
output_notebook = run_notebook(
tmpnotebook, tempdir, execute=True, click_context=mock_click_context
)
assert output_notebook.parent == tempdir
mock_papermill.make_context.assert_called_once_with(
"The papermill execution command.",
[str(tempdir / "input.ipynb"), str(tempdir / "output.ipynb")]
+ mock_click_context.args,
parent=mock_click_context,
)
mock_papermill.invoke.assert_called_once_with(
mock_papermill.make_context.return_value
)
| 1.6875 | 2 |
Invaders/Displays/display.py | JaredsGames/SpaceInvaders | 0 | 100996 | <gh_stars>0
# <NAME>
# CPSC 386-01
# 2021-11-29
# <EMAIL>
# @JaredDyreson
#
# Lab 00-04
#
# Some filler text
#
"""
This module contains a basic "factory" pattern for generating new Display instances
"""
import abc
import dataclasses
import functools
import json
import pathlib
import pygame
import sys
import time
import typing
from datetime import datetime
from Invaders.UI.button import Button
from Invaders.Dataclasses.direction import Direction
from Invaders.Dataclasses.player import Player
from Invaders.Dataclasses.point import Point
class Display:
"""
Not fully virtual class for each display to
inherit from
"""
def __init__(
self, width: int = 900, height: int = 900, color=pygame.Color("black")
):
# Checks for errors encountered
_, num_fail = pygame.init()
if num_fail > 0:
print(f"[FATAL] There were {num_fail} error(s) produced!")
sys.exit(-1)
else:
print("[+] Game successfully initialised")
pygame.font.init()
self.width, self.height = width, height
self._display_surface = pygame.display.set_mode(
(self.width, self.height), pygame.HWSURFACE
)
self.last_position = Point(-1, -1)
self.background_color = color
self.fps_meter = pygame.time.Clock()
@abc.abstractmethod
def draw(self):
"""
Abstract draw class that must be implemented
"""
raise NotImplementedError(
f"Display.draw isn abstract method and should not be invoked directly"
)
def get_surface(self) -> pygame.Surface:
"""
Obtain the current display surface
to a given window
@return - pygame.Surface
"""
return self._display_surface
def clear_text(self) -> None:
"""
This removes all text from the screen
"""
self._display_surface.fill(self.background_color)
def draw_image(self, img_object: pygame.Surface, position: Point) -> None:
"""
Draw an image object (in the form of a surface) to the screen
at a given position
@param img_object : currently loaded pygame surface that represents an image
@param position : Cartesian coordinates that represent where on the screen to be drawn to
"""
self._display_surface.blit(img_object, dataclasses.astuple(position))
def write_text(
self, text: str, position: Point, font, color=pygame.Color("white")
) -> None:
"""
Write text to the screen, thanks to @NICE
for helping with this!
@param text - stuff we want to write to the screen
@param position - where on the screen should it be writing to
@param font - current font used
@param color - selected color
"""
lines = [line.split(" ") for line in text.splitlines()]
space = font.size(" ")[0]
x, y = dataclasses.astuple(position)
self.last_position = position
for line in lines:
for word in line:
word_surface = font.render(word, 0, color)
width, height = word_surface.get_size()
if x + width >= self.width + 100:
x = position.x
y += height
self._display_surface.blit(word_surface, (x, y))
x += width + space
x = position.x
y += height
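    # A small usage sketch for write_text() (values hypothetical): words are
    # rendered one at a time and wrap to a new line once the running x
    # position would exceed the window width plus a 100 px margin.
    #
    #   display = Display(width=900, height=900)
    #   font = pygame.font.SysFont(None, 30)
    #   display.write_text("hello world", Point(10, 10), font)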
def center(self) -> Point:
"""
Obtain the center of the current scene
@return Point
"""
return Point(self.width // 4, self.height // 4)
class HighScoreDisplay(Display):
"""
Class that represents the high score display
"""
def __init__(self, current_score: int, username: str):
super().__init__()
self.title_position = Point(250, 45)
self.logo_position = Point(575, 435)
self.break_from_draw = False
self.back_button = Button(
self._display_surface,
Point(300, 575),
300,
50,
"Quit",
functools.partial(self.terminate_intro),
)
self.scoreboard_file = pathlib.Path("scores/scoreboard.json")
self.scores = self.obtain_high_score_list(self.scoreboard_file)
self.scores.append(
Player(username, current_score,
datetime.now().strftime("%m/%d/%Y %H:%M"))
)
self.scores = sorted(self.scores, reverse=True)
def obtain_high_score_list(self, path: pathlib.Path) -> typing.List[Player]:
"""
Read in high score list found in a json file
that is then loaded and sorted by the score obtained
by a given player
@param path - path to JSON file
@return - typing.List[Player]
"""
with open(path, "r") as fp:
contents = json.load(fp)
return [Player(**element) for element in contents["players"]]
def terminate_intro(self):
"""
This terminates the current scene
"""
self.break_from_draw = True
self._display_surface.fill(self.background_color)
master = {"players": []}
for score in self.scores:
master["players"].append(dataclasses.asdict(score))
with open(self.scoreboard_file, "w") as fp:
json.dump(master, fp)
pygame.quit()
sys.exit()
def draw(self):
"""
Draw all the high scores in a row like
manner
"""
draw_loop = True
while draw_loop and not self.break_from_draw:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.terminate_intro()
self.write_text(
f"HIGH SCORES", self.title_position, pygame.font.SysFont(
None, 50)
)
self.write_text(
self.back_button.contents,
self.back_button.center(),
pygame.font.SysFont(None, 30),
)
self.back_button.draw()
for i, score in enumerate(self.scores[0:5]):
x, y = dataclasses.astuple(self.center())
self.write_text(
score.name,
Point((x - 50), y + i * 50),
pygame.font.SysFont(None, 33),
)
self.write_text(
str(score.score),
Point((x - 50) + 200, y + i * 50),
pygame.font.SysFont(None, 33),
)
self.write_text(
score.tod,
Point((x - 50) + 400, y + i * 50),
pygame.font.SysFont(None, 33),
)
pygame.display.flip()
| 2.484375 | 2 |
trips/tests.py | chorna/taxi24 | 0 | 101124 | <filename>trips/tests.py<gh_stars>0
from django.contrib.gis.geos import Point
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from customers.models import Customer
from drivers.models import Cab, Driver, Vehicle
# Create your tests here.
class TripTests(APITestCase):
def setUp(self):
driver1 = {
'first_name': 'soy',
'last_name': 'driver1',
'document_number': '11111'
}
driver2 = {
'first_name': 'soy',
'last_name': 'driver2',
'document_number': '22222'
}
driver3 = {
'first_name': 'soy',
'last_name': 'driver3',
'document_number': '333333'
}
driver4 = {
'first_name': 'soy',
'last_name': 'driver4',
'document_number': '44444'
}
self.driver_1 = self.create_helper(Driver, driver1)
self.driver_2 = self.create_helper(Driver, driver2)
self.driver_3 = self.create_helper(Driver, driver3)
self.driver_4 = self.create_helper(Driver, driver4)
vehicle1 = {
'number_plate': 'kl-111',
}
vehicle2 = {
'number_plate': 'kl-222',
}
vehicle3 = {
'number_plate': 'kl-333',
}
vehicle4 = {
'number_plate': 'kl-444',
}
self.vehicle_1 = self.create_helper(Vehicle, vehicle1)
self.vehicle_2 = self.create_helper(Vehicle, vehicle2)
self.vehicle_3 = self.create_helper(Vehicle, vehicle3)
self.vehicle_4 = self.create_helper(Vehicle, vehicle4)
cab1_data = {
'driver_id': self.driver_1,
'vehicle_id': self.vehicle_1,
'location': Point([1000,2000]),
'state': 0,
}
cab2_data = {
'driver_id': self.driver_2,
'vehicle_id': self.vehicle_2,
'location': Point([100, 200]),
'state': 0,
}
cab3_data = {
'driver_id': self.driver_3,
'vehicle_id': self.vehicle_3,
'location': Point([10, 20]),
'state': 0,
}
cab4_data = {
'driver_id': self.driver_4,
'vehicle_id': self.vehicle_4,
'location': Point([50, -10]),
'state': 0,
}
self.cab1 = self.create_helper(Cab, cab1_data)
self.cab2 = self.create_helper(Cab, cab2_data)
self.cab3 = self.create_helper(Cab, cab3_data)
self.cab4 = self.create_helper(Cab, cab4_data)
customer1 = {
'first_name': 'soy',
'last_name': 'customer',
'document_number': '333444'
}
self.customer_1 = self.create_helper(Customer, customer1)
self.request_trip = None
self.client = APIClient()
    def create_helper(self, model=None, d=None):
        # Avoid a mutable default argument; fall back to an empty dict.
        return model.objects.create(**(d or {}))
def test_request_trip(self):
path = '/api-v1.0/request/'
data = {
'customer_id': self.customer_1.id,
'cab_id': self.cab1.id,
'location': 'Point(20 20)',
}
response_request = self.client.post(path, data)
self.request_trip = response_request.json()
# test create request trip
self.assertEquals(status.HTTP_201_CREATED, response_request.status_code)
response2 = self.client.get(path)
self.assertEquals(len(response2.json()), 1)
# test top 3 nearest cabs
response10 = self.client.get(f"{path}{response_request.json()['id']}/find_nearest_cabs/")
self.assertEquals(status.HTTP_200_OK, response10.status_code)
trip_url = '/api-v1.0/trip/'
# test create trip
data_trip = {
'request_id': self.request_trip['id'],
'price': 100.00
}
trip = self.client.post(f"{trip_url}", data_trip)
self.assertEquals(status.HTTP_201_CREATED, trip.status_code)
trip_id = trip.json()['id']
# test get trip by id
response = self.client.get(f"{trip_url}{trip_id}/")
self.assertEquals(status.HTTP_200_OK, response.status_code)
# test get all trips
response2 = self.client.get(trip_url)
self.assertEquals(status.HTTP_200_OK, response2.status_code)
# test get all availables trips
response3 = self.client.get(f"{trip_url}availables/")
self.assertEquals(status.HTTP_200_OK, response3.status_code)
# test start trip
response4 = self.client.patch(f"{trip_url}{trip_id}/start/")
self.assertEquals(status.HTTP_204_NO_CONTENT, response4.status_code)
self.assertTrue(response4.data.get('start_date', False))
# test complete trip
response5 = self.client.patch(f"{trip_url}{trip_id}/complete/")
self.assertEquals(status.HTTP_204_NO_CONTENT, response5.status_code)
self.assertTrue(response5.data.get('end_date', False))
# test create invoice after complete trip
response6 = self.client.patch(f"{trip_url}{trip_id}/invoices/")
self.assertEquals(len(response6.json()), 1)
| 1.3125 | 1 |
flight/config.py | pieperm/IARC-2020 | 12 | 101252 | <reponame>pieperm/IARC-2020<filename>flight/config.py
"""Sets parameters for the drone and holds constant values needed for configuration"""
from flight.utils.latlon import LatLon, Latitude, Longitude
from mavsdk import System
MAX_SPEED: float = 6.352 # m/s
ALT_CORRECTION_SPEED: float = 0.25 # m/s down
MAX_ALT: float = 9.0 # m
TAKEOFF_ALT: float = 1.0 # m
MAST_ALT: float = 1.3 # m
FLYING_ALT: float = 6.0 # m
# What percentage of the height can we lose/gain before it is unsafe
ALT_PERCENT_ACCURACY: float = 0.15
ALT_RANGE_MAX: float = FLYING_ALT + (FLYING_ALT * ALT_PERCENT_ACCURACY) # m
ALT_RANGE_MIN: float = FLYING_ALT - (FLYING_ALT * ALT_PERCENT_ACCURACY) # m
POINT_PERCENT_ACCURACY: float = 0.2
# Position for pylon 1
# lat1: Latitude = Latitude(37.9497800)
# lon1: Longitude = Longitude(-92.7854470)
lat1: Latitude = Latitude(degree=37, minute=56, second=55.6)
lon1: Longitude = Longitude(degree=-91, minute=-47, second=-3.3)
pylon1: LatLon = LatLon(lat1, lon1)
# Position for pylon 2
# lat2: float = 37.9486433
# lon2: float = -91.7839372
# lat2: float = Latitude(37.9504260)
# lon2: float = Longitude(-91.7848542)
lat2: Latitude = Latitude(degree=37, minute=56, second=53.3)
lon2: Longitude = Longitude(degree=-91, minute=-47, second=0)
pylon2: LatLon = LatLon(lat2, lon2)
# Takeoff Position set in takeoff.py
takeoff_pos = LatLon
# Position for the mast
MAST_LAT: Latitude = Latitude(degree=37, minute=56, second=53.0)  # placeholder position
MAST_LON: Longitude = Longitude(degree=-91, minute=-47, second=-5.0)
MAST_LOCATION: LatLon = LatLon(MAST_LAT, MAST_LON)
OFFSET: float = 0.005 # km
DEG_OFFSET: int = 90 # deg
NUM_LAPS: int = 2
THINK_FOR_S: float = 2.0
FAST_THINK_S: float = 1.0
REALSENSE_WIDTH: int = 0
REALSENSE_HEIGHT: int = 0
REALSENSE_FRAMERATE: int = 60
run_states = {"early_laps": True, "to_mast": True}
async def config_params(drone: System):
await drone.action.set_maximum_speed(MAX_SPEED)
await drone.param.set_param_float("MIS_TAKEOFF_ALT", TAKEOFF_ALT)
await drone.action.set_maximum_speed(MAX_SPEED)
await drone.param.set_param_float("MPC_XY_VEL_MAX", MAX_SPEED)
await drone.param.set_param_float("MPC_XY_CRUISE", MAX_SPEED)
# Set data link loss failsafe mode HOLD
await drone.param.set_param_int("NAV_DLL_ACT", 1)
# Set offboard loss failsafe mode HOLD
await drone.param.set_param_int("COM_OBL_ACT", 1)
# Set offboard loss failsafe mode when RC is available HOLD
await drone.param.set_param_int("COM_OBL_RC_ACT", 5)
# Set RC loss failsafe mode HOLD
await drone.param.set_param_int("NAV_RCL_ACT", 1)
await drone.param.set_param_float("LNDMC_XY_VEL_MAX", 0.5)
# await drone.param.set_param_float("LNDMC_FFALL_THR", 3)
# await drone.param.set_param_float("LNDMC_FFALL_TTRI", 0.15)
await drone.param.set_param_float("LNDMC_ALT_MAX", MAX_ALT)
# await drone.param.set_param_float("LNDMC_LOW_T_THR", 0.2)
| 1.703125 | 2 |
ssr/ssr_types/extrinsics.py | SBCV/SatelliteSurfaceReconstruction | 34 | 101380 | import math
import numpy as np
def quaternion_to_rotation_matrix(q):
# Original C++ Method defined in pba/src/pba/DataInterface.h
qq = math.sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3])
qw = qx = qy = qz = 0
if qq > 0: # NORMALIZE THE QUATERNION
qw = q[0] / qq
qx = q[1] / qq
qy = q[2] / qq
qz = q[3] / qq
else:
qw = 1
qx = qy = qz = 0
m = np.zeros((3, 3), dtype=float)
m[0][0] = float(qw * qw + qx * qx - qz * qz - qy * qy)
m[0][1] = float(2 * qx * qy - 2 * qz * qw)
m[0][2] = float(2 * qy * qw + 2 * qz * qx)
m[1][0] = float(2 * qx * qy + 2 * qw * qz)
m[1][1] = float(qy * qy + qw * qw - qz * qz - qx * qx)
m[1][2] = float(2 * qz * qy - 2 * qx * qw)
m[2][0] = float(2 * qx * qz - 2 * qy * qw)
m[2][1] = float(2 * qy * qz + 2 * qw * qx)
m[2][2] = float(qz * qz + qw * qw - qy * qy - qx * qx)
return m
def rotation_matrix_to_quaternion(m):
# Original C++ Method defined in pba/src/pba/DataInterface.h
q = np.array([0, 0, 0, 0], dtype=float)
q[0] = 1 + m[0][0] + m[1][1] + m[2][2]
if q[0] > 0.000000001:
q[0] = math.sqrt(q[0]) / 2.0
q[1] = (m[2][1] - m[1][2]) / (4.0 * q[0])
q[2] = (m[0][2] - m[2][0]) / (4.0 * q[0])
q[3] = (m[1][0] - m[0][1]) / (4.0 * q[0])
else:
if m[0][0] > m[1][1] and m[0][0] > m[2][2]:
s = 2.0 * math.sqrt(1.0 + m[0][0] - m[1][1] - m[2][2])
q[1] = 0.25 * s
q[2] = (m[0][1] + m[1][0]) / s
q[3] = (m[0][2] + m[2][0]) / s
q[0] = (m[1][2] - m[2][1]) / s
elif m[1][1] > m[2][2]:
s = 2.0 * math.sqrt(1.0 + m[1][1] - m[0][0] - m[2][2])
q[1] = (m[0][1] + m[1][0]) / s
q[2] = 0.25 * s
q[3] = (m[1][2] + m[2][1]) / s
q[0] = (m[0][2] - m[2][0]) / s
else:
s = 2.0 * math.sqrt(1.0 + m[2][2] - m[0][0] - m[1][1])
q[1] = (m[0][2] + m[2][0]) / s
q[2] = (m[1][2] + m[2][1]) / s
q[3] = 0.25 * s
q[0] = (m[0][1] - m[1][0]) / s
return q
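# A quick consistency sketch for the two converters above, using the identity
# rotation as a trivial example: converting a rotation matrix to a quaternion
# and back should reproduce the matrix up to floating-point error.
#
#   m = np.identity(3)
#   q = rotation_matrix_to_quaternion(m)     # -> [1, 0, 0, 0]
#   assert np.allclose(quaternion_to_rotation_matrix(q), m)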
class Extrinsics:
def __init__(self):
# center is the coordinate of the camera center with respect to the
# world coordinate frame (t = -R C)
self._center = np.array([0, 0, 0], dtype=float)
# the translation vector is the vector used to transform points in
# world coordinates to camera coordinates (C = -R^T t)
self._translation_vec = np.array([0, 0, 0], dtype=float)
# use for these attributes the getter and setter methods
self._quaternion = np.array([0, 0, 0, 0], dtype=float)
# for rotations the inverse is equal to the transpose
# self._rotation_inv_mat = np.linalg.transpose(self._rotation_mat)
self._rotation_mat = np.zeros((3, 3), dtype=float)
@staticmethod
def invert_transformation_mat(trans_mat):
# Exploit that the inverse of the rotation part is equal to the
# transposed of the rotation part. This should be more robust than
# trans_mat_inv = np.linalg.inv(trans_mat)
trans_mat_inv = np.zeros_like(trans_mat)
rotation_part_inv = trans_mat[0:3, 0:3].T
trans_mat_inv[0:3, 0:3] = rotation_part_inv
trans_mat_inv[0:3, 3] = -np.dot(rotation_part_inv, trans_mat[0:3, 3])
trans_mat_inv[3, 3] = 1
return trans_mat_inv
def is_rotation_mat_valid(self, some_mat):
# TEST if rotation_mat is really a rotation matrix
# (i.e. det = -1 or det = 1)
det = np.linalg.det(some_mat)
is_close = np.isclose(det, 1) or np.isclose(det, -1)
# if not is_close:
# logger.vinfo('some_mat', some_mat)
# logger.vinfo('determinante', det)
return is_close
def set_quaternion(self, quaternion):
self._quaternion = quaternion
# we must change the rotation matrixes as well
self._rotation_mat = quaternion_to_rotation_matrix(quaternion)
def set_rotation_mat(self, rotation_mat):
assert self.is_rotation_mat_valid(rotation_mat)
self._rotation_mat = rotation_mat
# we must change the quaternion as well
self._quaternion = rotation_matrix_to_quaternion(rotation_mat)
def set_camera_center_after_rotation(self, center):
assert self.is_rotation_mat_valid(self._rotation_mat)
self._center = center
self._translation_vec = -np.dot(self._rotation_mat, center)
def set_camera_translation_vector_after_rotation(self, translation_vector):
# translation_vector: trans_vec = -Rc
assert self.is_rotation_mat_valid(self._rotation_mat)
self._translation_vec = translation_vector
self._center = -np.dot(
self._rotation_mat.transpose(), translation_vector
)
def get_quaternion(self):
return self._quaternion
def get_rotation_mat(self):
# Note:
# self._rotation_mat.T or self._rotation_mat.transpose()
# DO NOT CHANGE THE MATRIX
return self._rotation_mat
def get_translation_vec(self):
return self._translation_vec
def get_camera_center(self):
return self._center
def get_4x4_world_to_cam_mat(self):
# This matrix can be used to convert points given in world coordinates
# into points given in camera coordinates
# M = [R -Rc]
# [0 1],
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
homogeneous_mat = np.identity(4, dtype=float)
homogeneous_mat[0:3, 0:3] = self.get_rotation_mat()
homogeneous_mat[0:3, 3] = -self.get_rotation_mat().dot(
self.get_camera_center()
)
return homogeneous_mat
def set_4x4_cam_to_world_mat(self, cam_to_world_mat):
# This matrix can be used to convert points given in camera coordinates
# into points given in world coordinates
# M = [R^T c]
# [0 1]
#
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
rotation_part = cam_to_world_mat[0:3, 0:3]
translation_part = cam_to_world_mat[0:3, 3]
self.set_rotation_mat(rotation_part.transpose())
self.set_camera_center_after_rotation(translation_part)
def get_4x4_cam_to_world_mat(self):
# This matrix can be used to convert points given in camera coordinates
# into points given in world coordinates
# M = [R^T c]
# [0 1]
# :return:
#
# https://en.wikipedia.org/wiki/Transformation_matrix#/media/File:2D_affine_transformation_matrix.svg
homogeneous_mat = np.identity(4, dtype=float)
homogeneous_mat[0:3, 0:3] = self.get_rotation_mat().transpose()
homogeneous_mat[0:3, 3] = self.get_camera_center()
return homogeneous_mat
def cam_to_world_coord_multiple_coords(self, cam_coords):
num_coords = cam_coords.shape[0]
hom_entries = np.ones(num_coords).reshape((num_coords, 1))
cam_coords_hom = np.hstack((cam_coords, hom_entries))
world_coords_hom = (
self.get_4x4_cam_to_world_mat().dot(cam_coords_hom.T).T
)
world_coords = np.delete(world_coords_hom, 3, 1)
return world_coords
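    # A minimal usage sketch for this class (values hypothetical): set the
    # rotation and camera center, then map camera coordinates to world
    # coordinates; the two 4x4 matrices above are inverses of each other.
    #
    #   ext = Extrinsics()
    #   ext.set_rotation_mat(np.identity(3))
    #   ext.set_camera_center_after_rotation(np.array([1.0, 2.0, 3.0]))
    #   world = ext.cam_to_world_coord_multiple_coords(np.zeros((1, 3)))
    #   # world == [[1.0, 2.0, 3.0]] for this identity rotation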
| 2.171875 | 2 |
Scores/migrations/0001_initial.py | beren5000/LudumDare41web | 0 | 101508 | <gh_stars>0
# Generated by Django 2.0.4 on 2018-04-22 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Scores',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modified')),
('name', models.CharField(max_length=100, verbose_name='Player Name')),
('score', models.IntegerField(default=0, verbose_name='Score')),
],
options={
'verbose_name': 'Score',
'verbose_name_plural': 'Scores',
},
),
]
| 0.847656 | 1 |
misclientes/migrations/0015_auto_20181011_1503.py | mrbrazzi/django-misclientes | 0 | 101636 | # Generated by Django 2.0.6 on 2018-10-11 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('misclientes', '0014_auto_20181011_1455'),
]
operations = [
migrations.AlterField(
model_name='cliente',
name='enterprise',
            field=models.ForeignKey(null=True, on_delete=models.CASCADE, to='misclientes.Enterprise'),
),
]
| 1.007813 | 1 |
src/common/env.py | sarvi/wisk | 2 | 101764 | <filename>src/common/env.py
'''
Created on Feb 20, 2017
@author: sarvi
@copyright: 2017 Cisco Inc. All rights reserved.
'''
# pylint: disable=locally-disabled, too-many-lines
from __future__ import print_function
import base64
import collections
import datetime
import hashlib
import logging.handlers
import os
import platform
import pprint
import pwd
import random
import stat
import subprocess
import sys
import threading
import time
import uuid
from configparser import SafeConfigParser, InterpolationMissingOptionError, InterpolationSyntaxError, ParsingError, Error
from io import StringIO
SIMPLE_FMT = '%(message)s'
SIMPLE_FMT_CORR_ID = '%(corr_id)s %(message)s'
VERBOSE_FMT_THREAD = '[%(asctime)s-%(thread)s] %(levelname)s/%(processName)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
VERBOSE_FMT_CORR_ID = '[%(asctime)s-%(corr_id)s] %(levelname)s/%(processName)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
VERBOSE_FMT_CORR_ID_TASK = '[%(asctime)s-%(corr_id)s] %(levelname)s/%(processName)s/%(task_name)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s'
log = logging.getLogger(__name__) # pylint: disable=locally-disabled, invalid-name
printlog = logging.getLogger('wisk.print') # pylint: disable=locally-disabled, invalid-name
ENV_VARIABLE_PREFIX = 'WISK_'
ENV_INIT_DONE = False
LOGGING_INIT_DONE = False
MONITOR_INTERVAL = 3
MONITOR_LOW = 20*1024
MONITOR_RESIZE = 50*1024
MIN_CONFIG = '''
[common]
logdir = /auto/wit-log/wisklogs/%(toolrootname)s
savelogs = all
fileloglevel = DEBUG
consoleloglevel = WARNING
log_iso8601_dates = False
'''
INSTALL_TYPE_SEARCH = ['%(config_dir)s/wisk_install_type.cfg']
CLIENT_CFG_SEARCH = ['%(config_dir)s/wisk_common.cfg',
'%(config_dir)s/wisk_%(Site)s.cfg']
INSTALL_PKG_ROOT = os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), '../')))
INSTALLED = True if os.path.basename(INSTALL_PKG_ROOT) != 'src' else False
LOCAL_ONLY_CONFIGS = []
if not INSTALLED:
INSTALL_ROOT = os.path.abspath(os.path.join(INSTALL_PKG_ROOT, '../'))
INSTANCE_NAME = os.path.basename(INSTALL_ROOT)
CONFIG_DIR = os.path.abspath(os.path.join(INSTALL_PKG_ROOT, '../config'))
INSTALL_BIN_DIR = os.path.join(INSTALL_ROOT, 'scripts')
INSTALL_LIB_DIR = os.path.join(INSTALL_ROOT, 'src/lib')
LATEST_INSTALL_ROOT = INSTALL_ROOT
LATEST_BIN_DIR = INSTALL_BIN_DIR
LATEST_LIB_DIR = os.path.abspath(os.path.join(INSTALL_PKG_ROOT, '../src/lib'))
else:
INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_PKG_ROOT, '../'))
INSTANCE_NAME = None
while os.path.basename(INSTALL_ROOT) != 'lib':
INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_ROOT, '../'))
INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_ROOT, '../'))
CONFIG_DIR = os.path.abspath(os.path.join(INSTALL_ROOT, 'var/config'))
INSTALL_BIN_DIR = os.path.join(INSTALL_ROOT, 'bin')
INSTALL_LIB_DIR = os.path.join(INSTALL_ROOT, 'lib')
LATEST_INSTALL_ROOT = os.path.normpath(os.path.join(INSTALL_ROOT, '../current'))
LATEST_BIN_DIR = os.path.join(LATEST_INSTALL_ROOT, 'bin')
LATEST_LIB_DIR = os.path.join(LATEST_INSTALL_ROOT, 'lib')
HOST_UNAME = platform.uname()
UMASK = 0o22 # Set default umask to file permissions of 0644 and directory permissions of 0755
os.umask(UMASK)
ENVIRONMENT = {
'start_time': datetime.datetime.now().strftime("_%Y%m%d%H%M%S"),
'pid': str(os.getpid()),
'username': os.environ['SUDO_USER'] if pwd.getpwuid(os.getuid())[0] == 'root' and 'SUDO_USER' in os.environ else pwd.getpwuid(os.getuid())[0],
'installed': INSTALLED,
'instance_name': INSTANCE_NAME,
'install_pkg_root': INSTALL_PKG_ROOT,
'install_root': INSTALL_ROOT,
'config_dir': CONFIG_DIR,
# 'Site': 'local',
'OS': HOST_UNAME[0],
'OS-Version': 'X.XX',
'CPU': 'x86',
'Bits': '64',
'Host OS': HOST_UNAME[0],
'Host-osver': HOST_UNAME[2],
'Host Machine arch': HOST_UNAME[4],
'Host CPU family': HOST_UNAME[5],
'Host Name': HOST_UNAME[1].split('.')[0],
'UMASK': UMASK,
'log_iso8601_dates': False,
}
LUMENS_DATA = {
'dry_run': False,
'group_name': 'wisk',
'data_source': 'cli',
'submitter_id': ENVIRONMENT['username'],
# 'timestamp': None,
# 'uuid': 'valid UUID',
# 'state': 'SUCCESS/FAILURE/IN PROGRESS',
'metadata': {}
}
LOCAL = threading.local()
def get_current_correlation_id():
''' Retrieve operation_id saved to thread '''
try:
return LOCAL.operation_id or ENVIRONMENT.get('uniqueid', None) or '{}.{}'.format(os.getpid(), threading.get_ident())
except AttributeError:
return ENVIRONMENT.get('uniqueid', None) or '{}.{}'.format(os.getpid(), threading.get_ident())
class CorrelationIdFilter(logging.Filter):
''' Correlation ID Filter '''
def filter(self, record):
record.corr_id = get_current_correlation_id()
return True
class MicroFormatter(logging.Formatter):
""" Microsecond precision for CLIP """
def formatTime(self, record, datefmt=None):
"""
Override date format for microseconds
"""
converted = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, converted)
else:
t = time.strftime("%Y-%m-%d %H:%M:%S", converted)
if ENVIRONMENT['log_iso8601_dates']:
s = "%s,%03d" % (t, record.msecs)
else:
s = "%s.%06d" % (t, 1000 * record.msecs)
return s
def env_siteinfo_update():
''' Get site information about where the client is running from '''
if ENVIRONMENT['OS'] == 'Darwin':
servinfo = {}
servinfo['Site'] = 'sjc'
servinfo['DC'] = os.environ.get('MY_DEFAULT_DC', 'sjc5c').lower()
servinfo['OS-Version'] = '6.20'
else:
try:
out = subprocess.check_output(['/router/bin/servinfo'])
except subprocess.CalledProcessError as ex:
log.error('Could not get servinfo for client: %s', ex)
return {}
log.debug(out)
out = out.strip()
out = [k.decode("utf-8").split(':') for k in out.splitlines()]
servinfo = {k.strip(): v.strip() for k, v in out}
# servinfo = {k: v for k, v in servinfo.items() if k not in ENVIRONMENT}
ENVIRONMENT.update(servinfo)
LUMENS_DATA['metadata'].update(servinfo)
env_update = {k.replace(ENV_VARIABLE_PREFIX, ''): v for k, v in os.environ.items() if k.startswith(ENV_VARIABLE_PREFIX)}
ENVIRONMENT.update(env_update)
ENVIRONMENT['Host Name'] = ENVIRONMENT['Host Name'].split('.')[0]
return servinfo
def get_unique_id():
''' generate a short unique id for client '''
# return str(uuid.uuid4())
intstr = hex(int(time.time() * 10000))[2:] + hex(random.randrange(0, 0xFFFF))[2:]
return base64.b64encode(intstr.encode(), 'AB'.encode())[:-2].decode('utf-8')
def config_read(cfg_search, doclientcfg=False): # pylint: disable=locally-disabled, too-many-statements, too-many-branches, too-many-locals
''' Read configuration files in a certain order of precedence '''
config = SafeConfigParser()
# Consider using initial values as defaults, so are accessible outside common
# config = SafeConfigParser(ENVIRONMENT)
ENVIRONMENT['config'] = config
config.add_section('common')
config.set('common', 'home_dir', os.path.expanduser('~/'))
config.set('common', 'installed', str(INSTALLED))
config.set('common', 'install_pkg_root', INSTALL_PKG_ROOT)
config.set('common', 'install_root', INSTALL_ROOT)
config.set('common', 'config_dir', CONFIG_DIR)
config.set('common', 'instance_name', str(INSTANCE_NAME))
config.set('common', 'username', ENVIRONMENT['username'])
config.set('common', 'hostname', ENVIRONMENT['Host Name'])
config.set('common', 'toolname', ENVIRONMENT['toolname'])
toolrootname = ENVIRONMENT['toolrootname']
config.set('common', 'toolrootname', toolrootname)
config.set('common', 'buildtag', os.environ.get('BUILD_TAG', ''))
config.set('common', 'buildid', os.environ.get('BUILD_ID', ''))
config.set('common', 'pid', '%d' % os.getpid())
config.set('common', 'worker_id', '0')
config.set('common', 'site', ENVIRONMENT['Site'])
config.set('common', 'datacenter', ENVIRONMENT['DC'])
config.add_section('wisk')
config.set('wisk', 'monitor_interval', str(MONITOR_INTERVAL))
config.set('wisk', 'monitor_low', str(MONITOR_LOW))
config.set('wisk', 'monitor_resize', str(MONITOR_RESIZE))
# Exceptions
if toolrootname in ['uwsgi']:
config.set('common', 'widsuffix', '{worker_id}')
else:
config.set('common', 'widsuffix', '')
if ENVIRONMENT['username'] != 'flxclone':
doclientcfg = True
ENVIRONMENT['doclientcfg'] = doclientcfg
if doclientcfg:
# ENVIRONMENT['uniqueid'] = '%s' % hex(int(time.time() * 10000))[2:]
ENVIRONMENT['uniqueid'] = get_unique_id()
LUMENS_DATA['metadata']['uniqueid'] = ENVIRONMENT['uniqueid']
LOCAL.operation_id = ENVIRONMENT['uniqueid']
config.set('common', 'uniqueid', ENVIRONMENT['uniqueid'])
config.set('common', 'log_root', '%(client_log_root)s')
ENVIRONMENT['logfile'] = '%(logdir)s/%(toolname)s_%(username)s_%(hostname)s_%(uniqueid)s.log'
config.set('common', 'logfilename', ENVIRONMENT['logfile'])
else:
config.set('common', 'uniqueid', '')
config.set('common', 'log_root', '%(server_log_root)s')
if toolrootname in ['uwsgi']:
ENVIRONMENT['logfile'] = '%(logdir)s/%(toolname)s_%(hostname)s_%(widsuffix)s.log'
else:
ENVIRONMENT['logfile'] = '%(logdir)s/%(toolname)s_%(hostname)s.log'
config.set('common', 'logfilename', ENVIRONMENT['logfile'])
if not ENVIRONMENT['installed']:
config.set('common', 'logdir', '%(install_root)s/logs/%(toolrootname)s')
# Read system defaults
cfgfiles = list(cfg_search)
found = []
try:
# Read the minimum configuration
config.readfp(StringIO(MIN_CONFIG))
# read the tool specific list of config files
cfgfiles = [os.path.expanduser(p) % ENVIRONMENT for p in cfgfiles]
found.extend(config.read(cfgfiles))
# Search for install_type config files
installtypecfg = [os.path.expanduser(p) % ENVIRONMENT for p in INSTALL_TYPE_SEARCH]
foundinstalltype = config.read(installtypecfg)
if not foundinstalltype:
sys.exit('Error: install_type config files not found: %s' % (installtypecfg))
found.extend(foundinstalltype)
cfgfiles.extend(installtypecfg)
if doclientcfg:
clientcfg = [os.path.join(get_tool_dir(), 'wisk.cfg')]
found.extend(config.read(clientcfg))
cfgfiles.extend(clientcfg)
else:
servercfgfiles = [os.path.join(get_tool_dir(), 'wisk_server.cfg')]
if config.get('common', 'install_type', vars={'install_type': None}) != 'local':
servercfgfiles.append(os.path.join(get_tool_dir(), 'wisk_server_%s.cfg' % config.get('common', 'install_type', None)))
servercfgfiles = [os.path.expanduser(i) for i in servercfgfiles]
cfgfiles.extend(servercfgfiles)
found.extend(config.read(servercfgfiles))
if config.get('common', 'install_type', vars={'install_type': None}) == 'local':
localcfgfiles = [os.path.join(get_tool_dir(), 'wisk_local.cfg'),
os.path.join(get_tool_dir(), 'wisk_local_%(instance_name)s.cfg')]
localcfgfiles = [os.path.expanduser(i) for i in localcfgfiles]
cfgfiles.extend(localcfgfiles)
found.extend(config.read(localcfgfiles))
# read the config files specified in WISK_CFG environment variable, used for trouble shooting
        env_cfg = None if INSTALLED else os.environ.get('WISK_CFG', None)
if env_cfg is not None:
cfgfiles.append(env_cfg)
found.extend(config.read([env_cfg]))
# Temp code: Remove after CLIP configured
try:
ENVIRONMENT['log_iso8601_dates'] = config.getboolean('common', 'log_iso8601_dates')
except Error:
ENVIRONMENT['log_iso8601_dates'] = False
except (ParsingError, OSError) as ex:
sys.exit('Error reading/parsing Confg files %s : %s' % (cfgfiles, ex))
ENVIRONMENT['install_type'] = config.get('common', 'install_type', vars={'install_type': None})
not_found = set(cfgfiles) - set(found)
ENVIRONMENT['cfg_found'] = found
ENVIRONMENT['cfg_notfound'] = not_found
return found, not_found
def config_dict(section='common', options=None):
"""
Safely return options from config section as dictionary
"""
cdict = {}
config = ENVIRONMENT.get('config', None)
if config and config.has_section(section):
if options is None:
cdict = {option: value for option, value in config.items(section)}
else:
cdict = {key: config.get(section, key) for key in options if config.has_option(section, key)}
return cdict
def loglevelint2str(llevel):
''' Translate a loglevel string to a loglevel integer '''
loglevels = {
logging.DEBUG: 'debug',
logging.INFO: 'info',
logging.WARNING: 'warning',
logging.ERROR: 'error',
logging.CRITICAL: 'critical'}
return loglevels.get(llevel, 'notset')
def loglevelstr2int(llevel):
''' Translate a loglevel string to a loglevel integer '''
loglevels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARNING,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
return loglevels.get(llevel, logging.NOTSET)
def logverbosity2str(verbosity):
''' Return loglevel as a string '''
if verbosity > 3:
verbosity = 3
return ['ERROR', 'WARNING', 'INFO', 'DEBUG'][verbosity]
def loglevel(verbosity):
''' Change log levels if needed '''
if verbosity == 0:
llevel = logging.ERROR
elif verbosity == 1:
llevel = logging.WARNING
elif verbosity == 2:
llevel = logging.INFO
elif verbosity >= 3:
llevel = logging.DEBUG
else:
llevel = logging.DEBUG
ENVIRONMENT['consoleloghandler'].setLevel(llevel)
if hasattr(sys, '_getframe'):
def currentframe():
''' Return Frame '''
# noinspection PyProtectedMember
return sys._getframe(3) # pylint: disable=locally-disabled, protected-access
else:
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except Exception: # pylint: disable=locally-disabled, broad-except
return sys.exc_info()[2].tb_frame.f_back
# _SRCFILE = os.path.normcase(currentframe.__code__.co_filename) # pylint: disable=locally-disabled, no-member
#
#
# def findcaller():
# """
# Find the stack frame of the caller so that we can note the source
# file name, line number and function name.
# """
# f = currentframe()
# # On some versions of IronPython, currentframe() returns None if
# # IronPython isn't run with -X:Frames.
# if f is not None:
# f = f.f_back
# rv = "(unknown file)", 0, "(unknown function)"
# while hasattr(f, "f_code"):
# code = f.f_code
# filename = os.path.normcase(code.co_filename)
# if filename != _SRCFILE:
# f = f.f_back
# continue
# if f.f_back is None or not hasattr(f.f_back, "f_code"):
# rv = (code.co_filename, f.f_lineno, '*%s*' % code.co_name)
# else:
# f = f.f_back
# code = f.f_code
# rv = (code.co_filename, f.f_lineno, code.co_name)
# break
# return rv
#
#
# def genemitmethod(console, origemit):
# ''' generate emit method for handlers '''
#
# def emitmethod(self, record):
# ''' emit method for handlers '''
# try:
# thr = self.threads.setdefault(record.thread, dict(isprint=False, levelno=None, record=None))
# tisprint = thr.get('isprint')
# tlevelno = thr.get('levelno')
# trecord = thr.get('record')
# isprint = getattr(record, 'isprint', False)
# if tlevelno != record.levelno or tisprint != isprint:
# trecord = thr.get('record')
# if trecord:
# origemit(self, trecord)
# thr['record'] = None
# thr['isprint'] = isprint
# thr['levelno'] = record.levelno
# if not isprint:
# return origemit(self, record)
# if console:
# return
# trecord = thr.get('record')
# if trecord is not None:
# trecord.msg += record.msg
# else:
# thr['record'] = record
# record.pathname, record.lineno, record.funcName = findcaller()
# if record.msg.endswith('\n'):
# thr['record'].msg = thr['record'].msg[:-1]
# origemit(self, thr['record'])
# thr['record'] = None
# except (KeyboardInterrupt, SystemExit):
# raise
# except Exception: # pylint: disable=locally-disabled, broad-except
# self.handleError(record)
# return emitmethod
#
from logging import StreamHandler
# class StreamHandler(logging.StreamHandler):
# ''' Stream Handler '''
# threads = {}
# emit = genemitmethod(console=True, origemit=logging.StreamHandler.emit)
#
# def __init__(self, *args, **kwargs):
# if isinstance(kwargs.setdefault('stream', sys.stdout), OutputRedirector):
# kwargs['stream'] = kwargs['stream'].filep
#
# super(StreamHandler, self).__init__(*args, **kwargs)
#
#
from logging import FileHandler
# class FileHandler(logging.FileHandler):
# ''' File Handler '''
# threads = {}
# emit = genemitmethod(console=False, origemit=logging.FileHandler.emit)
#
#
from logging.handlers import RotatingFileHandler
# class RotatingFileHandler(logging.handlers.RotatingFileHandler):
# ''' Rotating Filehandler '''
# threads = {}
# emit = genemitmethod(console=False, origemit=logging.handlers.RotatingFileHandler.emit)
#
#
# class OutputRedirector(object):
# """ Wrapper to redirect stdout or stderr """
#
# def __init__(self, filep, logmethod):
# ''' Output Redirector init '''
# self.filep = filep
# self.logmethod = logmethod
#
# def write(self, s):
# ''' Write '''
# self.logmethod(s, extra={'isprint': True})
# self.filep.write(s)
#
# def origwrite(self, s):
# ''' Write data to stream '''
# self.filep.write(s)
#
# def writelines(self, lines):
# ''' Writelines '''
# self.logmethod('\n'.join(lines), extra={'isprint': True})
# self.filep.writelines(lines)
#
# def origwritelines(self, lines):
# ''' Write data to stream '''
# self.filep.writelines(lines)
#
# def flush(self):
# ''' Flush '''
# self.filep.flush()
#
# def isatty(self, *args, **kwargs): # pylint: disable=locally-disabled, unused-argument, no-self-use
# ''' isatty is False when in redirection '''
# return False
def logging_setup(verbosity, corridfilter=None, onlyerrorlogs=False): # pylint: disable=locally-disabled, too-many-statements, too-many-branches
''' Logging Setup '''
global LOGGING_INIT_DONE # pylint: disable=locally-disabled, global-statement
if LOGGING_INIT_DONE:
return
LOGGING_INIT_DONE = True
config = ENVIRONMENT['config']
if onlyerrorlogs:
config.set('common', 'savelogs', 'error')
# All logging is done in UTC for CLIP
os.environ['TZ'] = 'UTC'
time.tzset()
# create file handler which logs with log level specified in config
ENVIRONMENT['logfile'] = config.get('common', 'logfilename')
if not os.path.exists(os.path.dirname(ENVIRONMENT['logfile'])):
dname = os.path.dirname(ENVIRONMENT['logfile'])
try:
os.makedirs(dname)
os.chmod(dname,
os.stat(dname).st_mode |
stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
except OSError as e:
sys.exit('Error creating log directory: %s' % e)
logger = logging.getLogger()
logger.setLevel(logging.NOTSET)
verbosity = verbosity if verbosity <= 4 else 4
clidebuglevel = (5 - verbosity) * 10
ENVIRONMENT['verbosity'] = verbosity
# create console handler with a default log level from config and increased by verbosity
consoleloglevel = loglevelstr2int(config.get('common', 'consoleloglevel').lower())
if clidebuglevel < consoleloglevel:
consoleloglevel = clidebuglevel
ENVIRONMENT['consoleloglevel'] = consoleloglevel
doclientcfg = ENVIRONMENT.get('doclientcfg', False)
toolname = ENVIRONMENT.get('toolname', '')
if not corridfilter:
corridfilter = CorrelationIdFilter()
logging.getLogger('').addFilter(corridfilter)
if logger.handlers:
ENVIRONMENT['consoleloghandler'] = logger.handlers[0]
else:
if toolname.startswith('eventlistener'):
ENVIRONMENT['consoleloghandler'] = StreamHandler(stream=sys.stderr)
else:
ENVIRONMENT['consoleloghandler'] = StreamHandler()
ENVIRONMENT['consoleloghandler'].addFilter(corridfilter)
ENVIRONMENT['consoleloghandler'].setLevel(consoleloglevel)
if doclientcfg:
if verbosity >= 4:
cformat = MicroFormatter(VERBOSE_FMT_CORR_ID)
else:
cformat = MicroFormatter(SIMPLE_FMT)
else:
cformat = MicroFormatter(VERBOSE_FMT_CORR_ID) # Server
ENVIRONMENT['consoleloghandler'].setFormatter(cformat)
logger.addHandler(ENVIRONMENT['consoleloghandler'])
if doclientcfg:
fileloglevel = config.get('common', 'fileloglevel').lower()
ENVIRONMENT['fileloglevel'] = logging.DEBUG if fileloglevel == 'debug' else logging.INFO
try:
ENVIRONMENT['fileloghandler'] = FileHandler(ENVIRONMENT['logfile'])
except OSError as e:
sys.exit('Error setting up file logging handler: %s, %s' % (ENVIRONMENT['logfile'], e))
ENVIRONMENT['fileloghandler'].addFilter(corridfilter)
logger.addHandler(ENVIRONMENT['fileloghandler'])
ENVIRONMENT['fileloghandler'].setLevel(ENVIRONMENT['fileloglevel'])
fformat = MicroFormatter(VERBOSE_FMT_CORR_ID)
ENVIRONMENT['fileloghandler'].setFormatter(fformat)
# sys.stdout = OutputRedirector(sys.stdout, printlog.info)
# sys.stderr = OutputRedirector(sys.stderr, printlog.error)
elif toolname.startswith('eventlistener') or toolname.startswith('celery-flower'):
# sys.stderr = OutputRedirector(sys.stderr, printlog.info)
handler = FileHandler(ENVIRONMENT['logfile'])
handler.addFilter(corridfilter)
handler.setLevel(logging.DEBUG)
handler.setFormatter(MicroFormatter(VERBOSE_FMT_CORR_ID))
logger.addHandler(handler)
ENVIRONMENT['fileloghandler'] = handler
ENVIRONMENT['fileloglevel'] = logging.DEBUG
loglevel(verbosity)
log.debug('Incoming Environment: %s', pprint.pformat(dict(os.environ), indent=4))
log.debug('Command line: "%s"', '" "'.join(sys.argv))
log.debug('Workspace- ID: %s', ENVIRONMENT['workspace_id'])
log.debug('Config files read from search path: %s', ENVIRONMENT['cfg_found'])
log.debug('Config files not found in search path: %s', ENVIRONMENT['cfg_notfound'])
log.debug('Environment: %s', pprint.pformat(ENVIRONMENT, indent=4))
def gettoolname(programname, subcommands=0):
''' Get toolname from program name and subcommand '''
nodashargs = [i for i in sys.argv if not i.startswith('-')]
for i, v in enumerate(nodashargs):
if programname in v:
return '-'.join([programname] + nodashargs[i + 1:i + subcommands + 1])
return programname
def env_init(toolname, cfgsearch, doclientcfg=False, **kwargs):
''' Initialize the environment. Read platform information. Read configuration. Setup logging '''
global ENV_INIT_DONE # pylint: disable=locally-disabled, global-statement
if not ENV_INIT_DONE:
ENV_INIT_DONE = True
ENVIRONMENT['toolname'] = toolname
ENVIRONMENT['toolrootname'] = toolname.split('-')[0]
ENVIRONMENT.update(kwargs)
env_siteinfo_update()
ENVIRONMENT['workspace_path'] = workspace_path()
ENVIRONMENT['workspace_id'] = workspace_id()
ENVIRONMENT['workspace_guid'] = workspace_guid()
config_read(cfgsearch, doclientcfg)
celmajor, _ = ENVIRONMENT['OS-Version'].strip().split('.')
if int(celmajor) < 6:
sys.exit('ERROR: Tooling requires CEL 6 or above')
return ENV_INIT_DONE
def exit_clean(err):
''' Cleanup and save logs if error or needed before exiting '''
if err is None:
err = 0
try:
logfile = ENVIRONMENT['logfile']
config = ENVIRONMENT['config']
tlogdir = os.path.expanduser(config.get('common', 'logdir'))
savelogs = config.get('common', 'savelogs').lower()
except (InterpolationMissingOptionError, InterpolationSyntaxError) as ex:
log.info(ex)
return err
    except KeyError:
        return err
if err != 0 or savelogs == 'all':
if not os.path.exists(logfile):
log.error('Log file does not exist: %s', logfile)
return err
logfilename = os.path.basename(logfile)
tlogfilename = list(os.path.splitext(logfilename))
if err:
tlogfilename.insert(-1, '_error')
tlogdir0 = tlogdir
tlogdir = '%s_error' % tlogdir
if not os.path.exists(tlogdir):
# Don't create if does not exist, would be wrong ID
log.error('Error Log dir does not exist: %s', tlogdir)
tlogdir = tlogdir0
tlogfile = os.path.join(tlogdir, ''.join(tlogfilename))
logmsg = log.info if err == 0 else print
if logfile != tlogfile:
try:
# Try to create hardlink
os.link(logfile, tlogfile)
except OSError as e:
log.warning('Error with hard link of %s to %s: %s', logfile, tlogfile, e)
try:
# Try to create symlink
os.symlink(logfile, tlogfile)
except OSError as e:
log.error('Error creating symlink of %s to %s: %s', logfile, tlogfile, e)
return err
try:
os.chmod(tlogfile,
os.stat(tlogfile).st_mode |
stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
except OSError as e:
log.error('Error updating permissions on log files: %s', e)
logmsg('Detailed Logs at %s' % tlogfile)
else:
log.debug('Save Logs on Error is Enabled. Removing success Logfile: %s', logfile)
if os.path.exists(logfile):
os.remove(logfile)
return err
def get_relversion(client):
''' Get the release version of the current software instance '''
if 'rel-version' in ENVIRONMENT:
return ENVIRONMENT['rel-version']
if INSTALLED:
ENVIRONMENT['rel-version'] = os.path.basename(os.path.realpath(INSTALL_ROOT))
return ENVIRONMENT['rel-version']
try:
prefix = 'WISK_CLIENT' if client else 'WISK_SERVER'
ENVIRONMENT['rel-version'] = subprocess.check_output(
['git', 'describe', '--tags', '--match', '%s_[--9A-Z]*' % prefix, '--always', '--abbrev=4', 'HEAD'],
stderr=subprocess.STDOUT).decode('utf-8').strip().replace('_', '.')
except subprocess.CalledProcessError as ex:
        log.debug('Could not get release version from git: %s', ex)
ENVIRONMENT['rel-version'] = 'unknown-development-version'
return ENVIRONMENT['rel-version']
def get_reldatetime():
''' Get the release date of the current software instance '''
if 'rel-datetime' in ENVIRONMENT:
return ENVIRONMENT['rel-datetime']
ENVIRONMENT['rel-datetime'] = time.ctime(os.path.getmtime(os.path.realpath(INSTALL_ROOT)))
return ENVIRONMENT['rel-datetime']
def get_verbosity(default=None):
''' Get verbosity from the command line '''
if 'verbosity' in ENVIRONMENT:
return ENVIRONMENT['verbosity']
for v in ['-v', '-verbose', '-verbosity', '--verbose', '--verbosity']:
if v in sys.argv:
i = sys.argv.index(v)
try:
return ENVIRONMENT.setdefault('verbosity', int(sys.argv[i + 1]))
except (ValueError, IndexError):
return ENVIRONMENT.setdefault('verbosity', 1)
for i, o in enumerate(sys.argv):
if o.startswith(v):
if '=' in o:
_, rv = o.split('=')
return ENVIRONMENT.setdefault('verbosity', int(rv))
elif o.startswith('-vv'):
return ENVIRONMENT.setdefault('verbosity', int(len(o[1:])))
else:
rv = o.replace(v, '')
return ENVIRONMENT.setdefault('verbosity', int(rv))
return ENVIRONMENT.setdefault('verbosity', default)
def workspace_path():
''' Recognize the root of the workspace if you are in one '''
wspath = os.path.normpath(os.getcwd())
try:
while '.git' not in os.listdir(wspath):
wspath = os.path.split(wspath)[0]
if wspath == '/':
return None
except OSError:
return '/router/bin'
return wspath
def workspace_id():
''' Generate a workspace id unique to the username, host and workspace path'''
wid = '%(username)s_%(Host Name)s_%(workspace_path)s' % (ENVIRONMENT)
wid = wid.replace('/', '.')
wid = wid.replace('_.', '_')
return wid
def workspace_guid():
''' Generate a workspace guid that can be used for Oracle user name '''
# Cleaning up DB after another user will be challenging if workspace_id varies with user calling this routine
ws_id = '%(Host Name)s_%(workspace_path)s' % ENVIRONMENT # Should be unique across cisco
# Add the owner of the workspace so more easily identified
ws_stat = os.stat(INSTALL_ROOT)
ws_uid = ws_stat.st_uid
ws_owner = pwd.getpwuid(ws_uid).pw_name
slug = hashlib.sha1(ws_id.encode('utf-8')).hexdigest()[:10]
guid = '%s_%s' % (ws_owner, slug)
return guid
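# Sketch of the resulting identifier (owner and hash are hypothetical): the
# guid is the workspace owner's username joined to the first 10 hex digits of
# sha1("<host>_<workspace path>"), e.g. "jdoe_1a2b3c4d5e" - stable across
# callers and short enough to serve as an Oracle user name.
#
#   hashlib.sha1('myhost_/ws/path'.encode('utf-8')).hexdigest()[:10]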
def get_tool_dir():
''' Get .wisk dir '''
return os.environ.get('TOOL_DIR', os.path.join(os.path.expanduser('~'), '.wisk'))
def get_correlation_header():
""" Retrieve uniqueid / workspace_guid from config """
uniqueid = ENVIRONMENT.get('uniqueid', '')
if not uniqueid:
uniqueid = workspace_guid()
log.error('Missing uniqueid - using workspace_guid: %s', uniqueid)
log.info('Setting HTTP_X_OPERATION_ID header: %s', uniqueid)
correlation_header = {'X-Operation-ID': uniqueid}
return correlation_header
def get_team_from_local():
''' Get default team from home dir ~/.wisk/wisk.cfg '''
config = ENVIRONMENT['config']
if config.has_option('common', 'team'):
return config.get('common', 'team', '')
return None
def cliprint(obj, cindent=4, indent=4):
''' Display Dictionaries, Lists and Scalars in an indented fashion for CLI display'''
retval = ''
    if isinstance(obj, dict):  # an OrderedDict is already a dict subclass
width = max([len(i) for i in obj.keys()]) + 1
for k in sorted(obj.keys()):
retval = retval + '\n{}{:<{}} '.format(' ' * cindent, k + ':', width) + cliprint(obj[k], cindent + indent)
return retval
elif isinstance(obj, list):
retval = retval + '['
sep = ''
for v in obj:
retval = retval + sep + '{0}'.format(' ' * cindent) + cliprint(v, cindent)
sep = ','
retval = retval + ']'
return retval
else:
retval = ' %s' % obj
retval = retval.replace('\n', '\n' + ' ' * cindent)
return retval
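# A small illustration of cliprint() (sample data hypothetical): nested
# dictionaries render as aligned "key: value" lines, each nesting level
# indented a further four spaces, while lists render inline in brackets.
#
#   print(cliprint({'name': 'ws1', 'meta': {'site': 'sjc', 'ids': [1, 2]}}))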
| 1.148438 | 1 |
opencorpora/__init__.py | OpenCorpora/opencorpora-tools | 3 | 101892 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .reader import CorpusReader
from .reader_lxml import load
| 0.554688 | 1 |
TheLengthofSequence.py | Arnabsaha6/Snakify | 0 | 102020 | <reponame>Arnabsaha6/Snakify
# Count how many numbers are entered before the terminating zero.
count = 0
while int(input()) != 0:
    count += 1
print(count) | 1.445313 | 1
renderchan/contrib/ffmpeg.py | ave4/RenderChan | 30 | 102148 |
__author__ = '<NAME>'
from renderchan.module import RenderChanModule
from renderchan.utils import which
import subprocess
import os
import random
class RenderChanFfmpegModule(RenderChanModule):
def __init__(self):
RenderChanModule.__init__(self)
self.conf['binary']=self.findBinary("ffmpeg")
self.conf["packetSize"]=0
def getInputFormats(self):
return ["mov", "avi", "mpg", "mp4"]
def getOutputFormats(self):
return ["png"]
def render(self, filename, outputPath, startFrame, endFrame, format, updateCompletion, extraParams={}):
updateCompletion(0.0)
if not os.path.exists(outputPath):
os.mkdir(outputPath)
# TODO: Progress callback
commandline=[self.conf['binary'], "-i", filename, os.path.join(outputPath,"output_%04d.png")]
subprocess.check_call(commandline)
updateCompletion(1.0)
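    # For reference, the subprocess call above is equivalent to running
    # (paths hypothetical):
    #
    #   ffmpeg -i input.mov /path/to/output/output_%04d.png
    #
    # Note that startFrame, endFrame, format and extraParams are currently
    # ignored, so the whole clip is always extracted.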
| 1.109375 | 1 |
lib/data/graph_dataset_base.py | shamim-hussain/egt | 7 | 102276 | <reponame>shamim-hussain/egt
import tensorflow as tf
from .svd import SVDFeatures
from .eigen_gt import EigenFeatures
from .graph import GraphMatrix
from .pipeline import ExcludeFeatures
class GraphDatasetBase:
def __init__(self,
max_length = None,
**kwargs
):
super().__init__(**kwargs)
self.max_length = max_length
self._inclusion_conditions = {}
def include_if(self, feature_name, condition):
self._inclusion_conditions[feature_name] = condition
def map_data_split(self, split, data):
excluded_feats = []
for feature_name, condition in self._inclusion_conditions.items():
if not condition():
excluded_feats.append(feature_name)
if len(excluded_feats)>0:
return data.map(ExcludeFeatures(excluded_feats))
else:
return data
class MatrixDatasetBase:
def __init__(self ,
normalize = False ,
symmetric = False ,
laplacian = False ,
return_edges = False ,
matrix_pad_value = 0. ,
**kwargs
):
super().__init__(**kwargs)
self.normalize = normalize
self.symmetric = symmetric
self.laplacian = laplacian
self.return_edges = return_edges
self.matrix_pad_value = matrix_pad_value
self.include_if('edges', lambda: self.return_edges)
def load_split(self, split):
db_record = super().load_split(split)
# AT = tf.data.experimental.AUTOTUNE
matrix = GraphMatrix(normalize=self.normalize,
symmetric=self.symmetric,
laplacian=self.laplacian)
mapped_record = db_record.map(matrix)
return mapped_record
def get_paddings(self):
return dict(
**super().get_paddings(),
graph_matrix = self.matrix_pad_value,
)
def get_padded_shapes(self):
return dict(
**super().get_padded_shapes(),
graph_matrix = [self.max_length, self.max_length],
)
class SVDDatasetBase:
def __init__(self,
normalize = False,
symmetric = False,
laplacian = False,
num_features = 16,
norm_for_svd = False,
norm_sym_for_svd = False,
mult_sing_vals = True,
return_mat = False,
return_sing_vals = False,
**kwargs
):
super().__init__(
normalize = normalize ,
symmetric = symmetric ,
laplacian = laplacian ,
**kwargs
)
self.num_features = num_features
self.norm_for_svd = norm_for_svd
self.norm_sym_for_svd = norm_sym_for_svd
self.mult_sing_vals = mult_sing_vals
self.return_mat = return_mat
self.return_sing_vals = return_sing_vals
        self.include_if('graph_matrix', lambda: self.return_mat)
        self.include_if('singular_values', lambda: self.return_sing_vals)
def load_split(self, split):
db_matrix = super().load_split(split)
AT = tf.data.experimental.AUTOTUNE
singular = SVDFeatures(num_features = self.num_features,
mult_sing_vals = self.mult_sing_vals,
norm_first = self.norm_for_svd,
norm_symmetric = self.norm_sym_for_svd)
return db_matrix.map(singular,AT)
def get_paddings(self):
return dict(
**super().get_paddings(),
singular_values = 0.,
singular_vectors = 0.,
)
def get_padded_shapes(self):
return dict(
**super().get_padded_shapes(),
singular_values = [self.num_features],
singular_vectors = [self.max_length, self.num_features, 2],
)
class EigenDatasetBase:
def __init__(self,
num_features = 8,
sparse = True,
**kwargs
):
super().__init__(**kwargs)
self.num_features = num_features
self.sparse = sparse
def load_split(self, split):
db_matrix = super().load_split(split)
AT = tf.data.experimental.AUTOTUNE
eigen = EigenFeatures(num_features=self.num_features,
sparse=self.sparse)
return db_matrix.map(eigen,AT)
def get_paddings(self):
return dict(
**super().get_paddings(),
eigen_vectors = 0.,
)
def get_padded_shapes(self):
return dict(
**super().get_padded_shapes(),
eigen_vectors = [self.max_length, self.num_features],
)
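# A sketch of how these mixins are meant to compose (the concrete class names
# here are assumptions, not part of this module): each base contributes to
# load_split(), get_paddings() and get_padded_shapes() through cooperative
# super() calls, so a dataset stacks features by listing mixins left to right
# ahead of a loader base that supplies the raw records.
#
#   class MyDataset(SVDDatasetBase, MatrixDatasetBase,
#                   RawGraphLoader,        # hypothetical raw-record loader
#                   GraphDatasetBase):
#       pass
#
#   ds = MyDataset(max_length=128, num_features=16, return_mat=True)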
| 1.789063 | 2 |