max_stars_repo_path (string, length 4-197) | max_stars_repo_name (string, length 6-120) | max_stars_count (int64, 0-191k) | id (string, length 1-8) | content (string, length 6-964k) | score (float64, -0.88 to 3.95) | int_score (int64, 0-4)
---|---|---|---|---|---|---
# === whats_fresh/whats_fresh_api/tests/views/test_stories.py | repo: osu-cass/whats-fresh-api | stars: 4 | id: 102404 | score: 1.578125 | int_score: 2 ===
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
import json
class StoriesTestCase(TestCase):
fixtures = ['test_fixtures']
def setUp(self):
user = User.objects.create_user(username='test', password='<PASSWORD>')
admin_group = Group(name='Administration Users')
admin_group.save()
user.groups.add(admin_group)
self.client.post(reverse('login'), {'username': 'test',
'password': '<PASSWORD>'})
self.maxDiff = None
self.expected_json = """
{
"error": {
"status": false,
"name": null,
"text": null,
"debug": null,
"level": null
},
"id": 1,
"name": "Star Wars",
"history": "A long time ago, in a galaxy far, far away...",
"facts": "Star Wars is awesome",
"buying": "Always buy all related products",
"preparing": "Fried",
"products": "Fish",
"season": "Spring",
"ext": {},
"images": [
{"caption": "Woof!", "link": "/media/dog.jpg", "name": "A dog"}
],
"videos": [
{"caption": "Traveling at the speed of light!", "name": "A Starship",
"link": "http://www.youtube.com/watch?v=efgDdSWDg0g"}
],
"created": "2014-08-08T23:27:05.568Z",
"modified": "2014-08-08T23:27:05.568Z"
}"""
self.expected_not_found = """
{
"error": {
"status": true,
"text": "Story id 999 was not found.",
"name": "Story Not Found",
"debug": "DoesNotExist: Story matching query does not exist.",
"level": "Error"
}
}"""
def test_url_endpoint(self):
url = reverse('story-details', kwargs={'id': '1'})
self.assertEqual(url, '/1/stories/1')
def test_story_items(self):
response = self.client.get(
reverse('story-details', kwargs={'id': '1'})).content
parsed_answer = json.loads(response)
expected_answer = json.loads(self.expected_json)
self.assertEqual(parsed_answer, expected_answer)
def test_story_not_found(self):
response = self.client.get(
reverse('story-details', kwargs={'id': '999'}))
parsed_answer = json.loads(response.content)
self.assertEqual(response.status_code, 404)
expected_answer = json.loads(self.expected_not_found)
self.assertEqual(parsed_answer, expected_answer)
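# Added note: the assertions above compare parsed JSON (json.loads on both
# sides) rather than raw strings, which keeps these tests insensitive to key
# order and whitespace in the response body. A one-line illustration:
#
#     json.loads('{"a": 1, "b": 2}') == json.loads('{ "b": 2, "a": 1 }')  # True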
# === scannerdoesntwork.py | repo: JNBarr/supersimple_pythonhackingtools | stars: 2 | id: 102532 | score: 1.351563 | int_score: 1 ===
"""
import socket
import struct
import binascii
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket. htnos(0x0800))
while True:
packet s.recvfrom(2048)
ethernet_header = packet [0:14]
eth_header = struct.unpack("!6s6s2s", ethernet_header)
print "Destination MAC: " + bianscii.hexlify(eth_header[0]) + " Source MAC:" + binascii.hexlify(eth_header[1]) + " Type:" + binascii
ipheader = pkt[0][14:34]
ip_header = struct.unpack("!12s4s4s", ipheader)
print "Source IP:" + socket.inet_ntoa(ip_header[1]) + " Destination IP:" + socket.inet_ntoa(ip_header[2])
"""
import socket
import struct
import binascii
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.htons(0x0800))
while True:
packet = s.recvfrom(2048)
ethernet_header = packet[0][0:14]
eth_header = struct.unpack("!6s6s2s", ethernet_header)
print "Destination MAC:" + binascii.hexlify(eth_header[0]) + " Source MAC:" + binascii.hexlify(eth_header[1]) + " Type: " + binascii.hexlify(eth_header[2])
    ipheader = packet[0][14:34]
ip_header = struct.unpack("!12s4s4s", ipheader)
print "Source IP:" + socket.inet_ntoa(ip_header[1]) + " Destination IP:" + socket.inet_ntoa(ip_header[2])
# === 05-containers/namedtuple_example.py | repo: johnehunt/Python3Intro | stars: 1 | id: 102660 | score: 2.21875 | int_score: 2 ===
from collections import namedtuple
# Creates a new tuple subclass called Person
Person = namedtuple('Person', ['age', 'gender', 'name'])
# Can use new subclass to create tuple-like objects
row1 = Person(age=22, gender='male', name='Gryff')
print(row1.age)
row2 = Person(age=22, gender='female', name='Phoebe')
print(row2.name)
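# Added sketch: namedtuple instances also ship a few helper methods from the
# standard library; the lines below only use row1/row2 defined above.
row3 = row1._replace(age=23)  # returns a new Person; tuples are immutable
print(row3.age)               # 23
print(row2._asdict())         # dict of the fields (OrderedDict on older Pythons)
age, gender, name = row2      # still a plain tuple, so unpacking works
print(name)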
# === beebole/base.py | repo: Dogeek/beebole | stars: 1 | id: 102788 | score: 1.976563 | int_score: 2 ===
from inspect import Parameter, _ParameterKind, signature, Signature
from typing import Any, Callable, Optional, Tuple, Union, List, Dict
from dacite import Config, from_dict
from requests import Response
from beebole.interfaces.responses import SimpleResponse
from beebole.exceptions import (
BeeboleException, BeeboleAPIException, BeeboleRateLimited,
BeeboleNotFound,
)
from beebole.utils import Sentinel
# TODO: Handle rate limiting
class BaseService:
name: str
config: Optional[Config] = None
def _get_service_name(self, method: Callable) -> str:
'''Gets the name of the service from the method's dunders'''
# Step 1: extract classname and method_name
classname, method_name = method.__qualname__.split('.')
if hasattr(method, '__servicename__'):
method_name = method.__servicename__
# Get the actual service class from its name (using __globals__ is a bit dirty)
service_class: 'BaseService' = method.__globals__.get(classname)
if service_class is None:
# This should never happen in theory...
raise BeeboleException('Service class not found')
# get the name and format it into the final service name, i.e.
# $service_name.$method_name ('person.add_group' for instance)
service_name = service_class.name
return f'{service_name}.{method_name}'
def _add_keyword_argument(
self, argname: str, default: Any, kwargs: dict,
annotation: type,
) -> Tuple[dict, Any]:
'''Add a keyword argument to a method. Handling is done in the decorator'''
value = kwargs.pop(argname, Sentinel())
if isinstance(value, Sentinel):
value = getattr(self, argname, default)
self.extra_args[argname] = Parameter(
argname, _ParameterKind.KEYWORD_ONLY,
default=default, annotation=annotation
)
return kwargs, value
def _parse_api_response(
self, response: Response, out_type: Optional[type] = None
) -> Union[dict, SimpleResponse]:
'''
Parses the API response using the return type of the method.
Raises exceptions when applicable.
'''
body = response.json()
if body['status'] == 'error' or response.status_code != 200:
if response.status_code == 429:
raise BeeboleRateLimited(response)
if response.status_code == 404:
raise BeeboleNotFound(response)
# Raise an exception if the response is an error
raise BeeboleAPIException(response)
# If a return type is set in the annotations, parse the body of the
# response with dacite. The config is set as part of a class attribute
if out_type is not None and isinstance(out_type, type):
return from_dict(
out_type, body,
config=self.config
)
return body
def _do_cast(self, arg: Any, param: Parameter) -> Any:
try:
if isinstance(param.annotation, type):
return param.annotation(arg)
return globals()[param.annotation](arg)
except KeyError:
raise BeeboleException(
f"Can't find type {param.annotation} globally."
)
except Exception:
raise BeeboleException((
f"Can't cast {param.name} into {param.annotation.__name__} "
f"for argument of type {type(arg)} : {arg}"
))
def _cast_input_args_or_raise(
self, args: List[Any], kwargs: Dict[str, Any], signature: Signature
) -> Tuple[List[Any], Dict[str, Any]]:
positional: List[Parameter] = []
keyword: Dict[str, Parameter] = {}
# Split keyword and positional arguments
for param in signature.parameters.values():
if param.kind in (_ParameterKind.POSITIONAL_ONLY, _ParameterKind.VAR_POSITIONAL):
positional.append(param)
elif param.kind in (_ParameterKind.KEYWORD_ONLY, _ParameterKind.VAR_KEYWORD):
keyword[param.name] = param
elif param.kind == _ParameterKind.POSITIONAL_OR_KEYWORD:
if param.name in kwargs:
keyword[param.name] = param
else:
positional.append(param)
# Cast positional arguments
for i, (arg, param) in enumerate(zip(args, positional)):
args[i] = self._do_cast(arg, param)
# Cast keyword arguments
for argname, argvalue in kwargs.items():
kwargs[argname] = self._do_cast(argvalue, keyword[argname])
return args, kwargs
def _requester(self, method: Callable):
'''
        Decorator that takes in a service method and makes it do the actual
request. Returns the response as a validated dataclass.
Flow:
- extract service name and method name from __qualname__
- construct the full service name
- fetch the payload by executing the method with its arguments
- add the service key to the payload as fetched previously
- perform the request
- fetch the return type from the method's __annotations__
- cast the response with the return type using dacite.
Set the __doc__, __name__ and __annotations__ attributes on the
wrapped method as to not interfere with static type checkers.
Use the functools.wraps decorator to preserve the original signature.
'''
def wrapped(*args, **kwargs):
service_name = self._get_service_name(method)
# Extract the external keyword argument before running the method
kwargs, is_external = self._add_keyword_argument(
'external', False, kwargs, bool
)
# FIXME: Fix this to support typing.Union, Optional, Enums, etc.
# args, kwargs = self._cast_input_args_or_raise(
# args, kwargs, signature(method)
# )
# Fetch the payload by running the wrapped method.
payload: dict = method(*args, **kwargs)
# Add the service to that payload (required by the API)
payload['service'] = service_name
if is_external and 'id' in payload:
# Handle external IDs
payload['xid'] = payload['id']
del payload['id']
# Perform the request using the client of the service
response: Response = self.client._request(payload)
return self._parse_api_response(
response, method.__annotations__.get('return')
) # Return the raw body otherwise
# Using inspect, get the signature of the original method
sig = signature(method)
# Hack to make the parameters dict mutable
sig._parameters = dict(sig.parameters)
# Add extra arguments to the signature
sig._parameters.update(self.extra_args)
# Finally, set all the documentation attributes of the wrapped method
# to their original values. Helpful to not interfere with static type
# checkers, and the help() built-in function.
wrapped.__annotations__ = method.__annotations__
wrapped.__doc__ = method.__doc__
wrapped.__name__ = method.__name__
wrapped.__signature__ = sig
return wrapped
def __init__(self, client, external: bool = False):
self.client = client
self.external = external
self.extra_args = {}
methods = (m for m in dir(self) if callable(getattr(self, m)))
for method_name in methods:
if method_name.startswith('_'):
# Do not decorate private methods and dunders
continue
method = getattr(self, method_name)
setattr(self, method_name, self._requester(method))
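# A hypothetical usage sketch (added): PersonService, add_group and the
# payload shape below are illustrative assumptions, not part of this module.
# A concrete service method only builds the request payload; the _requester
# wrapper applied in __init__ derives the full service name
# ('person.add_group') from __qualname__, injects the 'service' key, performs
# the request and parses the response via _parse_api_response:
#
# class PersonService(BaseService):
#     name = 'person'
#
#     def add_group(self, person_id: int, group_id: int) -> SimpleResponse:
#         '''Return only the payload; the decorator does the actual I/O.'''
#         return {'id': person_id, 'group': group_id}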
# === gopdb/api/wsgi/impl/__init__.py | repo: lolizeppelin/gopdb | stars: 0 | id: 102916 | score: 1.164063 | int_score: 1 ===
# -*- coding:utf-8 -*-
import abc
import six
from gopdb import common
from gopdb import privilegeutils
from gopdb import utils
from gopdb.api import endpoint_session
from gopdb.api import exceptions
from gopdb.models import GopDatabase
from gopdb.models import GopSalveRelation
from gopdb.models import GopSchema
from gopdb.models import SchemaQuote
from simpleservice.ormdb.api import model_count_with_key
from simpleservice.ormdb.api import model_query
from simpleutil.common.exceptions import InvalidArgument
from simpleutil.log import log as logging
from simpleutil.utils import argutils
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import and_
from sqlalchemy.sql import or_
LOG = logging.getLogger(__name__)
MANAGERCACHE = {}
def _impl(database_id):
try:
return MANAGERCACHE[database_id]
except KeyError:
session = endpoint_session(readonly=True)
try:
database = model_query(session, GopDatabase, GopDatabase.database_id == database_id).one()
if database_id not in MANAGERCACHE:
dbmanager = utils.impl_cls('wsgi', database.impl)
MANAGERCACHE.setdefault(database_id, dbmanager)
return MANAGERCACHE[database_id]
finally:
session.close()
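# Added note: _impl is a check-then-fill cache over MANAGERCACHE. Because the
# fill goes through dict.setdefault, two callers racing on the same
# database_id both return whichever manager was inserted first, so the race
# is benign.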
def _address(database_ids):
IMPLMAP = {}
NOT_CACHED = []
for database_id in database_ids:
if database_id in MANAGERCACHE:
try:
IMPLMAP[MANAGERCACHE[database_id]].append(database_id)
except KeyError:
IMPLMAP[MANAGERCACHE[database_id]] = [database_id, ]
else:
NOT_CACHED.append(database_id)
if NOT_CACHED:
session = endpoint_session(readonly=True)
query = model_query(session, (GopDatabase.database_id, GopDatabase.impl),
filter=GopDatabase.database_id.in_(NOT_CACHED))
for r in query:
dbmanager = utils.impl_cls('wsgi', r[1])
try:
IMPLMAP[dbmanager].append(r[0])
except KeyError:
IMPLMAP[dbmanager] = [r[0], ]
session.close()
maps = dict()
for dbmanager in IMPLMAP:
maps.update(dbmanager.address(IMPLMAP[dbmanager]))
return maps
@six.add_metaclass(abc.ABCMeta)
class DatabaseManagerBase(object):
# ----------database action-------------
def slaves_address(self, databases):
"""get slve database connect info"""
databases = set(databases)
session = endpoint_session(readonly=True)
query = model_query(session, GopDatabase, filter=GopDatabase.database_id.in_(databases))
databases = query.all()
slaves = set([slave.slave_id for database in databases for slave in database.slaves])
address_maps = self.address(slaves)
results = {}
for database in databases:
results[database.database_id] = []
for slave in database.slaves:
results[database.database_id].append({'database_id': slave.slave_id,
'address': address_maps[slave.slave_id]})
return results
def select_database(self, **kwargs):
dbtype = kwargs.pop('dbtype', 'mysql')
filters = [GopDatabase.status == common.OK, GopDatabase.dbtype == dbtype]
affinitys = kwargs.get('affinitys')
if affinitys:
affinitys = argutils.map_with(affinitys, int)
ors = []
            # match affinity using a bitwise AND
for affinity in affinitys:
ors.append(GopDatabase.affinity.op('&')(affinity) > 0)
if len(ors) > 1:
filters.append(or_(*ors))
else:
filters.append(ors[0])
session = endpoint_session(readonly=True)
query = model_query(session, GopDatabase, filter=and_(*filters))
query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
results = self._select_database(session, query, dbtype, **kwargs)
        # sort results by affinity, ascending
        # (the larger the affinity value, the broader the match range)
results.sort(key=lambda x: x['affinity'])
return results
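    # Added illustration: the affinity filter above is a plain bitmask test,
    # e.g. a database whose affinity is 0b0110 matches a request for affinity
    # 0b0010 or 0b0100 (non-zero AND), but not a request for 0b1000.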
@abc.abstractmethod
def _select_database(self, session, query, dbtype, **kwargs):
"""select database"""
def reflect_database(self, **kwargs):
session = endpoint_session(readonly=True)
_result = []
with self._reflect_database(session, **kwargs) as filters:
key = filters[0]
_filter = filters[1]
if _filter is None:
return _result
query = model_query(session, GopDatabase, filter=_filter)
for _database in query:
dbinfo = dict(database_id=_database.database_id,
dbtype=_database.dbtype,
slave=_database.slave)
dbinfo.setdefault(key, _database.reflection_id)
_result.append(dbinfo)
return _result
@abc.abstractmethod
def _reflect_database(self, session, **kwargs):
"""impl reflect code"""
def address(self, databases):
session = endpoint_session(readonly=True)
query = model_query(session, GopDatabase,
filter=GopDatabase.database_id.in_(databases))
databases = query.all()
maps = dict()
for database in databases:
maps[database.reflection_id] = database.database_id
return self._address(session, maps)
@abc.abstractmethod
def _address(self, session, dbmaps):
"""impl get database address code"""
def show_database(self, database_id, **kwargs):
"""show database info"""
session = endpoint_session(readonly=True)
quotes = kwargs.pop('quotes', False)
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == database_id)
_database = query.one()
if _database.slave:
schemas = []
# slave will find masters schemas to show
master_ids = model_query(session, GopSalveRelation.master_id,
filter=GopSalveRelation.slave_id == database_id).all()
if master_ids:
query = model_query(session, GopDatabase,
filter=and_(GopDatabase.database_id.in_([m[0] for m in master_ids]),
GopDatabase.slave == 0))
query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
for m_database in query.all():
schemas.extend(m_database.schemas)
else:
schemas = _database.schemas
if quotes:
quotes = [dict(entity=quote.entity,
endpoint=quote.endpoint,
quote_id=quote.quote_id,
schema_id=quote.schema_id)
for quote in _database.quotes]
else:
quotes = []
_result = dict(database_id=database_id,
impl=_database.impl,
dbtype=_database.dbtype,
dbversion=_database.dbversion,
status=_database.status,
reflection_id=_database.reflection_id,
slave=_database.slave,
affinity=_database.affinity,
schemas=[dict(schema=schema.schema,
schema_id=schema.schema_id
) for schema in schemas],
quotes=quotes)
if _database.slave:
_result.setdefault('masters', master_ids)
else:
# show master database slaves
_result.setdefault('slaves', _database.slaves)
with self._show_database(session, _database, **kwargs) as address:
host = address[0]
port = address[1]
_result.setdefault('host', host)
_result.setdefault('port', port)
return _result
@abc.abstractmethod
def _show_database(self, session, database, **kwargs):
"""impl show code"""
def create_database(self, user, passwd, dbtype, dbversion, affinity, **kwargs):
"""create new database intance"""
session = endpoint_session()
with session.begin():
bond = kwargs.pop('bond', None)
if bond:
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == bond)
bond = query.one_or_none()
if not bond:
                    raise InvalidArgument('Target slave database can not be found, can not bond to slave database')
if not bond.slave:
                    raise InvalidArgument('Target database is not a slave database')
if bond.status != common.OK:
                    raise InvalidArgument('Target slave database is not active')
count = model_count_with_key(session, GopSalveRelation,
filter=GopSalveRelation.slave_id == bond.database_id)
if count >= bond.slave:
raise InvalidArgument('Target slave database is full')
else:
bond = None
            _database = GopDatabase(user=user, passwd=passwd, slave=kwargs.pop('slave'),
dbtype=dbtype, dbversion=dbversion, affinity=affinity,
desc=kwargs.pop('desc', None))
_result = dict(dbversion=_database.dbversion,
slave=_database.slave,
dbtype=_database.dbtype)
with self._create_database(session, _database, bond, **kwargs) as address:
host = address[0]
port = address[1]
_result.setdefault('host', host)
_result.setdefault('port', port)
_result.setdefault('affinity', affinity)
session.add(_database)
session.flush()
if bond:
LOG.info('Add GopSalveRelation for database')
relation = GopSalveRelation(master_id=_database.database_id, slave_id=bond.database_id)
session.add(relation)
session.flush()
self._esure_create(_database, **kwargs)
_result.setdefault('database_id', _database.database_id)
return _result
@abc.abstractmethod
def _create_database(self, session, database, bond, **kwargs):
"""impl create code"""
def _esure_create(self, database, **kwargs):
"""impl esure create result"""
def delete_database(self, database_id, master, **kwargs):
"""delete master database intance"""
session = endpoint_session()
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == database_id)
if master:
query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
else:
query = query.options(joinedload(GopDatabase.quotes, innerjoin=False))
with session.begin():
_database = query.one()
_result = dict(database_id=_database.database_id, slave=_database.slave,
impl=_database.impl, dbtype=_database.dbtype,
dbversion=_database.dbversion)
            # delete the master database
if master:
if _database.slave:
                    raise exceptions.AcceptableDbError('Target database is a slave database')
if _database.schemas or _database.slaves:
raise exceptions.AcceptableDbError('can not delete master database, slaves or schemas exist')
if model_count_with_key(session, SchemaQuote.qdatabase_id,
filter=SchemaQuote.qdatabase_id == _database.database_id):
raise exceptions.AcceptableDbError('Database in schema quote list')
with self._delete_database(session, _database, **kwargs) as address:
host = address[0]
port = address[1]
_result.setdefault('host', host)
_result.setdefault('port', port)
query.delete()
            # delete the slave database
else:
if not _database.slave:
raise exceptions.AcceptableDbError('Target database is not a slave database')
if _database.quotes:
raise exceptions.AcceptableDbError('Target slave database in schema quote list')
_masters = [m[0] for m in model_query(session, GopSalveRelation.master_id,
filter=GopSalveRelation.slave_id == database_id).all()]
if _masters:
                    masters = model_query(session, GopDatabase,
                                          filter=and_(GopDatabase.database_id.in_(_masters),
                                                      GopDatabase.slave == 0)).all()
                    if len(_masters) != len(masters):
                        raise exceptions.UnAcceptableDbError('Target slave database master is missing')
                    raise exceptions.AcceptableDbError('Slave is bound to masters, unbond before deleting')
with self._delete_database(session, _database, **kwargs) as address:
query.delete()
host = address[0]
port = address[1]
_result.setdefault('host', host)
_result.setdefault('port', port)
return _result
@abc.abstractmethod
def _delete_database(self, session, database):
"""impl delete database code"""
def start_database(self, database_id, **kwargs):
session = endpoint_session(readonly=True)
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == database_id)
_database = query.one()
if _database.status != common.OK:
raise exceptions.AcceptableDbError('Database is not OK now')
return self._start_database(_database, **kwargs)
@abc.abstractmethod
def _start_database(self, database, **kwargs):
"""impl start a database code"""
def stop_database(self, database_id, **kwargs):
session = endpoint_session(readonly=True)
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == database_id)
_database = query.one()
return self._stop_database(_database, **kwargs)
@abc.abstractmethod
def _stop_database(self, database, **kwargs):
"""impl stop a database code"""
def status_database(self, database_id, **kwargs):
session = endpoint_session(readonly=True)
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == database_id)
_database = query.one()
return self._status_database(_database, **kwargs)
@abc.abstractmethod
def _status_database(self, database, **kwargs):
"""impl status a database code"""
def bond_database(self, database_id, **kwargs):
master_id = kwargs.pop('master')
file = kwargs.get('file')
position = kwargs.get('position')
schemas = kwargs.get('schemas')
if not file or not position:
raise InvalidArgument('Can not bond slave without file and position')
master = None
slave = None
relation = None
session = endpoint_session()
query = model_query(session, GopDatabase, filter=GopDatabase.database_id.in_([database_id, master_id]))
query = query.options(joinedload(GopDatabase.slaves, innerjoin=False))
with session.begin(subtransactions=True):
for database in query:
if database.database_id == database_id:
slave = database
elif database.database_id == master_id:
master = database
            # validate master and slave
if slave is None or slave.slave <= 0:
raise InvalidArgument('slave database with id %d can not be found' % database_id)
if slave.status != common.OK:
raise InvalidArgument('Slave database is not active')
if master is None or master.slave > 0:
raise InvalidArgument('Master database with id %d can not be found' % master_id)
if master.impl != slave.impl or master.dbtype != slave.dbtype:
raise InvalidArgument('Master and slave not the same type or impl')
_schemas = set([schema.schema for schema in master.schemas])
            # verify that the schemas recorded on the master are correct
if set(_schemas) != set(schemas):
raise ValueError('Master schemas info error')
for _relation in master.slaves:
if _relation.slave_id == database_id:
                    # found an existing binding relation
if _relation.ready:
raise InvalidArgument('Slave already in master slave list')
relation = _relation
break
            # no existing relation; check the slave limit
if not relation:
count = model_count_with_key(session, GopSalveRelation,
filter=GopSalveRelation.slave_id == database_id)
if count >= slave.slave:
raise InvalidArgument('Target slave database is full')
relation = GopSalveRelation(master_id=master.database_id, slave_id=slave.database_id)
session.add(relation)
session.flush()
return self._bond_database(session, master, slave, relation, **kwargs)
@abc.abstractmethod
def _bond_database(self, session, master, slave, relation, **kwargs):
"""impl bond slave database"""
@abc.abstractmethod
def _revoke_database_user(self, database, auth, **kwargs):
"""impl unbond slave database"""
def unbond_database(self, database_id, **kwargs):
master_id = kwargs.pop('master')
force = kwargs.get('force')
master = None
slave = None
relation = None
session = endpoint_session()
query = model_query(session, GopDatabase, filter=GopDatabase.database_id.in_([database_id, master_id]))
query = query.options(joinedload(GopDatabase.slaves, innerjoin=False))
with session.begin(subtransactions=True):
for database in query:
if database.database_id == database_id:
slave = database
elif database.database_id == master_id:
master = database
            # validate master and slave
if slave is None or slave.slave <= 0:
raise InvalidArgument('slave database with id %d can not be found' % database_id)
if slave.status != common.OK:
raise InvalidArgument('Slave database is not active')
if master is None or master.slave > 0:
raise InvalidArgument('Master database with id %d can not be found' % master_id)
if master.impl != slave.impl or master.dbtype != slave.dbtype:
raise InvalidArgument('Master and slave not the same type or impl')
for _relation in master.slaves:
if _relation.slave_id == database_id:
                    # found the binding relation
relation = _relation
break
if not relation:
raise InvalidArgument('Target slave database no relation with %d' % master.database_id)
if master.schemas and not force:
                raise InvalidArgument('Schemas exist in master database, can not unbond without the force argument')
return self._unbond_database(session, master, slave, relation, **kwargs)
@abc.abstractmethod
def _unbond_database(self, session, master, slave, relation, **kwargs):
"""impl unbond slave database"""
def slave_database(self, database_id, **kwargs):
slave_id = kwargs.pop('slave')
file = kwargs.get('file')
position = kwargs.get('position')
master = None
slave = None
session = endpoint_session()
query = model_query(session, GopDatabase, filter=GopDatabase.database_id.in_([database_id, slave_id]))
query = query.options(joinedload(GopDatabase.slaves, innerjoin=False))
with session.begin(subtransactions=True):
for database in query:
if database.database_id == database_id:
master = database
elif database.database_id == slave_id:
slave = database
            # validate master and slave
if slave is None or slave.slave <= 0:
raise InvalidArgument('slave database with id %d can not be found' % slave_id)
if slave.status != common.OK:
raise InvalidArgument('Slave database is not active')
if master is None or master.slave > 0:
raise InvalidArgument('Master database with id %d can not be found' % database_id)
if master.impl != slave.impl or master.dbtype != slave.dbtype:
raise InvalidArgument('Master and slave not the same type or impl')
schemas = [schema.schema for schema in master.schemas]
            # the master has schemas
if schemas:
if not file or not position:
raise InvalidArgument('Can not bond slave without file and position')
for _relation in master.slaves:
                # abort if a binding relation already exists
if _relation.slave_id == slave_id:
raise InvalidArgument('Slave already in master slave list')
            # no binding relation; check the slave limit
count = model_count_with_key(session, GopSalveRelation,
filter=GopSalveRelation.slave_id == slave_id)
if count >= slave.slave:
raise InvalidArgument('Target slave database is full')
kwargs['schemas'] = schemas
return self._slave_database(session, master, slave, **kwargs)
@abc.abstractmethod
def _slave_database(self, session, master, slave, **kwargs):
"""impl unbond slave database"""
def ready_relation(self, database_id, **kwargs):
slave_id = kwargs.pop('slave')
force = kwargs.pop('force')
session = endpoint_session()
query = model_query(session, GopSalveRelation, filter=and_(GopSalveRelation.master_id == database_id,
GopSalveRelation.slave_id == slave_id))
with session.begin(subtransactions=True):
relation = query.one_or_none()
if not relation:
raise InvalidArgument('Can not find relation of master %d slave %d' % (database_id, slave_id))
if relation.ready:
return None
if force:
relation.ready = True
return None
query = model_query(session, GopDatabase, filter=GopDatabase.database_id.in_([database_id, slave_id]))
query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
for database in query:
if database.database_id == database_id:
master = database
elif database.database_id == slave_id:
slave = database
return self._ready_relation(session, master, slave, relation, **kwargs)
@abc.abstractmethod
def _ready_relation(self, session, master, slave, relation, **kwargs):
"""impl check ready"""
# ----------schema action-------------
def show_schema(self, database_id, schema, **kwargs):
"""show schema info"""
session = endpoint_session()
query = model_query(session, GopSchema, filter=and_(GopSchema.database_id == database_id,
GopSchema.schema == schema))
secret = kwargs.pop('secret', False)
show_quotes = kwargs.pop('quotes', False)
if show_quotes:
query = query.options(joinedload(GopSchema.quotes, innerjoin=False))
_schema = query.one_or_none()
if not _schema:
            raise exceptions.AcceptableSchemaError('Schema can not be found in %d' % database_id)
_database = _schema.database
if _database.slave:
raise exceptions.AcceptableDbError('Database is slave, can not get schema')
_result = dict(database_id=database_id,
impl=_database.impl,
dbtype=_database.dbtype,
dbversion=_database.dbversion,
schema=_schema.schema,
schema_id=_schema.schema_id,
desc=_schema.desc)
if show_quotes:
_result.setdefault('quotes', [dict(quote_id=_quote.quote_id, desc=_quote.desc)
for _quote in _schema.quotes])
if secret:
_result.update({'user': _schema.user,
'passwd': _schema.passwd,
'ro_user': _schema.ro_user,
'ro_passwd': _schema.ro_passwd})
with self._show_schema(session, _database, _schema, **kwargs) as address:
host = address[0]
port = address[1]
_result.setdefault('host', host)
_result.setdefault('port', port)
return _result
@abc.abstractmethod
def _show_schema(self, session, database, schema, **kwargs):
"""impl show schema code"""
def create_schema(self, database_id, schema, auth, options, **kwargs):
"""create new schema intance on reflection_id"""
auths = privilegeutils.mysql_privileges(auth)
bond = kwargs.get('bond')
affinity = kwargs.get('affinity', None)
session = endpoint_session()
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == database_id)
query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
quote_id = 0
with session.begin():
_database = query.one()
_result = dict(database_id=database_id, impl=_database.impl,
dbtype=_database.dbtype, dbversion=_database.dbversion)
if _database.slave:
raise exceptions.AcceptableDbError('Database is slave, can not create schema')
if _database.status != common.OK:
raise exceptions.AcceptableDbError('Database is not OK now')
if affinity is not None and (_database.affinity & affinity) == 0:
raise exceptions.AcceptableDbError('Database affinity not match')
schemas = [_schema.schema for _schema in _database.schemas]
if schema in schemas:
raise exceptions.AcceptableDbError('Duplicate schema name %s' % schema)
options = options or {'character_set': 'utf8'}
with self._create_schema(session, _database, schema, auths, options, **kwargs) as address:
gop_schema = GopSchema(schema=schema,
database_id=_database.database_id,
user=auth.get('user'),
passwd=auth.get('passwd'),
ro_user=auth.get('ro_user'),
ro_passwd=auth.get('ro_passwd'),
source=auth.get('source') or '%',
rosource=auth.get('rosource') or '%',
character_set=options.get('character_set'),
collation_type=options.get('collation_type'))
session.add(gop_schema)
session.flush()
if bond:
_quote = SchemaQuote(schema_id=gop_schema.schema_id,
qdatabase_id=_database.database_id,
entity=bond.get('entity'),
endpoint=bond.get('endpoint'),
desc=bond.get('desc'))
session.add(_quote)
session.flush()
quote_id = _quote.quote_id
host = address[0]
port = address[1]
_result.setdefault('host', host)
_result.setdefault('port', port)
_result.setdefault('character_set', options.get('character_set'))
_result.setdefault('collation_type', options.get('collation_type'))
_result.setdefault('schema_id', gop_schema.schema_id)
_result.setdefault('schema', gop_schema.schema)
_result.setdefault('quote_id', quote_id)
return _result
@abc.abstractmethod
def _create_schema(self, session, database, schema, auths, options, **kwargs):
"""impl create new schema code"""
def copy_schema(self, src_database_id, src_schema,
dst_database_id, dst_schema, auth,
**kwargs):
"""create a schema"""
auths = privilegeutils.mysql_privileges(auth)
session = endpoint_session()
query = model_query(session, GopDatabase,
filter=GopDatabase.database_id.in_([src_database_id, dst_database_id]))
query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
src_database, dst_database = None, None
for _database in query.all():
if _database.database_id == src_database_id:
if _database.slave:
raise exceptions.AcceptableDbError('Source database is not master')
if not _database.passwd:
raise exceptions.AcceptableDbError('Source database has no passwd, can not copy')
                schemas = [_schema.schema for _schema in _database.schemas]
if src_schema not in schemas:
raise exceptions.AcceptableSchemaError('Source schemas %s not exist' % src_schema)
src_database = _database
elif _database.database_id == dst_database_id:
if _database.slave:
raise exceptions.AcceptableDbError('Destination database is not master')
if not _database.passwd:
raise exceptions.AcceptableDbError('Destination database has no passwd, can not copy')
                schemas = [_schema.schema for _schema in _database.schemas]
if dst_schema in schemas:
                    raise exceptions.AcceptableSchemaError('Destination schema %s already exists' % dst_schema)
dst_database = _database
        if not src_database or not dst_database:
            raise exceptions.AcceptableDbError('Source or destination database can not be found')
_result = dict(database_id=dst_database.database_id,
impl=dst_database.impl, dbtype=dst_database.dbtype,
dbversion=dst_database.dbversion)
with session.begin():
with self._copy_schema(session,
src_database, src_schema,
dst_database, dst_schema,
auths, **kwargs) as options:
character_set = options[0] or 'utf8'
collation_type = options[1]
                gop_schema = GopSchema(schema=dst_schema,
                                       database_id=dst_database.database_id,
                                       user=auth.get('user'),
                                       passwd=auth.get('passwd'),
                                       ro_user=auth.get('ro_user'),
                                       ro_passwd=auth.get('ro_passwd'),
                                       source=auth.get('source'),
                                       character_set=character_set,
                                       collation_type=collation_type)
session.add(gop_schema)
session.flush()
_result.setdefault('schema_id', gop_schema.schema_id)
_result.setdefault('schema', gop_schema.schema)
return _result
@abc.abstractmethod
def _copy_schema(self, session,
src_database, src_schema,
dst_database, dst_schema,
auths, **kwargs):
"""impl copy schema code"""
def delete_schema(self, database_id, schema, **kwargs):
"""delete schema intance on reflection_id"""
unquotes = set(kwargs.get('unquotes', []))
ignores = set(kwargs.get('ignores', []))
force = kwargs.get('force', False)
session = endpoint_session()
query = model_query(session, GopDatabase, filter=GopDatabase.database_id == database_id)
query = query.options(joinedload(GopDatabase.schemas, innerjoin=False))
with session.begin():
_database = query.one()
_result = dict(database_id=_database.database_id,
impl=_database.impl, dbtype=_database.dbtype,
dbversion=_database.dbversion)
if _database.slave:
raise exceptions.AcceptableDbError('can not delete schema from slave database')
squery = model_query(session, GopSchema, filter=and_(GopSchema.schema == schema,
GopSchema.database_id == database_id))
squery = squery.options(joinedload(GopSchema.quotes, innerjoin=False))
_schema = squery.one()
_result.setdefault('schema_id', _schema.schema_id)
_slaves = [_slave.slave_id for _slave in _database.slaves]
_slaves_q_query = model_query(session, SchemaQuote,
filter=and_(SchemaQuote.schema_id == _schema.schema_id,
SchemaQuote.qdatabase_id.in_(_slaves)))
quotes = {}
# quote of slave
slave_quotes = _slaves_q_query.all()
for _quotes_list in (slave_quotes, _schema.quotes):
if _quotes_list:
for _quote in _quotes_list:
quotes[_quote.quote_id] = _quote.desc
# check quotes
            for quote_id, desc in list(quotes.items()):
if quote_id in unquotes:
quotes.pop(quote_id, None)
if desc in ignores:
quotes.pop(quote_id, None)
if quotes:
if force:
for quote_id, desc in quotes.items():
LOG.warning('Quote %d: [%s] force delete' % (quote_id, desc))
else:
                    raise exceptions.AcceptableSchemaError('Schema in quote, can not be deleted')
with self._delete_schema(session, _database, _schema, **kwargs) as address:
host = address[0]
port = address[1]
_result.setdefault('host', host)
_result.setdefault('port', port)
_result.setdefault('schema', schema)
squery.delete()
return _result
@abc.abstractmethod
def _delete_schema(self, session, database, schema, **kwargs):
"""impl delete schema intance code"""
# === supplemental_content/migrations/0008_auto_20211005_1906.py | repo: PhilR8/cmcs-eregulations | stars: 6 | id: 103044 | score: 1.5 | int_score: 2 ===
# Generated by Django 3.2.7 on 2021-10-05 19:06
import django.core.validators
from django.db import migrations, models
from django.db.migrations.operations.fields import RemoveField
import django.db.models.deletion
from supplemental_content.models import AbstractModel
def make_category(id, title, description, order):
return {
"id": id,
"title": title,
"description": description,
"order": order,
"children": [],
}
def migrate_categories(apps, schema_editor):
OldCategory = apps.get_model("supplemental_content", "OldCategory")
Category = apps.get_model("supplemental_content", "Category")
SubCategory = apps.get_model("supplemental_content", "SubCategory")
# no cases of 3-level depth before now, so deal with 2 levels only
old_categories = OldCategory.objects.all()
parent_categories = [i for i in old_categories if i.parent is None]
child_categories = [i for i in old_categories if i.parent is not None]
new_categories = {}
# construct tree of old parent categories
for category in parent_categories:
new_categories[category.id] = make_category(
category.id, category.title, category.description, category.order
)
# append child categories
for child in child_categories:
try:
new_categories[child.parent.id]["children"].append(make_category(
child.id, child.title, child.description, child.order
))
except KeyError:
pass
# create new category objects
for category in list(new_categories.values()):
parent = Category.objects.create(
old_id=category["id"],
name=category["title"],
description=category["description"],
order=category["order"],
)
for child in category["children"]:
SubCategory.objects.create(
old_id=child["id"],
name=child["title"],
description=child["description"],
order=child["order"],
parent=parent,
)
def migrate_sections(apps, schema_editor):
OldRegulationSection = apps.get_model("supplemental_content", "OldRegulationSection")
Section = apps.get_model("supplemental_content", "Section")
for section in OldRegulationSection.objects.all():
Section.objects.create(
title=int(section.title),
part=int(section.part),
section_id=int(section.section),
old_id=section.id,
)
def migrate_supplemental_content(apps, schema_editor):
OldSupplementaryContent = apps.get_model("supplemental_content", "OldSupplementaryContent")
SupplementalContent = apps.get_model("supplemental_content", "SupplementalContent")
AbstractCategory = apps.get_model("supplemental_content", "AbstractCategory")
Section = apps.get_model("supplemental_content", "Section")
for content in OldSupplementaryContent.objects.all():
# acquire category from old ID
new_category = None
try:
if content.category:
new_category = AbstractCategory.objects.filter(old_id=content.category.id)[0]
except IndexError:
pass
# acquire list of sections from old ID's
new_sections = []
if content.sections:
for section in content.sections.all():
try:
new_sections.append(
Section.objects.filter(old_id=section.id)[0]
)
except IndexError:
pass
# build new supplemental content object
new_content = SupplementalContent.objects.create(
name=content.title,
description=content.description,
url=content.url,
date=content.date,
approved=content.approved,
created_at=content.created_at,
updated_at=content.updated_at,
category=new_category,
)
new_content.locations.set(new_sections)
new_content.save()
class Migration(migrations.Migration):
dependencies = [
('supplemental_content', '0007_auto_20210831_1612'),
]
operations = [
migrations.RenameModel(
old_name='Category',
new_name='OldCategory',
),
migrations.RenameModel(
old_name='RegulationSection',
new_name='OldRegulationSection',
),
migrations.RenameModel(
old_name='SupplementaryContent',
new_name='OldSupplementaryContent',
),
migrations.CreateModel(
name='AbstractCategory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, unique=True)),
('description', models.TextField(blank=True, null=True)),
('order', models.IntegerField(blank=True, default=0)),
('show_if_empty', models.BooleanField(default=False)),
('old_id', models.IntegerField()),
],
bases=(models.Model, AbstractModel),
),
migrations.CreateModel(
name='AbstractLocation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.IntegerField()),
('part', models.IntegerField()),
],
bases=(models.Model, AbstractModel),
),
migrations.CreateModel(
name='AbstractSupplementalContent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('approved', models.BooleanField(default=False)),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='supplemental_content', to='supplemental_content.abstractcategory')),
('locations', models.ManyToManyField(blank=True, null=True, related_name='supplemental_content', to='supplemental_content.AbstractLocation')),
],
bases=(models.Model, AbstractModel),
),
migrations.CreateModel(
name='Category',
fields=[
('abstractcategory_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractcategory')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
bases=('supplemental_content.abstractcategory',),
),
migrations.CreateModel(
name='Section',
fields=[
('abstractlocation_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractlocation')),
('section_id', models.IntegerField()),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='supplemental_content.abstractlocation')),
('old_id', models.IntegerField()),
],
options={
'verbose_name': 'Section',
'verbose_name_plural': 'Sections',
},
bases=('supplemental_content.abstractlocation',),
),
migrations.CreateModel(
name='SubCategory',
fields=[
('abstractcategory_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractcategory')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_categories', to='supplemental_content.category')),
],
options={
'verbose_name': 'Sub-category',
'verbose_name_plural': 'Sub-categories',
},
bases=('supplemental_content.abstractcategory',),
),
migrations.CreateModel(
name='SubjectGroup',
fields=[
('abstractlocation_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractlocation')),
('subject_group_id', models.CharField(max_length=512)),
],
options={
'verbose_name': 'Subject Group',
'verbose_name_plural': 'Subject Groups',
},
bases=('supplemental_content.abstractlocation',),
),
migrations.CreateModel(
name='Subpart',
fields=[
('abstractlocation_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractlocation')),
('subpart_id', models.CharField(max_length=12)),
],
options={
'verbose_name': 'Subpart',
'verbose_name_plural': 'Subparts',
},
bases=('supplemental_content.abstractlocation',),
),
migrations.CreateModel(
name='SubSubCategory',
fields=[
('abstractcategory_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractcategory')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_sub_categories', to='supplemental_content.subcategory')),
],
options={
'verbose_name': 'Sub-sub-category',
'verbose_name_plural': 'Sub-sub-categories',
},
bases=('supplemental_content.abstractcategory',),
),
migrations.CreateModel(
name='SupplementalContent',
fields=[
('abstractsupplementalcontent_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractsupplementalcontent')),
('name', models.CharField(blank=True, max_length=512, null=True)),
('description', models.TextField(blank=True, null=True)),
('url', models.URLField(blank=True, max_length=512, null=True)),
('date', models.CharField(blank=True, help_text='Leave blank or enter one of: "YYYY", "YYYY-MM", or "YYYY-MM-DD".', max_length=10, null=True, validators=[django.core.validators.RegexValidator(message='Date field must be blank or of format "YYYY", "YYYY-MM", or "YYYY-MM-DD"! For example: 2021, 2021-01, or 2021-01-31.', regex='^\\d{4}((-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]))|(-(0[1-9]|1[0-2])))?$')])),
],
options={
'verbose_name': 'Supplemental Content',
'verbose_name_plural': 'Supplemental Content',
},
bases=('supplemental_content.abstractsupplementalcontent',),
),
migrations.RunPython(migrate_sections),
migrations.RunPython(migrate_categories),
migrations.RunPython(migrate_supplemental_content),
migrations.AlterModelOptions(
name='section',
options={'ordering': ['title', 'part', 'section_id'], 'verbose_name': 'Section', 'verbose_name_plural': 'Sections'},
),
migrations.AlterModelOptions(
name='subjectgroup',
options={'ordering': ['title', 'part', 'subject_group_id'], 'verbose_name': 'Subject Group', 'verbose_name_plural': 'Subject Groups'},
),
migrations.AlterModelOptions(
name='subpart',
options={'ordering': ['title', 'part', 'subpart_id'], 'verbose_name': 'Subpart', 'verbose_name_plural': 'Subparts'},
),
migrations.AlterModelOptions(
name='abstractlocation',
options={'ordering': ['title', 'part', 'section__section_id', 'subpart__subpart_id', 'subjectgroup__subject_group_id']},
),
migrations.RemoveField(
model_name='AbstractCategory',
name='old_id',
),
migrations.RemoveField(
model_name='Section',
name='old_id',
),
migrations.AlterUniqueTogether(
name='oldregulationsection',
unique_together=None,
),
migrations.RemoveField(
model_name='oldregulationsection',
name='supplementary_content',
),
migrations.RemoveField(
model_name='oldsupplementarycontent',
name='category',
),
migrations.DeleteModel(
name='OldCategory',
),
migrations.DeleteModel(
name='OldRegulationSection',
),
migrations.DeleteModel(
name='OldSupplementaryContent',
),
]
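# Added note on this migration's shape: the legacy models are renamed first
# (OldCategory, OldRegulationSection, OldSupplementaryContent), the new
# polymorphic tree is created with a temporary old_id column, the three
# RunPython steps copy rows across using old_id as the join key, and only
# then are old_id and the legacy models dropped. On databases with
# transactional DDL, a failure in the copy step rolls back before anything
# is deleted.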
# === src/api/jobnavi/views/task_log_views.py | repo: Chromico/bk-base | stars: 84 | id: 103172 | score: 1.234375 | int_score: 1 ===
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from rest_framework.response import Response
from django.conf import settings
from common.decorators import params_valid, detail_route
from common.log import sys_logger
from common.views import APIViewSet
from jobnavi.api_helpers.resourcecenter.resourcecenter_helper import ResourcecenterHelper
from jobnavi.api.jobnavi_api import JobNaviApi
from jobnavi.exception.exceptions import InterfaceError
from jobnavi.views.serializers import (
RetrieveTaskLogSerializer,
CommonSerializer,
)
class TaskLogViewSet(APIViewSet):
"""
@apiDefine task_log
    Task log API
"""
lookup_field = "execute_id"
lookup_value_regex = "\\d+"
@params_valid(serializer=RetrieveTaskLogSerializer)
def retrieve(self, request, cluster_id, execute_id, params):
"""
        @api {get} /jobnavi/cluster/:cluster_id/task_log/:execute_id/?begin=X&end=X retrieve task log content
@apiName retrieve_task_log
@apiGroup task_log
        @apiParam {long} execute_id ID of the task execution record
        @apiParam {int} begin start offset of the read, in bytes
        @apiParam {int} end end offset of the read, in bytes (the byte at position end is excluded)
        @apiParamExample {json} Example parameters:
{
"begin": 0,
"end": 520
}
        @apiSuccessExample {json} Successful response
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": {
"lines_begin": 0,
"lines_end": 512,
"lines": "log_content"
},
"result": true
}
"""
geog_area_code = params["tags"][0]
jobnavi = JobNaviApi(geog_area_code, cluster_id)
begin = request.GET.get("begin")
end = request.GET.get("end")
result = jobnavi.retrieve_task_log_info(execute_id)
if not result or not result.is_success():
raise InterfaceError(message=result.message)
info = json.loads(result.data)
log_url = info["url"]
aggregate_time = info["aggregate_time"]
result = jobnavi.retrieve_task_log(execute_id, begin, end, log_url, aggregate_time)
if not result or not result.is_success():
raise InterfaceError(message=result.message)
else:
return Response(json.loads(result.data))
@detail_route(methods=["get"], url_path="query_file_size")
@params_valid(serializer=CommonSerializer)
def query_file_size(self, request, cluster_id, execute_id, params):
"""
        @api {get} /jobnavi/cluster/:cluster_id/task_log/:execute_id/query_file_size/ query log file size
@apiName query_file_size
@apiGroup task_log
        @apiParamExample {json} Example parameters:
{
}
        @apiSuccessExample {json} Successful response
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": {
"file_size" : 1024,
"status" : "finished"
},
"result": true
}
"""
geog_area_code = params["tags"][0]
jobnavi = JobNaviApi(geog_area_code, cluster_id)
result = jobnavi.retrieve_task_log_info(execute_id)
if not result or not result.is_success():
raise InterfaceError(message=result.message)
info = json.loads(result.data)
log_url = info["url"]
aggregate_time = info["aggregate_time"]
result = jobnavi.query_task_log_file_size(execute_id, log_url, aggregate_time)
if not result or not result.is_success():
raise InterfaceError(message=result.message)
file_size = result.data
result = jobnavi.get_execute_status(execute_id)
if not result or not result.is_success():
raise InterfaceError(message=result.message)
status = json.loads(result.data)["status"]
data = {"file_size": file_size, "status": status}
return Response(data)
@detail_route(methods=["get"], url_path="query_application_id")
@params_valid(serializer=CommonSerializer)
def query_application_id(self, request, cluster_id, execute_id, params):
"""
        @api {get} /jobnavi/cluster/:cluster_id/task_log/:execute_id/query_application_id/ query the task's application ID
@apiName query_application_id
@apiGroup task_log
        @apiParamExample {json} Example parameters:
{
}
        @apiSuccessExample {json} Successful response
HTTP/1.1 200 OK
{
"message": "ok",
"code": "1500200",
"data": "application_XXX_XXX",
"result": true
}
"""
try:
resourcecenter = ResourcecenterHelper()
resource_type = settings.JOBNAVI_RESOURCE_TYPE
cluster_type = settings.JOBNAVI_CLUSTER_TYPE
# query job submit ID for given execute ID
cluster_instances = resourcecenter.query_job_submit_instances(resource_type, cluster_type, execute_id)
submit_id = None
for instance in cluster_instances:
# get latest submit
if submit_id is None or ("submit_id" in instance and int(instance["submit_id"]) > submit_id):
submit_id = int(instance["submit_id"])
if submit_id is not None:
processing_resource_type = settings.PROCESSING_RESOURCE_TYPE
# retrieve application ID of given execute from resource center
applications = resourcecenter.retrieve_job_submit_instances(submit_id, processing_resource_type)
if isinstance(applications, list) and len(applications) > 0:
if "inst_id" in applications[0]:
return Response(applications[0]["inst_id"])
except Exception as e:
            sys_logger.exception("Failed to query task application ID from the resource system: %s" % e)
# application ID not found in resource center, extract from task log
geog_area_code = params["tags"][0]
jobnavi = JobNaviApi(geog_area_code, cluster_id)
result = jobnavi.retrieve_task_log_info(execute_id)
if not result or not result.is_success():
raise InterfaceError(message=result.message)
info = json.loads(result.data)
log_url = info["url"]
aggregate_time = info["aggregate_time"]
result = jobnavi.query_application_id(execute_id, log_url, aggregate_time)
if not result or not result.is_success():
raise InterfaceError(message=result.message)
else:
return Response(result.data)
# === Ryu_Application/authenticators/dot1xforwarder/rest.py | repo: MrLeeang/ACLSwitch | stars: 0 | id: 103300 | score: 1.5625 | int_score: 2 ===
"""
This file contains the REST class for the 802.1X controller.
"""
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 <NAME> <yamahata at private email ne jp>
# Copyright (C) 2014 <NAME> < joe at wand net nz >
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import socket
from webob import Response
from ryu.app.wsgi import ControllerBase
class UserController(ControllerBase):
"""The REST class, that accepts requests for the user that is tryin to authenticate using 802.1X
"""
def __init__(self, req, link, data, **config):
super(UserController, self).__init__(req, link, data, **config)
self.dpList = data
min_lvl = logging.DEBUG
console_handler = logging.StreamHandler()
console_handler.setLevel(min_lvl)
#formatter = logging.Formatter("%(asctime)s - %(levelname)s - "
# "%(name)s - %(message)s")
formatter = logging.Formatter("%(levelname)s - %(name)s - %("
"message)s")
console_handler.setFormatter(formatter)
logging_config = {"min_lvl": min_lvl, "propagate":
False, "handler": console_handler}
self._logging = logging.getLogger(__name__)
self._logging.setLevel(logging_config["min_lvl"])
self._logging.propagate = logging_config["propagate"]
self._logging.addHandler(logging_config["handler"])
self._logging.info("Started Dot1XForwarder's REST interface...");
@staticmethod
def register(wsgi):
s = wsgi.mapper.submapper(controller=UserController)
s.connect('idle', '/idle', action='idle_post',
conditions=dict(method=['POST']))
s.connect('auth', '/authenticate/auth', action="authenticate_post", conditions=dict(method=['POST']))
s.connect('auth', '/authenticate/auth', action="authenticate_delete", conditions=dict(method=['DELETE']))
def authenticate_post(self, req, **kwargs):
try:
authJSON = json.loads(req.body)
except:
return Response(status=400, body="Unable to parse JSON")
self._logging.info("POST with JSON, MAC: %s, User: %s", authJSON['mac'], authJSON['user'])
self.dpList.new_client(authJSON['mac'], authJSON['user'])
return Response(status=200)
def idle_post(self, req, **_kwargs):
"""the REST endpoint for an HTTP POST when the client has been idle.
"""
try:
authJSON = json.loads(req.body)
except:
return Response(status=400, body="Unable to parse JSON")
mac = authJSON['mac']
user = authJSON['user']
retrans = authJSON['retrans']
self._logging.info("retrans: %s, MAC: %s, user: %s", str(retrans), mac, user)
        self.dpList.idle_mac(mac, retrans)
        return Response(status=200)
@staticmethod
def validate(address):
"""is the ip address given an actual IP address.
"""
try:
socket.inet_aton(address)
return True
        except socket.error:
return False
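        # e.g. validate("10.0.0.1") -> True; validate("not-an-ip") -> False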
def authenticate_delete(self, req, **_kwargs):
try:
authJSON = json.loads(req.body)
        except ValueError:
return Response(status=400, body="Unable to parse JSON")
# TODO
user = authJSON['user']
mac = authJSON['mac']
self.dpList.log_client_off(mac, user)
self._logging.info("User %s at mac %s should now logged off.", user, mac)
return Response(status=200)
| 1.5625 | 2 |
apps/logs/tests/test_api_importbatch.py | techlib/czechelib-stats | 1 | 103428 | import pytest
from django.urls import reverse
from logs.models import ImportBatch
from test_fixtures.entities.credentials import CredentialsFactory
from test_fixtures.entities.fetchattempts import FetchAttemptFactory
from test_fixtures.entities.logs import ManualDataUploadFactory, ImportBatchFullFactory
from test_fixtures.entities.scheduler import FetchIntentionFactory
from test_fixtures.scenarios.basic import (
users,
report_types,
counter_report_types,
organizations,
platforms,
data_sources,
clients,
identities,
credentials,
) # noqa
@pytest.mark.django_db
class TestImportBatchesAPI:
lookup_url = reverse('import-batch-lookup')
purge_url = reverse('import-batch-purge')
@pytest.fixture()
def data(
self, report_types, counter_report_types, organizations, platforms, clients, credentials
):
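        # Fixture overview: "standalone" gets TR data for 2020-01 (one failed
        # 3031 attempt plus one imported batch), TR and BR1 batches for
        # 2020-02, and a manual upload (MDU) with TR batches for 2020-03/04;
        # "branch" gets a PR batch for 2020-01 plus an MDU with PR batches
        # for 2020-02 through 2020-04.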
FetchIntentionFactory(
start_date="2020-01-01",
end_date="2020-01-31",
credentials=credentials["standalone_tr"],
counter_report=counter_report_types["tr"],
attempt=FetchAttemptFactory(
start_date="2020-01-01",
end_date="2020-01-31",
error_code="3031",
credentials=credentials["standalone_tr"],
counter_report=counter_report_types["tr"],
import_batch=None,
),
)
FetchIntentionFactory(
start_date="2020-01-01",
end_date="2020-01-31",
credentials=credentials["standalone_tr"],
counter_report=counter_report_types["tr"],
attempt=FetchAttemptFactory(
start_date="2020-01-01",
end_date="2020-01-31",
credentials=credentials["standalone_tr"],
counter_report=counter_report_types["tr"],
import_batch=ImportBatchFullFactory(
date="2020-01-01",
organization=organizations["standalone"],
platform=platforms["standalone"],
report_type=report_types["tr"],
),
),
)
FetchIntentionFactory(
start_date="2020-02-01",
end_date="2020-02-29",
credentials=credentials["standalone_tr"],
counter_report=counter_report_types["tr"],
attempt=FetchAttemptFactory(
start_date="2020-02-01",
end_date="2020-02-29",
credentials=credentials["standalone_tr"],
counter_report=counter_report_types["tr"],
import_batch=ImportBatchFullFactory(
date="2020-02-01",
organization=organizations["standalone"],
platform=platforms["standalone"],
report_type=report_types["tr"],
),
),
)
FetchIntentionFactory(
start_date="2020-02-01",
end_date="2020-02-29",
credentials=credentials["standalone_br1_jr1"],
counter_report=counter_report_types["br1"],
attempt=FetchAttemptFactory(
start_date="2020-02-01",
end_date="2020-02-29",
credentials=credentials["standalone_br1_jr1"],
counter_report=counter_report_types["br1"],
import_batch=ImportBatchFullFactory(
date="2020-02-01",
organization=organizations["standalone"],
platform=platforms["standalone"],
report_type=report_types["br1"],
),
),
)
FetchIntentionFactory(
start_date="2020-01-01",
end_date="2020-01-31",
credentials=credentials["branch_pr"],
counter_report=counter_report_types["pr"],
attempt=FetchAttemptFactory(
start_date="2020-01-01",
end_date="2020-01-31",
credentials=credentials["branch_pr"],
counter_report=counter_report_types["pr"],
import_batch=ImportBatchFullFactory(
date="2020-01-01",
organization=organizations["branch"],
platform=platforms["branch"],
report_type=report_types["pr"],
),
),
)
ManualDataUploadFactory(
organization=organizations["standalone"],
platform=platforms["standalone"],
report_type=report_types["tr"],
import_batches=(
ImportBatchFullFactory(
date="2020-03-01",
organization=organizations["standalone"],
platform=platforms["standalone"],
report_type=report_types["tr"],
),
ImportBatchFullFactory(
date="2020-04-01",
organization=organizations["standalone"],
platform=platforms["standalone"],
report_type=report_types["tr"],
),
),
)
ManualDataUploadFactory(
organization=organizations["branch"],
platform=platforms["branch"],
report_type=report_types["pr"],
import_batches=(
ImportBatchFullFactory(
date="2020-02-01",
organization=organizations["branch"],
platform=platforms["branch"],
report_type=report_types["pr"],
),
ImportBatchFullFactory(
date="2020-03-01",
organization=organizations["branch"],
platform=platforms["branch"],
report_type=report_types["pr"],
),
ImportBatchFullFactory(
date="2020-04-01",
organization=organizations["branch"],
platform=platforms["branch"],
report_type=report_types["pr"],
),
),
)
def test_lookup(self, data, clients, organizations, platforms, report_types):
# empty lookup
resp = clients["su"].post(self.lookup_url, data=[], format="json")
assert resp.status_code == 200
assert resp.data == []
resp = clients["su"].post(
f"{self.lookup_url}?order_by=date",
[
{
"organization": organizations["standalone"].pk,
"platform": platforms["standalone"].pk,
"report_type": report_types["tr"].pk,
"months": ["2020-02-01", "2020-03-01"],
}
],
format="json",
)
assert resp.status_code == 200
assert len(resp.data) == 2
assert resp.data[0]["date"] == "2020-02-01"
assert resp.data[0]["report_type"]["pk"] == report_types["tr"].pk
assert resp.data[0]["organization"]["pk"] == organizations["standalone"].pk
assert resp.data[0]["platform"]["pk"] == platforms["standalone"].pk
assert not resp.data[0]["mdu"]
assert resp.data[0]["sushifetchattempt"]
assert resp.data[1]["date"] == "2020-03-01"
assert resp.data[1]["report_type"]["pk"] == report_types["tr"].pk
assert resp.data[1]["organization"]["pk"] == organizations["standalone"].pk
assert resp.data[1]["platform"]["pk"] == platforms["standalone"].pk
assert resp.data[1]["mdu"]
assert not resp.data[1]["sushifetchattempt"]
resp = clients["su"].post(
f"{self.lookup_url}?order_by=date",
[
{
"organization": organizations["standalone"].pk,
"platform": platforms["standalone"].pk,
"report_type": report_types["br1"].pk,
"months": ["2020-01-02", "2020-02-01", "2020-03-01"],
},
{
"organization": organizations["branch"].pk,
"platform": platforms["branch"].pk,
"report_type": report_types["pr"].pk,
"months": ["2020-03-01"],
},
],
format="json",
)
assert resp.status_code == 200
assert len(resp.data) == 2
assert resp.data[0]["date"] == "2020-02-01"
assert resp.data[0]["report_type"]["pk"] == report_types["br1"].pk
assert resp.data[0]["organization"]["pk"] == organizations["standalone"].pk
assert resp.data[0]["platform"]["pk"] == platforms["standalone"].pk
assert not resp.data[0]["mdu"]
assert resp.data[0]["sushifetchattempt"]
assert resp.data[1]["date"] == "2020-03-01"
assert resp.data[1]["report_type"]["pk"] == report_types["pr"].pk
assert resp.data[1]["organization"]["pk"] == organizations["branch"].pk
assert resp.data[1]["platform"]["pk"] == platforms["branch"].pk
assert resp.data[1]["mdu"]
assert not resp.data[1]["sushifetchattempt"]
def test_purge(self, data, clients):
# simple delete
batches = list(ImportBatch.objects.all().order_by('pk').values_list("pk", flat=True))
resp = clients["su"].post(self.purge_url, {"batches": [batches[0]]}, format="json")
assert resp.status_code == 200
assert resp.data == {
'logs.AccessLog': 20,
'sushi.SushiFetchAttempt': 2,
'scheduler.FetchIntention': 2,
'logs.ImportBatch': 1,
}, "remove ib with fetch attempt (second is 3031)"
assert ImportBatch.objects.count() == len(batches) - 1
resp = clients["su"].post(self.purge_url, {"batches": [batches[0]]}, format="json")
assert resp.data == {}, "retry same request - nothing deleted"
assert ImportBatch.objects.count() == len(batches) - 1
resp = clients["su"].post(self.purge_url, {"batches": [batches[-1]]}, format="json")
assert resp.status_code == 200
assert resp.data == {
'logs.AccessLog': 20,
'logs.ImportBatch': 1,
'logs.ManualDataUploadImportBatch': 1,
}, "remove ib from mdu"
assert ImportBatch.objects.count() == len(batches) - 2
resp = clients["su"].post(self.purge_url, {"batches": batches[-3:]}, format="json")
assert resp.status_code == 200
assert resp.data == {
'logs.AccessLog': 40,
'logs.ImportBatch': 2,
'logs.ManualDataUpload': 1,
'logs.ManualDataUploadImportBatch': 2,
}, "remove last ibs from mdu"
assert ImportBatch.objects.count() == len(batches) - 4
resp = clients["su"].post(self.purge_url, {"batches": batches}, format="json")
assert resp.status_code == 200
assert resp.data == {
'logs.AccessLog': 100,
'logs.ImportBatch': 5,
'logs.ManualDataUpload': 1,
'logs.ManualDataUploadImportBatch': 2,
'scheduler.FetchIntention': 3,
'sushi.SushiFetchAttempt': 3,
}, "remove rest"
assert ImportBatch.objects.count() == 0
def test_lookup_and_purge(self, data, clients, organizations, platforms, report_types):
# lookup
resp = clients["su"].post(
f"{self.lookup_url}?order_by=date",
[
{
"organization": organizations["standalone"].pk,
"platform": platforms["standalone"].pk,
"report_type": report_types["br1"].pk,
"months": ["2020-01-01", "2020-02-01", "2020-03-01"],
},
{
"organization": organizations["branch"].pk,
"platform": platforms["branch"].pk,
"report_type": report_types["pr"].pk,
"months": ["2020-01-01", "2020-02-01", "2020-03-01"],
},
],
format="json",
)
assert resp.status_code == 200
data = resp.data
count = ImportBatch.objects.count()
resp = clients["su"].post(
self.purge_url, {"batches": [e["pk"] for e in data]}, format="json"
)
assert resp.status_code == 200
assert resp.data['logs.ImportBatch'] == len(data), "number of deleted ibs"
assert ImportBatch.objects.count() == count - len(data), "all ibs were deleted"
| 1.179688 | 1 |
project/cfpq/cfpq.py | achains/formal-lang-course | 0 | 103556 | <reponame>achains/formal-lang-course<gh_stars>0
from networkx import MultiDiGraph
from pyformlang.cfg import CFG, Variable
from typing import Set, Tuple, Callable
from project.grammars.hellings import hellings
from project.grammars.matrix_based import matrix_based
from project.grammars.tensor_based import tensor_based
from project.utils.cfg_utils import transform_cfg_to_wcnf, is_wcnf
__all__ = ["cfpq_hellings", "cfpq_matrix", "cfpq_tensor"]
def _filter_pairs(
pairs: Set[Tuple[int, int]],
start_nodes: Set[int] = None,
final_nodes: Set[int] = None,
) -> Set[Tuple[int, int]]:
"""
Filter pairs.
Keep pairs where first node in start nodes and second node in final nodes
Parameters
----------
pairs: Set[Tuple[int, int]]
Pairs obtained from cfpq algorithm
start_nodes: Set[int], default = None
Start nodes
final_nodes: Set[int], default = None
Final nodes
Returns
-------
filtered_pairs: Set[Tuple[int, int]]
Filtered pairs according to start and final nodes
"""
if start_nodes:
pairs = {(u, v) for u, v in pairs if u in start_nodes}
if final_nodes:
pairs = {(u, v) for u, v in pairs if v in final_nodes}
return pairs
def _cfpq(
graph: MultiDiGraph,
cfg: CFG,
start_nodes: Set[int] = None,
final_nodes: Set[int] = None,
start_var: Variable = Variable("S"),
algorithm: Callable = hellings,
) -> Set[Tuple[int, int]]:
"""
Context-Free Path Querying function
Available algorithms:
1. hellings
2. matrix_based
      3. tensor_based
Parameters
----------
graph: MultiDiGraph
Labeled graph for the Path Querying task
cfg: CFG
Query given in Context Free Grammar form
start_nodes: set, default=None
Set of graph start nodes
final_nodes: set, default=None
Set of graph final nodes
start_var: Variable, default=Variable("S")
Start variable of a grammar
Returns
-------
cfpq: Set[Tuple[int, int]]
Context Free Path Querying
"""
cfg._start_symbol = start_var
wcnf = cfg if is_wcnf(cfg) else transform_cfg_to_wcnf(cfg)
reach_pairs = {
(u, v) for u, h, v in algorithm(wcnf, graph) if h == wcnf.start_symbol
}
return _filter_pairs(reach_pairs, start_nodes, final_nodes)
def cfpq_hellings(
graph: MultiDiGraph,
cfg: CFG,
start_nodes: Set[int] = None,
final_nodes: Set[int] = None,
start_var: Variable = Variable("S"),
) -> Set[Tuple[int, int]]:
"""
Context-Free Path Querying based on Hellings Algorithm
Parameters
----------
graph: MultiDiGraph
Labeled graph for the Path Querying task
cfg: CFG
Query given in Context Free Grammar form
start_nodes: Set[int], default=None
Set of graph start nodes
final_nodes: Set[int], default=None
Set of graph final nodes
start_var: Variable, default=Variable("S")
Start variable of a grammar
Returns
-------
cfpq: Set[Tuple[int, int]]
Context Free Path Querying
"""
return _cfpq(graph, cfg, start_nodes, final_nodes, start_var, algorithm=hellings)
def cfpq_matrix(
graph: MultiDiGraph,
cfg: CFG,
start_nodes: Set[int] = None,
final_nodes: Set[int] = None,
start_var: Variable = Variable("S"),
) -> Set[Tuple[int, int]]:
"""
Context-Free Path Querying based on Matrix Multiplication
Parameters
----------
graph: MultiDiGraph
Labeled graph for the Path Querying task
cfg: CFG
Query given in Context Free Grammar form
start_nodes: Set[int], default=None
Set of graph start nodes
final_nodes: Set[int], default=None
Set of graph final nodes
start_var: Variable, default=Variable("S")
Start variable of a grammar
Returns
-------
cfpq: Set[Tuple[int, int]]
Context Free Path Querying
"""
return _cfpq(
graph, cfg, start_nodes, final_nodes, start_var, algorithm=matrix_based
)
def cfpq_tensor(
graph: MultiDiGraph,
cfg: CFG,
start_nodes: Set[int] = None,
final_nodes: Set[int] = None,
start_var: Variable = Variable("S"),
) -> Set[Tuple[int, int]]:
"""
Context-Free Path Querying based on Kronecker product
Parameters
----------
graph: MultiDiGraph
Labeled graph for the Path Querying task
cfg: CFG
Query given in Context Free Grammar form
start_nodes: Set[int], default=None
Set of graph start nodes
final_nodes: Set[int], default=None
Set of graph final nodes
start_var: Variable, default=Variable("S")
Start variable of a grammar
Returns
-------
cfpq: Set[Tuple[int, int]]
Context Free Path Querying
"""
return _cfpq(
graph, cfg, start_nodes, final_nodes, start_var, algorithm=tensor_based
)
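# Minimal usage sketch (illustrative; not part of the course tasks). It assumes
# edge labels live under networkx's "label" attribute, which is what the
# algorithms above expect when matching terminals.
if __name__ == "__main__":
    _g = MultiDiGraph()
    _g.add_edge(0, 1, label="a")
    _g.add_edge(1, 2, label="b")
    _grammar = CFG.from_text("S -> a b")
    print(cfpq_hellings(_g, _grammar))  # expected: {(0, 2)}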
| 2.734375 | 3 |
bin/libsw/wordpress.py | Rondore/sitewrangler | 0 | 103684 | #!/usr/bin/env python3
import os
import time
import subprocess
import random
import inquirer
import stat
import wget
from libsw import php, nginx, user, bind, cert, db, settings, input_util
from getpass import getpass
from mysql import connector
from pwd import getpwnam
def list_installations():
"""
List all domains with a valid WordPress installation
"""
sites = nginx.enabled_sites()
wp_sites = []
for site in sites:
if is_wordpress_installation(site):
wp_sites.append(site)
return wp_sites
def select_installation(query_message):
"""
Have the user select from a list of all domains with enabled vhost files.
Args:
query_message - The message to display to the user in the prompt
"""
domain_list = list_installations()
questions = [
inquirer.List('f',
message=query_message,
choices=domain_list
)
]
domain = inquirer.prompt(questions)['f']
return domain
def is_wordpress_installation(domain):
"""
Check if a domain has a valid WordPress installation
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
webroot = user.webroot(sys_user)
if os.path.exists(webroot + 'wp-content') and \
os.path.exists(webroot + 'wp-includes') and \
os.path.exists(webroot + 'wp-config.php'):
return True
return False
def wp_cron_disabled(domain):
"""
    Check if a domain has its built-in cron disabled
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
webroot = user.webroot(sys_user)
output = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DISABLE_WP_CRON')
output = output.lower()
if output == 'true':
return True
return False
def sys_cron_enabled(domain):
"""
Check if a domain has a system cron
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
output = subprocess.getoutput('sudo -u "' + sys_user + '" -i crontab -l 2>/dev/null | grep -Ev "^[ \s]*#"')
if output.find('/wp-cron.php') == -1:
return False
return True
# for line in output:
# if line.endswith('/wp-cron.php'):
# return True
# return False
def get_version(domain):
"""
Check the WordPress version for a domain
Args:
domain - The domain associated with the installation
"""
sys_user = nginx.user_from_domain(domain)
webroot = user.webroot(sys_user)
return subprocess.getoutput('sudo -u "' + sys_user + '" -i wp core version --path="' + webroot + '"')
def get_db_info(sys_user, webroot=False):
"""
Get the database name, user and password for an existing WordPress
installation.
Args:
    sys_user - The system user that the WordPress site is stored in
webroot - (optional) the webroot for the WordPress installation
"""
if webroot == False:
webroot = user.webroot(sys_user)
db_user = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DB_USER')
name = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DB_NAME')
password = subprocess.getoutput('sudo -u "' + sys_user + '" -i wp config get --path="' + webroot + '" DB_PASSWORD')
return (name, db_user, password)
def update_config(sys_user, db_name, db_user, db_password, path=False):
"""
Update the database name, user and password for a WordPress installation.
Args:
    sys_user - The system user that the WordPress site is stored in
db_name - The new database name
db_user - The new database user
db_password - The new database password
path - (optional) the webroot for the WordPress installation
"""
if path == False:
path = user.home_dir(sys_user) + 'public_html/'
set_config_value('DB_USER', db_user, sys_user, path)
set_config_value('DB_PASSWORD', db_password, sys_user, path)
set_config_value('DB_NAME', db_name, sys_user, path)
def set_config_value(name, value, sys_user, path=False):
    """
    Update a text value in a WordPress installation's configuration file.
    Args:
    name - The name of the wp-config constant to set
    value - The value to assign to that constant
    sys_user - The system user that the WordPress site is stored in
    path - (optional) the webroot for the WordPress installation
    """
if path == False:
path = user.home_dir(sys_user) + 'public_html/'
os.system('sudo -u "' + sys_user + '" -i wp config set ' +
' --path="' + path + '" "' + name + '" "' + value + '"')
def install_files(sys_user, db_name, db_user, db_password, path=False):
"""
    Download WordPress for a given system user. Then set the database name, user
    and password for the new WordPress installation.
    Args:
    sys_user - The system user that the WordPress site is stored in
    db_name - The existing database name
    db_user - The existing database user
    db_password - The existing database password
path - (optional) the webroot for the WordPress installation
"""
if path == False:
path = user.home_dir(sys_user) + 'public_html/'
# Set environment
pwd = os.getcwd()
whoami = os.geteuid()
os.seteuid(getpwnam(sys_user).pw_uid)
os.chdir(path)
# Download WordPress
os.system("su - '" + sys_user + "' -c \"wp core download --path='" + path + "'\"")
# Configure WordPress
command = "su - '" + sys_user + "' -c \"wp config create --skip-check" + \
" --path='" + path + "'" + \
" --dbname='" + db_name + "'" + \
" --dbuser='" + db_user + "'" + \
" --dbpass='" + db_password + "'" + \
"\""
print(command)
os.system(command)
# Reset environment
os.seteuid(whoami)
os.chdir(pwd)
def cert_try_loop(domain, username):
"""
Try up to 5 times to get a certificate for a domain.
Args:
domain - The domain to generate the certificate for
username - The system user the domain belongs to
"""
cert_try = 0
no_cert = True
time.sleep(2)
while no_cert and cert_try < 5:
cert_try += 1
no_cert = not cert.create_std_le_certs(domain, username)
if no_cert:
wait = 30 * cert_try
print('Cert Failed. Waiting ' + str(wait) + ' seconds and then trying again...')
time.sleep(wait)
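    # One final attempt after the retry loop; if it also fails we give up and
    # print manual-recovery instructions instead of retrying forever.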
if no_cert:
cert_try += 1
no_cert = not cert.create_std_le_certs(domain, username)
if no_cert:
print('Cert Failed. Investigate, then wait at least 30 seconds; then to try again run: sw nginx addssl ' + domain)
return not no_cert
def make_site(username, domain, php_version, db_conn):
"""
Create a new WordPress website.
Args:
username - The existing system user for the site
domain - The domain name for the site
        php_version - The php version to use for the site (subversion)
db_conn - An open database connection with rights to create the database
and user
"""
user.make_user(username)
bind.make_zone(domain)
bind.rebuild_zone_index()
nginx.make_vhost(username, domain)
php.make_vhost(username, domain, php_version)
database_name = username[:18]
database_user = database_name
database_pass = input_util.random_string()
print("Setting db password to: " + database_pass)
db.create_database_with_user(database_name, database_user, database_pass, db_conn)
install_files(username, database_name, database_user, database_pass)
has_cert = cert_try_loop(domain, username)
if has_cert:
nginx.add_ssl_to_site_hosts(domain)
return has_cert
def clone_site(old_site, new_user, new_domain, db_conn):
"""
Create a new WordPress website cloned from an existing site.
Args:
old_site - The domain name for the site to be cloned from
new_user - The non-existing system user for the cloned site
new_domain - The domain name for the cloned site
db_conn - An open database connection with rights to create the database
and user
"""
php_version = php.get_site_version(old_site)
old_user = php.user_from_domain(old_site)
user.make_user(new_user)
bind.make_zone(new_domain)
bind.rebuild_zone_index()
nginx.make_vhost(new_user, new_domain)
for rule_id in nginx.get_bypassed_modsec_rules(old_site):
nginx.bypass_modsec_rule(new_domain, rule_id)
php.make_vhost(new_user, new_domain, php_version)
db_name = new_user[:18]
db_user = db_name
db_pass = input_util.random_string(20, False)
print("Setting db password to: " + db_pass)
db.create_database_with_user(db_name, db_user, db_pass, db_conn)
    old_db, old_db_user, old_pass = get_db_info(old_user)  # returns (name, user, password)
db.clone(old_db, db_name, db_conn)
old_dir = user.webroot(old_user)
new_dir = user.webroot(new_user)
print('Copying site files...')
os.system("cp -a '" + old_dir + ".' '" + new_dir + "'")
print('Copy complete, fixing permissions...')
os.system("find '" + new_dir + "' -user '" + old_user + "' -exec chown '" + new_user + "' {} \;")
os.system("find '" + new_dir + "' -group '" + old_user + "' -exec chgrp '" + new_user + "' {} \;")
print('Permissions fixed')
os.system("sed -i 's~" + old_dir + "~" + new_dir + "~g' " + new_dir + "wp-config.php")
update_config(new_user, db_name, db_user, db_pass)
os.system("sudo -u '" + new_user + "' -i wp search-replace --path='" + new_dir + "' '" + old_site + "' '" + new_domain + "'")
os.system("sudo -u '" + new_user + "' -i wp cache flush --path='" + new_dir + "'")
has_cert = cert_try_loop(new_domain, new_user)
if has_cert:
nginx.add_ssl_to_site_hosts(new_domain)
return has_cert
def wizard_make_site():
"""
    Create a new WordPress site, prompting the user for all needed information.
"""
    print('Your domain should already be using this server as its nameservers.')
print('Wait at least five minutes after changing nameservers to continue with this script.')
username = user.select_new_username()
domain = input('New Domain: ')
php_version = php.select_version()
mydb = db.get_connection()
is_ssl = make_site(username, domain, php_version, mydb)
add_cron(username)
protocol = 'http'
if is_ssl:
protocol = 'https'
print('Now go to ' + protocol + '://' + domain + ' to complete the WordPress setup wizard.')
def wizard_clone_site():
"""
    Clone an existing WordPress site, prompting the user for all needed
information.
"""
print('Enter information for new site:')
new_user = user.select_new_username()
new_domain = input('New Domain: ')
old_site = php.select_conf('Select site to clone from: ')['file']
mydb = db.get_connection()
is_ssl = clone_site(old_site, new_user, new_domain, mydb)
add_cron(new_user)
protocol = 'http'
if is_ssl:
protocol = 'https'
print('Now go to ' + protocol + '://' + new_domain + ' to check the cloned site.')
def add_cron(sys_user):
"""
Disable the fake cron job in WordPress and create a real one with the system
cron daemon. (speeds up page loads)
"""
user_info = getpwnam(sys_user)
crons = subprocess.getoutput("su - " + sys_user + " -c 'crontab -l 2>/dev/null'")
found = False
    for line in crons.splitlines():
if line.startswith('#'):
continue
if line.find('wp-cron.php') != -1:
found = True
break
if not found:
minute = random.randint(0,59)
cron = str(minute) + ' 0 * * * ~/.local/bin/php ~/public_html/wp-cron.php'
command = "su - " + sys_user + " -c \"crontab -l 2>/dev/null | { cat; echo '" + cron + "'; } | crontab -\" "
#print(command)
subprocess.getoutput(command)
print('Created system cron')
subprocess.getoutput("su - " + sys_user + " -c \"wp config set --path='~/public_html/' 'DISABLE_WP_CRON' true\" ")
print('Disabled WordPress cron')
return not found
def create_one_time_login(domain):
"""
Create a PHP file to give a one-time login into a WordPress site without a
    password. There is no safety measure to remove this link if it is not used.
Args:
domain - The domain that needs a one-time login
"""
sys_user = nginx.user_from_domain(domain)
passcode = input_util.random_string(40, False)
passname = input_util.random_string(40, False)
docroot = nginx.docroot_from_domain(domain)
target_file = docroot + 'wp-admin/wp-autologin-' + passname + '.php'
site_url = get_site_url(sys_user, docroot)
# Set environment
whoami = os.geteuid()
os.seteuid(getpwnam(sys_user).pw_uid)
with open(settings.get('install_path') + 'etc/wp-autologin.php', 'r') as template:
with open(target_file, 'w') as php_file:
for line in template:
line = line.replace('PASSWORDD', passcode, 10000)
php_file.write(line)
# Reset environment
os.seteuid(whoami)
print('Go to: ' + site_url + 'wp-admin/wp-autologin-' + passname + '.php?pass=' + passcode)
def get_outdated(domain):
docroot = nginx.docroot_from_domain(domain)
sys_user = nginx.user_from_domain(domain)
core = subprocess.getoutput("su - " + sys_user + " -c 'wp core check-update --path=\"" + docroot + "\" --fields=update_type --format=csv 2>/dev/null | tail -n +2'")
themes = subprocess.getoutput("su - " + sys_user + " -c 'wp theme list --path=\"" + docroot + "\" --update=available --fields=name --format=csv 2>/dev/null | tail -n +2'")
plugins = subprocess.getoutput("su - " + sys_user + " -c 'wp plugin list --path=\"" + docroot + "\" --update=available --fields=name --format=csv 2>/dev/null | tail -n +2'")
return [core, themes.splitlines(), plugins.splitlines()]
def get_site_option(sys_user, docroot, option):
value = subprocess.getoutput("su - " + sys_user + " -c 'wp option get " + option + " --path=\"" + docroot + "\"' ")
return value
def get_site_url(sys_user, docroot):
url = get_site_option(sys_user, docroot, 'siteurl')
if url[-1] != '/':
url += '/'
return url
def get_site_home(sys_user, docroot):
home = get_site_option(sys_user, docroot, 'home')
if home[-1] != '/':
home += '/'
return home
def install_wp_cli():
install_directory = '/opt/wp-cli/'
download_url = 'https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar'
save_file = install_directory + 'wp-cli.phar'
bin_path = '/usr/local/bin/wp'
if not os.path.exists(install_directory):
os.makedirs(install_directory)
wget.download(download_url, save_file)
old_mode = os.stat(save_file)
os.chmod(save_file, old_mode.st_mode | stat.S_IEXEC)
if not os.path.exists(bin_path):
        os.makedirs('/usr/local/bin', exist_ok=True)
os.symlink(save_file, bin_path)
| 1.960938 | 2 |
src/bnn/util/plotting.py | beauCoker/variable_selection | 0 | 103812 | <reponame>beauCoker/variable_selection<filename>src/bnn/util/plotting.py
# standard library imports
# package imports
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
| 1.015625 | 1 |
observatory/lib/pyvcs/utils.py | natestedman/Observatory | 1 | 103940 | from difflib import unified_diff
from lib.pyvcs.exceptions import FileDoesNotExist
def generate_unified_diff(repository, changed_files, commit1, commit2):
diffs = []
for file_name in changed_files:
try:
file1 = repository.file_contents(file_name, commit1)
except FileDoesNotExist:
file1 = ''
try:
file2 = repository.file_contents(file_name, commit2)
except FileDoesNotExist:
file2 = ''
diffs.append(unified_diff(
file1.splitlines(), file2.splitlines(), fromfile=file_name,
tofile=file_name, fromfiledate=commit1, tofiledate=commit2
))
return '\n'.join('\n'.join(map(lambda s: s.rstrip('\n'), diff)) for diff in diffs)
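# Example (the repository object and commit ids are illustrative; any pyvcs
# repository exposing file_contents(path, commit) works here):
#   diff_text = generate_unified_diff(repo, ['README'], 'abc123', 'def456')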
| 1.539063 | 2 |
library/interface.py | gillenbrown/library | 0 | 104068 | <reponame>gillenbrown/library<filename>library/interface.py
from pathlib import Path
from PySide2.QtCore import Qt, QEvent
from PySide2.QtGui import (
QKeySequence,
QFontDatabase,
QFont,
QDesktopServices,
QGuiApplication,
)
from PySide2.QtWidgets import (
QApplication,
QWidget,
QMainWindow,
QAction,
QVBoxLayout,
QHBoxLayout,
QLabel,
QScrollArea,
QSplitter,
QLineEdit,
QPushButton,
QLayout,
QFileDialog,
QCheckBox,
)
from library.database import PaperAlreadyInDatabaseError
class LeftPanelTag(QLabel):
"""
Class holding a tag that goes in the left panel
"""
def __init__(self, tagName, papersList):
"""
Initialize a tag that goes in the left panel.
We need to get the papersList so that we can hide papers when a user clicks
on a tag.
:param tagName: Name of the tag to show.
:type tagName: str
:param papersList: Papers list objects
:type papersList: PapersListScrollArea
"""
QLabel.__init__(self, tagName)
self.name = tagName
self.setFont(QFont("Cabin", 14))
self.papersList = papersList
def mousePressEvent(self, _):
"""
When the tag is clicked on, show the papers with that tag in the central panel
:param _: Dummy parameter that contains the event type. Not used here, we do
the same thing for every click type.
:return: None
"""
for paper in self.papersList.papers:
if self.name in paper.getTags():
paper.show()
else:
paper.hide()
class Paper(QWidget):
"""
Class holding paper details that goes in the central panel
"""
def __init__(self, bibcode, db, rightPanel):
"""
Initialize the paper object, which will hold the given bibcode
:param bibcode: Bibcode of this paper
:type bibcode: str
:param db: Database object this interface is using
:type db: library.database.Database
:param rightPanel: rightPanel object of this interface. This is only needed so
we can call the update feature when this is clicked on
:type rightPanel: rightPanel
"""
QWidget.__init__(self)
# store the information that will be needed later
self.bibcode = bibcode
self.db = db
self.rightPanel = rightPanel
# make sure this paper is actually in the database. This should never happen, but
# might if I do something dumb in tests
assert self.bibcode in self.db.get_all_bibcodes()
# Then set up the layout this uses. It will be vertical with the title (for now)
vBox = QVBoxLayout()
self.titleText = QLabel(self.db.get_paper_attribute(self.bibcode, "title"))
self.titleText.setFont(QFont("Cabin", 16))
self.citeText = QLabel(self.db.get_cite_string(self.bibcode))
self.citeText.setFont(QFont("Cabin", 12))
# then add these to the layout, then set this layout
vBox.addWidget(self.titleText)
vBox.addWidget(self.citeText)
self.setLayout(vBox)
def mousePressEvent(self, event):
"""
Handle the clicks - single click will display details, double will open paper
:param event: Mouse click event object
:type event: PySide2.QtGui.QMouseEvent
:return: None
"""
if event.type() is QEvent.Type.MouseButtonPress:
# Pass the bibcode on to the right panel
self.rightPanel.setPaperDetails(self.bibcode)
elif event.type() is QEvent.Type.MouseButtonDblClick:
local_file = self.db.get_paper_attribute(self.bibcode, "local_file")
if local_file is None:
# if there is not a paper, we need to add it
# the file dialog returns a two item tuple, where the first item is the
# file name and the second is the filter. This is true whether the user
# selects something or not. Contrary to the documentation, if the user
# hits cancel it returns a two item tuple with two empty strings!
local_file = QFileDialog.getOpenFileName(filter="PDF(*.pdf)")[0]
# If the user doesn't select anything this returns the empty string.
# Otherwise this returns a two item tuple, where the first item is the
# absolute path to the file they picked
if local_file != "":
self.db.set_paper_attribute(self.bibcode, "local_file", local_file)
# we'll open this file in a minute
else:
# the user didn't pick anything, so don't open anything
return
# if we now have a path, open the file. We get here whether we had to ask
# the user or now
QDesktopServices.openUrl(f"file:{local_file}")
# nothing should be done for other click types
def getTags(self):
"""
Return the list of tags this paper has. This is used by the tags list.
:return: List of tags
:rtype: list
"""
return self.db.get_paper_tags(self.bibcode)
class TagCheckBox(QCheckBox):
"""
Class for the Tag checkboxes that will go in the right panel.
This is only needed as a separate class to implement the changeState correctly.
"""
def __init__(self, text, rightPanel):
"""
Create this tag Checkbox, with given text and belonging to some RightPanel.
:param text: Label to show up next to this checkbox
:type text: str
:param rightPanel: Right Panel this checkbox belongs to.
:type rightPanel: RightPanel
"""
QCheckBox.__init__(self, text)
self.rightPanel = rightPanel
self.stateChanged.connect(self.changeState)
def changeState(self):
"""
Changes the tags of a paper by calling the method in the rightPanel
:return: None
"""
self.rightPanel.changeTags(self.text(), self.isChecked())
class RightPanel(QWidget):
"""
The right panel area for the main window, holding paper info for a single paper
"""
def __init__(self, db):
"""
Initialize the right panel.
:param db: Database object this interface is using
:type db: library.database.Database
"""
QWidget.__init__(self)
self.db = db
self.bibcode = "" # will be set later
self.papersList = None # will be set by the papersList initializer, which
# will have this passed in to it
# This widget will have several main areas, all laid out vertically
vBox = QVBoxLayout()
# for clarity set text to empty, the default values will be set below
self.titleText = QLabel("")
self.citeText = QLabel("")
self.abstractText = QLabel("")
self.copyBibtexButton = QPushButton("Copy Bibtex entry to clipboard")
self.firstDeletePaperButton = QPushButton("Delete this paper")
self.secondDeletePaperButton = QPushButton("Are you sure?")
self.tagText = QLabel("")
# have buttons to hide and show the list of tag checkboxes
self.editTagsButton = QPushButton("Edit Tags")
self.doneEditingTagsButton = QPushButton("Done Editing Tags")
# then add their functionality when clicked
self.editTagsButton.clicked.connect(self.enableTagEditing)
self.doneEditingTagsButton.clicked.connect(self.doneTagEditing)
self.copyBibtexButton.clicked.connect(self.copyBibtex)
self.firstDeletePaperButton.clicked.connect(self.revealSecondDeleteButton)
self.secondDeletePaperButton.clicked.connect(self.deletePaper)
# handle the initial state
self.resetPaperDetails()
# the Tags List has a bit of setup
self.tags = [] # store the tags that are in there
vBoxTags = QVBoxLayout()
# go through the database and add checkboxes for each tag there.
for t in self.db.get_all_tags():
this_tag_checkbox = TagCheckBox(t, self)
self.tags.append(this_tag_checkbox)
vBoxTags.addWidget(this_tag_checkbox)
# hide all tags at the beginning
this_tag_checkbox.hide()
# set text properties
self.titleText.setFont(QFont("Cabin", 20))
self.citeText.setFont(QFont("Cabin", 16))
self.abstractText.setFont(QFont("Cabin", 14))
self.tagText.setFont(QFont("Cabin", 14))
# make delete button red
self.secondDeletePaperButton.setStyleSheet("background-color: #FFCCCC;")
self.titleText.setWordWrap(True)
self.citeText.setWordWrap(True)
self.abstractText.setWordWrap(True)
self.tagText.setWordWrap(True)
# add these to the layout
vBox.addWidget(self.titleText)
vBox.addWidget(self.citeText)
vBox.addWidget(self.abstractText)
vBox.addWidget(self.copyBibtexButton)
vBox.addWidget(self.tagText)
vBox.addWidget(self.editTagsButton)
vBox.addWidget(self.doneEditingTagsButton)
vBox.addLayout(vBoxTags)
vBox.addWidget(self.firstDeletePaperButton)
vBox.addWidget(self.secondDeletePaperButton)
self.setLayout(vBox)
def resetPaperDetails(self):
"""
Set the details in the right panel to be the default when no paper is shown
:return: None, but the text properties are set
"""
self.titleText.setText("")
self.citeText.setText("")
self.abstractText.setText("Click on a paper to show its details here")
self.tagText.setText("")
# all of the buttons
self.editTagsButton.hide()
self.doneEditingTagsButton.hide()
self.copyBibtexButton.hide()
self.firstDeletePaperButton.hide()
self.secondDeletePaperButton.hide()
def setPaperDetails(self, bibcode):
"""
Update the details shown in the right panel.
:param bibcode: Bibcode of the paper. The bibcode will not appear, but it will
be used to query the details from the database.
:type bibcode: str
:return: None, but the text properties are set.
"""
self.bibcode = bibcode
self.titleText.setText(self.db.get_paper_attribute(self.bibcode, "title"))
self.citeText.setText(self.db.get_cite_string(self.bibcode))
self.abstractText.setText(self.db.get_paper_attribute(self.bibcode, "abstract"))
tagsList = self.db.get_paper_tags(self.bibcode)
self.tagText.setText(f"Tags: {', '.join(tagsList)}")
# Go through and set the checkboxes to match the tags the paper has
for tag in self.tags:
if tag.text() in tagsList:
tag.setChecked(True)
else:
tag.setChecked(False)
# then make the edit tags and copy Bibtex buttons appear, since they will be
# hidden at the start
self.editTagsButton.show()
self.copyBibtexButton.show()
self.firstDeletePaperButton.show()
# also hide the second button if it was shown
self.secondDeletePaperButton.hide()
def enableTagEditing(self):
"""
Show the tag selection boxes and the done editing button.
Inverse of `doneTagEditing`
:return: None
"""
self.editTagsButton.hide()
self.doneEditingTagsButton.show()
for tag in self.tags:
tag.show()
def doneTagEditing(self):
"""
Hide the tag selection boxes and the done editing button.
Inverse of `enableTagEditing`
:return: None
"""
self.editTagsButton.show()
self.doneEditingTagsButton.hide()
for tag in self.tags:
tag.hide()
def changeTags(self, tagName, checked):
"""
Add or remove tag to the paper in the panel right now
:param tagName: The tag that was checked or unchecked.
:type tagName: str
:param checked: Whether the tag was added (True) or removed (False)
:type checked: bool
:return: None
"""
if checked:
self.db.tag_paper(self.bibcode, tagName)
else:
self.db.untag_paper(self.bibcode, tagName)
def copyBibtex(self):
"""
Put the text from the selected paper's Bibtex entry into the clipboard
:return: None, but the text is copied to the clipboard
:rtype: None
"""
this_bibtex = self.db.get_paper_attribute(self.bibcode, "bibtex")
QGuiApplication.clipboard().setText(this_bibtex)
def revealSecondDeleteButton(self):
"""
Hides the first delete button, reveals the second
:return: None, but the buttons are shifted
"""
self.firstDeletePaperButton.hide()
self.secondDeletePaperButton.show()
def deletePaper(self):
"""
Delete this paper from the database
:return: None, but the paper is deleted and text reset
"""
self.db.delete_paper(self.bibcode)
self.resetPaperDetails() # clean up right panel
        self.papersList.deletePaper(self.bibcode)  # remove this from the center panel
class ScrollArea(QScrollArea):
"""
A wrapper around QScrollArea with a vertical layout, appropriate for lists
"""
def __init__(self):
"""
Setup the scroll area, no parameters needed.
"""
QScrollArea.__init__(self)
# Have a central widget with a vertical box layout
self.container = QWidget()
self.layout = QVBoxLayout()
# the widgets should have their fixed size, no modification. This is also
# needed to get them to show up, I believe to stop this from having zero size?
self.layout.setSizeConstraint(QLayout.SetFixedSize)
# Then add these layouts and widgets
self.container.setLayout(self.layout)
self.setWidget(self.container)
def addWidget(self, widget):
"""
Add a widget to the list of vertical objects
:param widget: Widget to be added to the layout.
:type widget: QWidget
:return: None
"""
# add the widget to the layout
self.layout.addWidget(widget)
class PapersListScrollArea(ScrollArea):
"""
The class to be used for the central list of papers.
It's just a ScrollArea that keeps track of the papers that have been added.
"""
def __init__(self, db, rightPanel):
"""
Set up the papers list
Stores the database and right panel, which are used by the paper objects
themselves
:param db: Database object this interface is using
:type db: library.database.Database
:param rightPanel: rightPanel object of this interface. This is only needed so
we can call the update feature when this is clicked on
:type rightPanel: rightPanel
"""
ScrollArea.__init__(self)
self.papers = []
self.rightPanel = rightPanel
self.db = db
rightPanel.papersList = self
def addPaper(self, bibcode):
"""
Add a paper to the papers scroll area.
This adds it to the internal list of papers and puts the widget in the interface
:param bibcode: Bibcode of the paper to be added
:type bibcode: str
:return: None
"""
# check if this paper is already in the list. This should never happen
assert bibcode not in [p.bibcode for p in self.papers]
# create the paper object, than add to the list and center panel
paper = Paper(bibcode, self.db, self.rightPanel)
self.papers.append(paper)
self.addWidget(paper) # calls the ScrollArea addWidget
def deletePaper(self, bibcode):
"""
Delete a paper from this list of papers
:param bibcode: Bibcode of the paper to delete
:return: None, but the paper is deleted from the list
"""
for paper in self.papers:
if paper.bibcode == bibcode:
paper.hide() # just to be safe
self.papers.remove(paper)
del paper
class TagsListScrollArea(ScrollArea):
"""
The class to be used for the left hand side list of tags.
It's just a ScrollArea that keeps track of the tags that have been added, almost
identical to PapersListScrollArea, except that it has a text area to add tags, and
a button to show all papers.
"""
def __init__(self, addTagBar, papersList):
"""
Set up the papers list, no parameters needed
"""
ScrollArea.__init__(self)
self.tags = []
self.addTagBar = addTagBar
self.papersList = papersList
# Make the button to show all the papers in the list
self.showAllButton = QPushButton("Show All")
self.showAllButton.clicked.connect(self.showAllPapers)
# put the tag bar at the top of the list
self.addWidget(self.addTagBar) # calls ScrollArea addWidget
self.addWidget(self.showAllButton)
def addTag(self, tag):
"""
Add a tag to the tags scroll area.
This adds it to the internal list of tags and puts the widget in the interface
:param tag: Tag object to be added to the list of stored tags.
:type tag: LeftPanelTag
:return: None
"""
# check if this tag is already in the list. This should never happen
assert tag.name not in [t.name for t in self.tags]
self.tags.append(tag)
self.addWidget(tag) # calls the ScrollArea addWidget
def showAllPapers(self):
"""
Show all the papers in the central papers list.
:return: None
"""
for paper in self.papersList.papers:
paper.show()
class MainWindow(QMainWindow):
"""
Main window object holding everything needed in the interface.
"""
def __init__(self, db):
"""
Create the interface around the database passed in.
:param db: database object that will be displayed in this interface.
:type db: library.database.Database
"""
QMainWindow.__init__(self)
self.db = db
# Start with the layout. Our main layout is three vertical components:
# the first is the title, second is the search bar, where the user can paste
# URLs to add to the database, and the third is the place where we show all
# the papers that have been added.
vBoxMain = QVBoxLayout()
# The title is first
self.title = QLabel("Library")
# Mess around with the title formatting
self.title.setFixedHeight(60)
self.title.setAlignment(Qt.AlignCenter)
self.title.setFont(QFont("Lobster", 40))
vBoxMain.addWidget(self.title)
# Then comes the search bar. This is it's own horizontal layout, with the
# text box and the button to add
hBoxSearchBar = QHBoxLayout()
self.searchBar = QLineEdit()
self.searchBar.setPlaceholderText("Enter your paper URL or ADS bibcode here")
self.searchBar.setFont(QFont("Cabin", 14))
# We'll also have an add button
self.addButton = QPushButton("Add")
self.addButton.setFont(QFont("Cabin", 14))
# Define what to do when these things are activated. The user can either hit
# enter or hit the add button
self.searchBar.returnPressed.connect(self.addPaper)
self.addButton.clicked.connect(self.addPaper)
# have both of these quantities have a fixed height. These values are chosen to
# make it look nice. They aren't the same size since the bounding boxes aren't
# quite the same relative to the shown borders for whatever reason
self.searchBar.setFixedHeight(30)
self.addButton.setFixedHeight(35)
# Then add these to the layouts
hBoxSearchBar.addWidget(self.searchBar)
hBoxSearchBar.addWidget(self.addButton)
vBoxMain.addLayout(hBoxSearchBar)
# Then we have the main body. This is a bit more complex. We'll start by just
        # initializing the layout for this, which is three panels laid horizontally.
# This is the default splitter orientation
splitter = QSplitter()
# then make each of these things
# The right panel is the details on a given paper. It holds the tags list,
# which we need to initialize first
self.rightPanel = RightPanel(self.db)
rightScroll = ScrollArea()
rightScroll.addWidget(self.rightPanel)
# The central panel is the list of papers. This has to be set up after the
# right panel because the paper objects need it, and before the left panel
# because the tags need this panel
self.papersList = PapersListScrollArea(db, self.rightPanel)
for b in self.db.get_all_bibcodes():
self.papersList.addPaper(b)
# The left panel of this is the list of tags the user has, plus the button to
# add papers, which will go at the top of that list. This has to go after the
# center panel since the tags need to access the paper list
addTagBar = QLineEdit()
addTagBar.setFont(QFont("Cabin", 14))
addTagBar.setPlaceholderText("Add a new tag here")
addTagBar.returnPressed.connect(self.addTag)
self.tagsList = TagsListScrollArea(addTagBar, self.papersList)
for t in self.db.get_all_tags():
self.tagsList.addTag(LeftPanelTag(t, self.papersList))
# then add each of these widgets to the central splitter
splitter.addWidget(self.tagsList)
splitter.addWidget(self.papersList)
splitter.addWidget(rightScroll)
# Add this to the main layout
vBoxMain.addWidget(splitter)
# We then have to have a dummy widget to act as the central widget. All that
# is done here is setting the layout
container = QWidget()
container.setLayout(vBoxMain)
self.setCentralWidget(container)
# Then let's set up a menu.
self.menu = self.menuBar()
# have the file option
self.file_menu = self.menu.addMenu("File")
# Things to go in the menu
# Calling the Quit command can't be used, as it is caught by MacOS somehow
# I'll use "close" instead. This does automatically use the keyboard shortcut
# ctrl+q to exit
self.exitAction = QAction("Close", self)
self.exitAction.setShortcut(QKeySequence("Ctrl+q")) # to be clear
# have to connect this to a function to actually do something
        self.exitAction.triggered.connect(QApplication.quit)
# Then add all items to the menu
self.file_menu.addAction(self.exitAction)
# and the initial window size
self.resize(1000, 600)
self.show()
def addPaper(self):
"""
Add a paper to the database, taking text from the text box.
If what is in the text box is not recognized, the text will not be cleared, and
nothing will be added (obviously). If the paper is already in the library,
the paper will not be added but the text will be cleared.
:return: None
"""
try: # see if the user put something good
bibcode = self.db.add_paper(self.searchBar.text())
except ValueError: # will be raised if the value isn't recognized
return # don't clear the text or add anything
except PaperAlreadyInDatabaseError:
# here we do clear the search bar, but do not add the paper
self.searchBar.clear()
return
# we only get here if the addition to the database worked. If so we add the
# paper object, then clear the search bar
self.papersList.addPaper(bibcode)
# clear the text so another paper can be added
self.searchBar.clear()
def addTag(self):
"""
Adds a tag to the database, taking the name from the text box.
The text will be cleared if the tag was successfully added, which will only not
be the case if the tag is already in the database.
:return: None
"""
#
try:
tagName = self.tagsList.addTagBar.text()
self.db.add_new_tag(tagName)
self.tagsList.addTag(LeftPanelTag(tagName, self.papersList))
except ValueError: # this tag is already in the database
return
# if we got here we had no error, so it was successfully added and we should
# clear the text box
self.tagsList.addTagBar.clear()
def get_fonts(directory, current_list):
"""
Recursive function to get all the fonts within a directory, including all subdirs
Note that all fonts must have the `.ttf` file extension.
:param directory: Parent directory to search for fonts
:type directory: pathlib.Path
    :param current_list: List of fonts that have been found so far. This list is
                         appended to in place, so the first caller can read the
                         full set of fonts from the list it passed in.
:type current_list: list
:return: None
"""
# go through everything in this directory
for item in directory.iterdir():
# if it's a directory, recursively call this function on that directory.
if item.is_dir():
get_fonts(item, current_list)
# current_list will be modified in place, so we don't need to keep
# the returned value
# otherwise look for ttf files.
if str(item).endswith(".ttf"):
current_list.append(str(item))
def set_up_fonts():
"""
Add all the found fonts to the Qt font database
:return: None, but the fonts are added to the Qt font database
"""
fontDb = QFontDatabase()
# we need to initialize this list to start, as fonts found will be appended to this
fonts = []
get_fonts(Path(__file__).parent.parent / "fonts", fonts)
for font in fonts:
fontDb.addApplicationFont(font)
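# Note: this must run after the QApplication/QGuiApplication instance exists,
# since QFontDatabase cannot be used before a GUI application is created.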
| 1.859375 | 2 |
lib/python/frugal/tornado/transport/transport.py | ariasheets-wk/frugal | 144 | 104196 | # Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tornado import gen
from frugal.transport import FTransport
class FTransportBase(FTransport):
"""
FTransportBase extends FTransport using the coroutine decorators used by
all tornado FTransports.
"""
def is_open(self):
raise NotImplementedError("You must override this.")
@gen.coroutine
def open(self):
raise NotImplementedError("You must override this.")
@gen.coroutine
def close(self):
raise NotImplementedError("You must override this.")
@gen.coroutine
def oneway(self, context, payload):
raise NotImplementedError('You must override this.')
@gen.coroutine
def request(self, context, payload):
raise NotImplementedError('You must override this.')
| 1.539063 | 2 |
evaluate_autism_wav2vec.py | HLasse/wav2vec_finetune | 6 | 104324 | """evaluate model performance
TODO
- Evaluate by window and by participant (rewrite to make windows)
"""
import torch
import torch.nn.functional as F
import torchaudio
from transformers import AutoConfig, Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification
import numpy as np
import pandas as pd
import os
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
MODEL_PATH = os.path.join("model", "xlsr_autism_stories", "checkpoint-10")
TEST = pd.read_csv(os.path.join("data", "splits", "stories_train_data_gender_False.csv"))
LABEL_COL = "Diagnosis"
def speech_file_to_array_fn(path, sampling_rate):
speech_array, _sampling_rate = torchaudio.load(path)
resampler = torchaudio.transforms.Resample(_sampling_rate, sampling_rate)
speech = resampler(speech_array).squeeze().numpy()
return speech
def predict(path, sampling_rate):
speech = speech_file_to_array_fn(path, sampling_rate)
features = processor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True)
input_values = features.input_values.to(device)
attention_mask = features.attention_mask.to(device)
with torch.no_grad():
logits = model(input_values, attention_mask=attention_mask).logits
scores = F.softmax(logits, dim=1).detach().cpu().numpy()[0]
pred = config.id2label[np.argmax(scores)]
confidence = scores[np.argmax(scores)]
return pred, confidence
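# Illustrative call (the path and returned values are made up):
#   predict("data/clips/story_01.wav", target_sampling_rate) -> ("ASD", 0.87)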
def add_predicted_and_confidence(df):
pred, confidence = predict(df["file"], target_sampling_rate)
df["pred"] = pred
df["confidence"] = confidence
return df
# setup model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config = AutoConfig.from_pretrained(MODEL_PATH)
processor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_PATH)
target_sampling_rate = processor.sampling_rate
model = Wav2Vec2ForSequenceClassification.from_pretrained(MODEL_PATH).to(device)
# load test data
# apply predictions
test = TEST.apply(add_predicted_and_confidence, axis=1)
print(confusion_matrix(test[LABEL_COL], test["pred"]))
print(classification_report(test[LABEL_COL], test["pred"]))
acc = accuracy_score(test[LABEL_COL], test["pred"])
print(f"accuracy: {acc}") | 2.21875 | 2 |
UltimateTicTacToe/Bot.py | m3rik/nn | 0 | 104452 | class Bot:
'''
state - state of the game
returns a move
'''
def move(self, state, symbol):
raise NotImplementedError('Abstractaaa')
def get_name(self):
raise NotImplementedError('Abstractaaa')
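# Sketch of the expected contract (hypothetical subclass; the real game-state
# API is defined elsewhere in this repo):
#
#     class RandomBot(Bot):
#         def move(self, state, symbol):
#             return random.choice(state.legal_moves())  # state API assumed
#         def get_name(self):
#             return "RandomBot"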
| 1.578125 | 2 |
nettowel/cli/_output.py | InfrastructureAsCode-ch/nettowel | 1 | 104580 | from rich.columns import Columns
from rich.panel import Panel
| 0.146484 | 0 |
copywriting/views.py | uncommitted-and-forgotten/django-copywriting | 2 | 104708 | # -*- coding: utf-8 -*-
import datetime
import urllib
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.utils import timezone
from .models import *
from .helperFunctions import getLatestArticles
from .helperFunctions import getArticlesByDate
from .helperFunctions import getYearCount
from .helperFunctions import getLatestArticlesByAuthor
def listArticles(request):
"""
"""
articles = getLatestArticles(100)
return render_to_response('copywriting/copywritingIndex.html', {'articles': articles,
'yearCount': getYearCount(),
}, context_instance=RequestContext(request))
def listArticlesByAuthor(request, author):
"""
author is the username of the user in the author model.
"""
authorProfile = AuthorProfile.objects.get(user__username=author)
articles = getLatestArticlesByAuthor(ContentType.objects.get_for_model(authorProfile), authorProfile.id, 100)
return render_to_response('copywriting/copywritingIndex.html', {'articles': articles,
'yearCount': getYearCount(),
'authorProfile': authorProfile,
}, context_instance=RequestContext(request))
def listArticlesByYear(request, requestYear):
"""
"""
articles = getArticlesByDate(year=requestYear)
return render_to_response('copywriting/copywritingIndex.html', {'articles': articles,
'yearCount': getYearCount(),
}, context_instance=RequestContext(request))
def listArticlesByYearMonth(request, requestYear, requestMonth):
"""
"""
articles = getArticlesByDate(year=requestYear, month=requestMonth)
return render_to_response('copywriting/copywritingIndex.html', {'articles': articles,
'yearCount': getYearCount(),
}, context_instance=RequestContext(request))
def listArticlesByYearMonthDay(request, requestYear, requestMonth, requestDay):
"""
"""
articles = getArticlesByDate(year=requestYear, month=requestMonth, day=requestDay)
return render_to_response('copywriting/copywritingIndex.html', {'articles': articles,
'yearCount': getYearCount(),
}, context_instance=RequestContext(request))
def withTag(request, in_tag):
lTag = urllib.unquote(in_tag)
tags = Tag.objects.filter(name=lTag)
articles = Article.objects.filter(tags__in=tags, status=Article.PUBLISHED, pubDate__lte=timezone.now()).order_by('-pubDate')
return render_to_response("copywriting/copywritingIndex.html", {'tag': in_tag,
'articles': articles,
'yearCount': getYearCount(),
}, context_instance=RequestContext(request))
def showArticle(request, slug):
"""
"""
if request.user.is_staff or request.user.is_superuser:
article = get_object_or_404(Article, slug=slug)
else:
article = get_object_or_404(Article, slug=slug, status=Article.PUBLISHED)
if article:
latestArticlesList = getLatestArticlesByAuthor(article.authorProfileModel, article.authorProfileId, 5, slug)
return render_to_response('copywriting/copywritingArticle.html', {'article': article,
'latestArticlesList': latestArticlesList,
}, context_instance=RequestContext(request))
| 1.78125 | 2 |
src/extensions/lang/python/reader/testStateProtoFileParser.py | poobalan-arumugam/stateproto | 1 | 104836 | <filename>src/extensions/lang/python/reader/testStateProtoFileParser.py
from __future__ import print_function
from reader.parseStateProtoFile import *
from reader.StateTreeModel import *
from codegens.pygen import *
from codegens.jsgen import *
from codegens.stgen import *
def tag(tagName, tagValue):
return "<%(tagName)s>%(tagValue)s</%(tagName)s>" % locals()
def walkStateTree_Table(rootNode, arg):
name = rootNode.name()
state = rootNode.state()
entry = state.entry
exit = state.exit
print("<table id='", name, "'><tr><td>")
print("<div ", style," border='2' id='", name, "'><tr><td>")
print(tag("h1", name))
print(tag("p", entry))
print(tag("p", exit))
rootNode.do(walkStateTree, arg)
print("</td></tr></table>")
def walkStateTree_Div(rootNode, zorder):
name = rootNode.name()
state = rootNode.state()
entry = state.entry
exit = state.exit
style = "style='z-index:%s;position:absolute;left:%s;top:%s;width:%s;height:%s'" % (zorder, state.left, state.top, state.width, state.height,)
classId = " class='clsdiv" + str(zorder) + "' "
print("<div ", style, classId, " id='", name, "'>")
#print("<table ", style," id='", name, "'><tr><td>")
print(tag("h1", name))
print(tag("p", entry))
print(tag("p", exit))
#print("</td></tr></table>")
print("</div>")
rootNode.do(walkStateTree, zorder + 1)
def walkStateTree(rootNode, arg):
#walkStateTree_Table(rootNode, None)
walkStateTree_Div(rootNode, arg)
if __name__ == "__main__":
fileList = ["phoneSim1.sm1",
"../../../../../../statemachines/samples/pingpong/pingpong.sm1",
"testsample1.sm1"]
parsedModel = ParsedModel(fileList[-1])
def simplePrintFunction(item, arg):
genericPrintVisitor = GenericPrintVisitor(parsedModel)
item.accept(genericPrintVisitor, arg)
parsedModel.do(simplePrintFunction, None)
# placed three sample languages - all incomplete.
# python, javascript and smalltalk
# need things like - qualified events (due to ports), action parsing for sending to ports and timeout parsing for timeouts
# no need for codegen'ed file to contain user supplied code as these languages support class extension
# python via mixin, javascript directly through prototype and smalltalk - can be done - but don't even know file format for straight code-gen.
pythonVisitor = PythonVisitor(parsedModel)
javascriptVisitor = JavascriptVisitor(parsedModel)
smalltalkVisitor = SmalltalkVisitor(parsedModel)
def visitorPassthroughFunction(item, visitor):
item.accept(visitor, None)
parsedModel.do(visitorPassthroughFunction, pythonVisitor)
parsedModel.do(visitorPassthroughFunction, javascriptVisitor)
parsedModel.do(visitorPassthroughFunction, smalltalkVisitor)
stateTreeRoot = buildStateTree(parsedModel)
print(stateTreeRoot)
walkStateTree(stateTreeRoot, 1)
| 1.335938 | 1 |
b_functions.py | NacerSebtiMS/Google_Hashcode_2020_Qualification | 1 | 104964 | def scorify_library(library):
"""
The aim is to give the libraries a score, that will enable to order them later on
"""
NB = library[0]
BD = library[2]
SB = library_total_book_score(library)
DR = library[1]
library_scoring = (D - DR) * BD * (SB/NB)
return library_scoring
def library_total_book_score(library):
book_ids = library[3]
total_library_book_score = 0
for id in book_ids:
total_library_book_score += BL[id]
return total_library_book_score
def compute_available_days():
available_libraries = []
availability_day = 0
while len(scores)>0:
library_id_score = scores.pop()
library_id = library_id_score[0]
DR = LL[library_id][1]
availability_day += DR
if availability_day > D:
continue
else:
entry = (library_id,availability_day)
available_libraries.append(entry)
return available_libraries
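# --- Hedged usage sketch (added for illustration; not part of the original
# entry). The helpers above rely on module-level globals that the original
# solution defines elsewhere while parsing the input: D (total scanning days),
# BL (score of each book id), LL (libraries as (NB, DR, BD, book_ids) tuples)
# and a pre-sorted `scores` list that compute_available_days() pops from.
# The toy values below are assumptions, only meant to show the call flow.
if __name__ == "__main__":
    D = 7                                            # assumed: days available
    BL = [5, 3, 8, 1]                                # assumed: book scores
    LL = [(2, 1, 2, [0, 1]), (2, 2, 1, [2, 3])]      # assumed: (NB, DR, BD, book_ids)
    scores = sorted(((i, scorify_library(lib)) for i, lib in enumerate(LL)),
                    key=lambda pair: pair[1])        # best library ends up last
    print(compute_available_days())                  # -> [(0, 1), (1, 3)] here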
| 2.65625 | 3 |
tests/test_array_utils.py | kjappelbaum/scikit-ued | 0 | 105092 | <reponame>kjappelbaum/scikit-ued
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from skued import (
repeated_array,
mirror,
cart2polar,
polar2cart,
plane_mesh,
spherical2cart,
cart2spherical,
complex_array,
)
np.random.seed(23)
class TestRepeatedArray(unittest.TestCase):
def setUp(self):
self.arr = np.random.random(size=(4, 5))
def test_trivial(self):
""" Test repeated_array of 0 copies """
composite = repeated_array(self.arr, num=0, axes=0)
self.assertTrue(np.allclose(composite, self.arr))
def test_single_axis(self):
""" Test repeating the array over a single axis """
composite = repeated_array(self.arr, num=3, axes=1)
expected_new_shape = (self.arr.shape[0], self.arr.shape[1] * 3)
self.assertEqual(composite.shape, expected_new_shape)
def test_multiple_axes(self):
""" Test repeating an array over multiple axes in all possible orders """
with self.subTest("axes = (0, 1)"):
composite = repeated_array(self.arr, num=(3, 2), axes=(0, 1))
expected_new_shape = (self.arr.shape[0] * 3, self.arr.shape[1] * 2)
self.assertEqual(composite.shape, expected_new_shape)
with self.subTest("axes = (1, 0)"):
composite = repeated_array(self.arr, num=(2, 3), axes=(1, 0))
expected_new_shape = (self.arr.shape[0] * 3, self.arr.shape[1] * 2)
self.assertEqual(composite.shape, expected_new_shape)
class TestComplexArray(unittest.TestCase):
def test_floats(self):
""" Test that two floating arrays are cast correctly """
real, imag = np.empty((3, 4), dtype=np.float), np.empty((3, 4), dtype=np.float)
self.assertEqual(complex_array(real, imag).dtype, np.complex)
def test_non_floats(self):
""" Test that two integer arrays are cast correctly """
real, imag = np.empty((3, 4), dtype=np.int16), np.empty((3, 4), dtype=np.int8)
self.assertEqual(complex_array(real, imag).dtype, np.complex)
def test_results(self):
""" test that ``complex_array`` returns appropriate results """
arr1 = np.random.random((4, 5))
arr2 = np.random.random((4, 5))
from_complex_array = complex_array(arr1, arr2)
by_hand = arr1 + 1j * arr2
self.assertEqual(from_complex_array.dtype, by_hand.dtype)
self.assertTrue(np.allclose(from_complex_array, by_hand))
class TestMirror(unittest.TestCase):
def test_1D(self):
""" Test mirror() on a 1D array """
arr = np.zeros((16,), dtype=np.float)
arr[15] = 1
self.assertTrue(np.allclose(arr[::-1], mirror(arr)))
def test_2D_all_axes(self):
""" Test mirror() on a 2D array for all axes """
arr = np.zeros((16, 16), dtype=np.float)
arr[15, 3] = 1
self.assertTrue(np.allclose(arr[::-1, ::-1], mirror(arr)))
def test_2D_one_axis(self):
""" Test mirror() on a 2D array for one axis """
arr = np.zeros((16, 16), dtype=np.float)
arr[15, 3] = 1
self.assertTrue(np.allclose(arr[:, ::-1], mirror(arr, axes=1)))
self.assertTrue(np.allclose(arr[::-1, :], mirror(arr, axes=0)))
class TestCart2Polar(unittest.TestCase):
def test_back_and_forth(self):
""" Test that cart2polar and polar2cart are reciprocal """
x = np.random.random(size=(16, 8))
y = np.random.random(size=(16, 8))
r, t = cart2polar(x, y)
xp, yp = polar2cart(r, t)
self.assertTrue(np.allclose(x, xp))
self.assertTrue(np.allclose(y, yp))
class TestSpherical2Cart(unittest.TestCase):
def test_back_and_forth(self):
""" Test that cart2polar and polar2cart are reciprocal """
x = np.random.random(size=(16, 8))
y = np.random.random(size=(16, 8))
z = np.random.random(size=(16, 8))
r, p, t = cart2spherical(x, y, z)
xp, yp, zp = spherical2cart(r, p, t)
self.assertTrue(np.allclose(x, xp))
self.assertTrue(np.allclose(y, yp))
self.assertTrue(np.allclose(z, zp))
class TestPlaneMesh(unittest.TestCase):
def test_shape(self):
""" Test that shape is as expected """
extent1 = np.linspace(0, 10, num=64)
extent2 = np.linspace(0, 10, num=128)
v1, v2, _ = np.eye(3)
for arr in plane_mesh(v1, v2, extent1, extent2):
self.assertSequenceEqual(arr.shape, (64, 128))
def test_origin(self):
""" Test that plane_mesh is generated from origin """
extent1 = np.linspace(0, 10, num=64)
extent2 = np.linspace(0, 10, num=128)
v1, v2, _ = np.eye(3)
for arr in plane_mesh(v1, v2, extent1, extent2, origin=(-4, -4, -4)):
self.assertEqual(arr.min(), -4)
if __name__ == "__main__":
unittest.main()
| 1.976563 | 2 |
asterioids-pygame-project/source_code_step_5/space_rocks/utils.py | syberflea/materials | 3,682 | 105220 | <reponame>syberflea/materials
from pygame.image import load
from pygame.math import Vector2
def load_sprite(name, with_alpha=True):
path = f"assets/sprites/{name}.png"
loaded_sprite = load(path)
if with_alpha:
return loaded_sprite.convert_alpha()
else:
return loaded_sprite.convert()
def wrap_position(position, surface):
x, y = position
w, h = surface.get_size()
return Vector2(x % w, y % h)
| 1.726563 | 2 |
manage.py | hunter125555/burnleyffc2025 | 0 | 105348 | import os
import json
import argparse
import requests
from collections import Counter, OrderedDict
from angular_flask.core import mongo
from angular_flask import app
from angular_flask import helper
teamList = ['Arsenal', 'Brighton', 'Bournemouth', 'Burnley', 'Chelsea', 'Crystal Palace', 'Everton', 'Huddersfield', 'Leicester', 'Liverpool', 'Man City', 'Man Utd', 'Newcastle', 'Southampton', 'Stoke', 'Swansea', 'Spurs', 'Watford', 'West Brom', 'West Ham']
team_folder = os.path.join(os.getcwd(),'teams')
static_url = 'https://fantasy.premierleague.com/drf/bootstrap-static'
def soupify(url):
htmltext = requests.get(url)
data = htmltext.json()
return data
static_data = soupify(static_url)
def update_test():
with app.app_context():
test = mongo.db.test
val = test.find_one()['val']
test.find_one_and_update({'var': 'a'}, {'$set': {'val': val + 1}})
def update_current_gw():
with app.app_context():
currentgw = mongo.db.currentgw
eid = currentgw.find_one()['_id']
gw = static_data['current-event']
currentgw.find_one_and_update({'_id': eid}, {'$set': {'gw': gw}})
print 'Updated GW!'
def update_epl_teams():
with app.app_context():
eplteams = mongo.db.eplteams
eplteams.insert_many([{'name': str(team['name']), 'short': str(team['short_name']), 'id': team['id']} for team in static_data['teams']])
def update_epl_players():
with app.app_context():
eplplayers = mongo.db.eplplayers
eplplayers.delete_many({})
eplplayers.insert_many([{'id': player['id'], 'name': player['first_name'] + ' ' + player['second_name'], 'team': player['team'], 'pos': player['element_type']} for player in static_data['elements']])
def update_fpl_managers():
with app.app_context():
fplmanagers = mongo.db.fplmanagers
fplmanagers.delete_many({})
for team in teamList:
team = team.lower() + ".txt"
team_file = os.path.join(team_folder, team)
team_name, ffc_team = helper.read_in_team(team_file)
codenametuples = [(player[1][1], player[1][0]) for player in ffc_team.items()]
fplmanagers.insert_many([{'code': tup[0], 'name': tup[1]} for tup in codenametuples])
def update_ffcteams():
with app.app_context():
ffcteams = mongo.db.ffcteams
ffcteams.delete_many({})
for team in teamList:
teamfile= team.lower() + ".txt"
team_file = os.path.join(team_folder, teamfile)
team_name, ffc_team = helper.read_in_team(team_file)
fplcodes = [player[1][1] for player in ffc_team.items()]
ffcteams.insert_one({'team':team, 'codes': fplcodes})
def update_gw_fixtures():
with app.app_context():
gwfixtures = mongo.db.gwfixtures
gwfixtures.delete_many({})
gw = mongo.db.currentgw.find_one()['gw']
live_url = 'https://fantasy.premierleague.com/drf/event/%d/live' % gw
live_data = soupify(live_url)
gwfixtures.insert_many({'id': str(fix['id']), 'started': fix['started'], 'home': fix['team_h'], 'away': fix['team_a']} for fix in live_data['fixtures'])
def update_live_points():
with app.app_context():
print 'Updating!'
livepoints = mongo.db.livepoints
livepoints.delete_many({})
gw = mongo.db.currentgw.find_one()['gw']
live_url = 'https://fantasy.premierleague.com/drf/event/%d/live' % gw
live_data = soupify(live_url)['elements']
livepoints.insert_many({'id': str(player[0]), 'fixture': player[1]['explain'][0][1], 'points': player[1]['stats']['total_points']} for player in live_data.items())
#livepoints.insert_many({'id': str(player[0]), 'points': player[1]['stats']['total_points']} for player in live_data.items())
print 'Updated!'
def update_ffc_picks():
with app.app_context():
ffcteams = mongo.db.ffcteams
ffcpicks = mongo.db.ffcpicks
ffcpicks.delete_many({})
gw = mongo.db.currentgw.find_one()['gw']
for team in teamList:
obj = ffcteams.find_one({'team': team})
for code in obj['codes']:
picks_url = 'https://fantasy.premierleague.com/drf/entry/%d/event/%d/picks' % (code, gw)
try:
gw_data = soupify(picks_url)
except ValueError:
print picks_url
playing, bench = [], []
captain = None
vicecaptain = None
chip = gw_data['active_chip']
points = gw_data['entry_history']['points']
transcost = gw_data['entry_history']['event_transfers_cost']
for pick in gw_data['picks']:
if pick['is_captain']:
captain = pick['element']
if pick['is_vice_captain']:
vicecaptain = pick['element']
if pick['position'] >= 12:
bench.append(pick['element'])
else:
playing.append(pick['element'])
ffcpicks.insert_one({'code': code, 'points': points, 'captain': captain, 'vicecaptain': vicecaptain, 'chip': chip, 'cost': transcost, 'playing': playing, 'bench': bench})
def update_ffc_captains():
with app.app_context():
ffccaptains = mongo.db.ffccaptains
ffccaptains.delete_many({})
capCodes = [5609, 5065, 9350, 223, 776734, 76521, 350414, 1167796, 683730, 16471, 7243, 970393, 559067, 85810, 220254, 667989, 155651, 224000, 7496, 663518]
for team, capcode in zip(teamList, capCodes):
ffccaptains.insert_one({'team': team, 'captain': capcode})
def update_ffc_bench():
with app.app_context():
ffcbench = mongo.db.ffcbench
ffcbench.delete_many({})
benchCodes = [2447194, 896742, 90373, 343681, 801354, 755683, 672855, 3465481, 351798, 84958, 41341, 16553, 568263, 3758662, 743617, 540557, 673387, 220487, 29057, 514199]
for team, bcode in zip(teamList, benchCodes):
ffcbench.insert_one({'team': team, 'bench': bcode})
def analyze_tx_history():
with app.app_context():
order_dict = lambda d: OrderedDict(sorted(d.items(), key = lambda x: x[1], reverse=True))
eplplayers = mongo.db.eplplayers
playerlist = list(eplplayers.find())
tx_dict = dict()
for player in playerlist:
player_url = 'https://fantasy.premierleague.com/drf/element-summary/%d' % (player['id'])
player_history = soupify(player_url)['history']
tx_dict[player['name']] = 0
for w in player_history:
if w['transfers_balance'] >= 0:
tx_dict[player['name']] += 1
else:
tx_dict[player['name']] -= 1
import code; code.interact(local=locals())
final_dict = order_dict(tx_dict)
        print final_dict.items()[:20]
def update_for_gw():
update_current_gw()
update_gw_fixtures()
update_live_points()
update_ffc_picks()
def main():
parser = argparse.ArgumentParser(
description='Manage this Flask application.')
parser.add_argument(
'command', help='the name of the command you want to run')
args = parser.parse_args()
if args.command == 'update_gw':
update_current_gw()
print "GW Updated!"
elif args.command == 'update_eplteams':
update_epl_teams()
print "EPL Teams added!"
elif args.command == 'update_eplplayers':
update_epl_players()
print "EPL Players added!"
elif args.command == 'update_fplmanagers':
update_fpl_managers()
print "FPL Managers added!"
elif args.command == 'update_ffcteams':
update_ffcteams()
print "FFC Teams added!"
elif args.command == 'update_gwfixtures':
update_gw_fixtures()
print "GW Fixtures added!"
elif args.command == 'update_live_points':
update_live_points()
print "Live points added!"
elif args.command == 'update_ffc_picks':
update_ffc_picks()
print "FFC picks added!"
elif args.command == 'update_ffc_captains':
update_ffc_captains()
print "FFC captains added!"
elif args.command == 'update_ffc_bench':
update_ffc_bench()
print "FFC bench updated!"
elif args.command == 'update_for_gw':
update_for_gw()
print "Update for GW complete!"
elif args.command == 'tx':
analyze_tx_history()
elif args.command == 'test':
update_test()
print "Test pass!"
else:
raise Exception('Invalid command')
if __name__ == '__main__':
main()
| 1.546875 | 2 |
sw/dbsync/dbsync.py | simwr872/dbsync | 0 | 105476 | import time
from collections import ChainMap
from typing import Any, Callable, Literal, Optional, Sequence, TypedDict
from .dbapi import Cursor
RowType = dict[str, Any]
MessageTableType = TypedDict("Table", {"modified": list[RowType], "deleted": list[RowType]})
MessageType = TypedDict("Message", {"timestamp": int, "table": dict[str, MessageTableType]})
def row_factory(cursor: Cursor, row: Sequence[Any]) -> RowType:
"""Convert database row to dictionary."""
return {column[0]: row[index] for index, column in enumerate(cursor.description)}
class Table:
def __init__(
self,
name: str,
primary_columns: list[str],
schema: dict[str, Any],
extra_columns: list[str],
param: str,
):
self._name = name
self._schema = schema
self._primary_columns = primary_columns
self._extra_columns = extra_columns
self._all_columns = list(schema.keys())
all_columns = ",".join(self._all_columns)
extra_conditions = [f"{column}={param}" for column in extra_columns]
timestamp_conditions = [f"{param}<timestamp", f"timestamp<{param}"]
modified_conditions = " AND ".join(timestamp_conditions + extra_conditions)
self._modified_query = f"SELECT {all_columns} FROM {name} WHERE {modified_conditions}"
insert_columns = self._all_columns + ["timestamp"] + extra_columns
params = ",".join([param] * len(insert_columns))
insert_columns = ",".join(insert_columns)
self._modify_query = f"INSERT OR REPLACE INTO {name}({insert_columns}) VALUES({params})"
primary_conditions = [f"{column}={param}" for column in primary_columns]
update_conditions = " AND ".join(primary_conditions + extra_conditions)
self._delete_query = f"UPDATE {name} SET timestamp={param} WHERE {update_conditions}"
deleted_columns = ",".join(primary_columns)
self._deleted_query = f"SELECT {deleted_columns} FROM {name} WHERE {modified_conditions}"
def graverob(self, cursor: Cursor, timestamp: int) -> None:
cursor.execute(f"DELETE FROM {self._name} WHERE {timestamp}<timestamp AND timestamp<0")
def synchronize(
self,
cursor: Cursor,
last_timestamp: int,
current_timestamp: int,
table: MessageTableType,
**extras: Any,
) -> MessageTableType:
extra_values = [extras[column] for column in self._extra_columns]
cursor.executemany(
self._modify_query,
[
[row[column] for column in self._all_columns] + [current_timestamp] + extra_values
for row in table["modified"]
],
)
cursor.executemany(
self._delete_query,
[
[-current_timestamp]
+ [row[column] for column in self._primary_columns]
+ extra_values
for row in table["deleted"]
],
)
cursor.execute(self._modified_query, [last_timestamp, current_timestamp] + extra_values)
modified = [row_factory(cursor, row) for row in cursor.fetchall()]
cursor.execute(self._deleted_query, [-current_timestamp, -last_timestamp] + extra_values)
deleted = [row_factory(cursor, row) for row in cursor.fetchall()]
return {
"modified": modified,
"deleted": deleted,
}
def schema_definition(self) -> dict[str, Any]:
return {
f"{self._name}_deleted": {
"type": "object",
"properties": {column: self._schema[column] for column in self._primary_columns},
"additionalProperties": False,
"required": self._primary_columns,
},
f"{self._name}_modified": {
"type": "object",
"properties": self._schema,
"additionalProperties": False,
"required": self._all_columns,
},
}
class Database:
_paramstyles = {"qmark": "?", "format": "%s"}
def __init__(
self,
extra_columns: Optional[list[str]] = None,
counter: Callable[[], int] = lambda: int(time.time()),
paramstyle: Literal["qmark", "format"] = "qmark",
):
"""
`extra_columns` is a list of extra column names required to identify a row. `counter` is a
function returning the next timestamp. `paramstyle` is the DBAPI module paramstyle.
"""
self._param = self._paramstyles[paramstyle]
if extra_columns is None:
extra_columns = []
self._extra_columns = extra_columns
self._tables: dict[str, Table] = {}
self._counter = counter
def add_table(self, name: str, primary_columns: list[str], schema: dict[str, Any]) -> None:
self._tables[name] = Table(name, primary_columns, schema, self._extra_columns, self._param)
def graverob(self, cursor: Cursor, delta: int = 2592000) -> None:
"""Commit and rollback must be handled by caller."""
timestamp = -self._counter() + delta
for table in self._tables.values():
table.graverob(cursor, timestamp)
def synchronize(self, cursor: Cursor, request: MessageType, **extras: Any) -> MessageType:
"""
        Commit and rollback must be handled by the caller. `extras` are keyword
        arguments matching the `extra_columns` previously supplied at Database
        initialization.
"""
last_timestamp = request["timestamp"]
current_timestamp = self._counter()
response: MessageType = {"timestamp": current_timestamp, "table": {}}
for name, table in request["table"].items():
response["table"][name] = self._tables[name].synchronize(
cursor, last_timestamp, current_timestamp, table, **extras
)
return response
def schema(self) -> dict[str, Any]:
"""Returns the database's jsonschema."""
return {
"definitions": {
**ChainMap(*[table.schema_definition() for table in self._tables.values()]),
"table": {
"type": "object",
"properties": {
"modified": {"type": "array"},
"deleted": {"type": "array"},
},
"additionalProperties": False,
"required": ["modified", "deleted"],
},
},
"type": "object",
"properties": {
"timestamp": {"type": "integer", "minimum": 0, "maximum": 2 ** 31 - 1},
"table": {
"type": "object",
"properties": {
name: {
"allOf": [
{"$ref": "#/definitions/table"},
{
"properties": {
"modified": {
"items": {"$ref": f"#/definitions/{name}_modified"}
},
"deleted": {
"items": {"$ref": f"#/definitions/{name}_deleted"}
},
}
},
]
}
for name in self._tables.keys()
},
"additionalProperties": False,
"required": list(self._tables.keys()),
},
},
"additionalProperties": False,
"required": ["timestamp", "table"],
}
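# --- Hedged usage sketch (added for illustration only). The exact table layout
# is an assumption inferred from the queries built above: each synchronized
# table needs its schema columns plus a `timestamp` column, and client-side
# deletions are tombstoned with a negative timestamp.
if __name__ == "__main__":
    import sqlite3
    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("CREATE TABLE note (id INTEGER PRIMARY KEY, body TEXT, timestamp INTEGER)")
    db = Database(paramstyle="qmark")
    db.add_table("note", ["id"], {"id": {"type": "integer"}, "body": {"type": "string"}})
    request: MessageType = {
        "timestamp": 0,
        "table": {"note": {"modified": [{"id": 1, "body": "hello"}], "deleted": []}},
    }
    response = db.synchronize(cur, request)
    conn.commit()
    print(response["timestamp"], response["table"]["note"])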
| 1.953125 | 2 |
calaccess_processed_elections/proxies/opencivicdata/elections/candidatecontests.py | ryanvmenezes/django-calaccess-processed-data | 10 | 105604 | <reponame>ryanvmenezes/django-calaccess-processed-data<filename>calaccess_processed_elections/proxies/opencivicdata/elections/candidatecontests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
from __future__ import unicode_literals
from opencivicdata.elections.models import (
CandidateContest,
CandidateContestPost,
CandidateContestSource
)
from calaccess_processed.proxies import OCDProxyModelMixin
from calaccess_processed.managers import BulkLoadSQLManager
from calaccess_processed_elections.managers import OCDCandidateContestManager
class OCDCandidateContestProxy(CandidateContest, OCDProxyModelMixin):
"""
A proxy on the OCD CandidateContest model with helper methods.
"""
objects = OCDCandidateContestManager()
copy_to_fields = (
('id',),
('name',),
('division_id',),
('election_id',),
('party_id',),
('previous_term_unexpired',),
('number_elected',),
('runoff_for_contest_id',),
('created_at',),
('updated_at',),
('extras',),
('locked_fields',),
)
class Meta:
"""
Make this a proxy model.
"""
app_label = "calaccess_processed_elections"
proxy = True
def get_parent(self):
"""
Returns the undecided contest that preceeded runoff_contest.
Returns None if it can't be found.
"""
# Get the contest's post (should only ever be one per contest)
post = self.posts.all()[0].post
# Then try getting the most recent contest for the same post
        # that precedes the runoff contest
try:
return CandidateContest.objects.filter(
posts__post=post,
election__date__lt=self.election.date,
).latest('election__date')
except CandidateContest.DoesNotExist:
return None
class OCDCandidateContestPostProxy(CandidateContestPost, OCDProxyModelMixin):
"""
A proxy on the OCD CandidateContestPost model.
"""
objects = BulkLoadSQLManager()
class Meta:
"""
Make this a proxy model.
"""
app_label = "calaccess_processed_elections"
proxy = True
class OCDCandidateContestSourceProxy(CandidateContestSource, OCDProxyModelMixin):
"""
A proxy on the OCD CandidateContestSource model.
"""
objects = BulkLoadSQLManager()
class Meta:
"""
Make this a proxy model.
"""
app_label = "calaccess_processed_elections"
proxy = True
| 1.359375 | 1 |
stein/samplers/abstract_stein_sampler.py | JamesBrofos/Stein | 3 | 105732 | <reponame>JamesBrofos/Stein
import numpy as np
import tensorflow as tf
from abc import abstractmethod
from ..utilities import convert_array_to_dictionary, convert_dictionary_to_array
class AbstractSteinSampler:
"""Abstract Stein Sampler Class
This class implements the algorithm from the paper "Stein Variational
Gradient Descent: A General Purpose Bayesian Inference Algorithm" by Liu and
Wang. This algorithm provides a mechanism for sampling from arbitrary
distributions provided that the gradient of the distribution can be computed
with respect to the input.
The Stein variational gradient descent algorithm seeks to identify the
optimal perturbation direction for a set of particles that will be
iteratively transformed such that the empirical distribution of the
particles can be seen to approximate a sample from the distribution. This is
achieved by minimizing the KL-divergence between the samples and the target
distribution, the optimal decrease direction for which can be obtained in
closed-form and approximated via sampling. In particular, we compute the
direction of greatest decrease subject to a set of functions of bounded norm
within a reproducing kernel Hilbert space (RKHS).
"""
def __init__(self, n_particles, log_p, theta=None):
"""Initialize the parameters of the abstract Stein sampler object.
n_particles (int): The number of particles to use in the algorithm.
This is equivalently the number of samples to generate from the
target distribution.
log_p (TensorFlow tensor): A TensorFlow object corresponding to the
log-posterior distribution from which parameters wish to be
sampled. We only need to define the log-posterior up to an
addative constant since we'll simply take the gradient with
respect to the inputs and this term will vanish.
theta (numpy array, optional): An optional parameter corresponding
to the initial values of the particles. The dimension of this
array (if it is provided) should be the number of particles by
the number of random variables (parameters) to be sampled. If
this value is not provided, then the initial particles will be
generated by sampling from a multivariate standard normal
distribution.
"""
# Number of particles.
self.n_particles = n_particles
# Construct a TensorFlow session.
self.sess = tf.Session()
self.model_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, "model"
)
# Create class variables for the log-posterior and the gradient of the
# log-posterior with respect to model parameters.
self.log_p = log_p
self.grad_log_p = tf.gradients(self.log_p, self.model_vars)
# If particles are provided, then use them. Otherwise, particles are
# initialized by sampling from a standard normal distribution. This
# latter method works well for relatively simple models, but better
# initialization is required for complex distributions such as those
# that are parametrized by neural networks.
#
# Notice that `theta` is a dictionary that maps model parameters to a
# matrix representing the value of that parameter for each of the
# particles.
if theta is not None:
self.theta = theta
else:
self.theta = {
v: np.random.normal(
size=[self.n_particles] + v.get_shape().as_list()
) * 0.01
for v in self.model_vars
}
def compute_phi(self, theta_array, grads_array):
"""Assuming a reproducing kernel Hilbert space with associated kernel,
this function computes the optimal perturbation in the particles under
functions in the unit ball under the norm of the RKHS. This perturbation
can be regarded as the direction that will maximally decrease the
KL-divergence between the empirical distribution of the particles and
the target distribution.
Parameters:
theta_array (numpy array): A two-dimensional matrix with dimensions
equal to the number of particles by the number of parameters.
This is the matrix representation of the particles.
grads_array (numpy array): A two-dimensional matrix with dimensions
equal to the number of particles by the number of parameters.
This is the matrix representation of the gradient of the
log-posterior with respect to the particles.
Returns:
Numpy array: A two-dimensional matrix with dimensions equal to the
number of particles by the number of parameters. This is the
update value corresponding to the optimal perturbation direction
given by Stein variational gradient descent.
"""
# Extract the number of particles and number of parameters.
n_particles, n_params = grads_array.shape
# Compute the kernel matrices and gradient with respect to the
# particles.
K, dK = self.kernel.kernel_and_grad(theta_array)
return (K.dot(grads_array) + dK) / n_particles
def update_particles(self, grads_array):
"""Internal method that computes the optimal perturbation direction
given the current set of particles and the gradient of the
log-posterior. Notice that this method applies the gradient descent
update and normalizes the gradient to have a given norm. Computation of
the optimal perturbation direction is broken out into the method
`compute_phi`.
Parameters:
            grads_array (numpy array): A two-dimensional matrix containing the
                gradient of the log-posterior with respect to each particle.
        """
        # Convert the particle dictionary into a vector representation.
theta_array, access_indices = convert_dictionary_to_array(self.theta)
# Compute optimal update direction.
phi = self.compute_phi(theta_array, grads_array)
        # Normalize the gradient to have norm no larger than the desired amount.
phi *= 10. / max(10., np.linalg.norm(phi))
theta_array += self.gd.update(phi)
self.theta = convert_array_to_dictionary(theta_array, access_indices)
def function_posterior(self, func, feed_dict, axis=None):
"""This method computes a posterior distribution of a provided function
under the posterior distribution learned via Stein variational gradient
descent. This function can be used to produce posterior mean squared
errors and the posterior mean log-likelihood. This function includes an
optional parameter to compute the average of the function's posterior.
Parameters:
func (TensorFlow Tensor): A function to be executed in a TensorFlow
session. The output of the function are averaged across all of
the posterior samples under the Bayesian model.
feed_dict (dictionary): A dictionary mapping TensorFlow placeholders
to provided values.
            axis (int, optional): Determines whether the average of the
                posterior distribution should be computed instead of simply
                returning samples. If not None, the output of the function is
                averaged across the specified dimension.
        Returns:
            Numpy array or Float: If `axis` is not None then the posterior mean
                is returned. Otherwise, a numpy array of samples from the
                function's posterior is returned.
"""
# Initialize a vector to store the value of the function for each particle.
dist = []
# Iterate over each particle and compute the value of the function for
# that posterior sample.
for i in range(self.n_particles):
feed_dict.update({v: x[i] for v, x in self.theta.items()})
dist.append(np.ravel(self.sess.run(func, feed_dict)))
# Convert to a numpy array.
dist = np.array(dist)
# Either return posterior samples of the input function or the posterior
# mean.
if axis is not None:
return dist.mean(axis=axis)
else:
return dist
@abstractmethod
def train_on_batch(self, batch_feed):
"""Trains the Stein variational gradient descent algorithm on a given
batch of data (provided in the form of a TensorFlow feed dictionary).
This function computes the gradient of the log-likelihood for each
sampling particle and then computes the optimal perturbation using the
formula provided in the Stein variational gradient descent paper. Notice
that, like the particles themselves, the gradients are represented as
dictionaries that allow to keep the gradients distinct for each
parameter.
Notice that this function does not update the TensorFlow variables used
to define the model. This wouldn't make sense in the first place since
there are multiple particles corresponding to a random draw of those
variables. Instead, the `theta` class variable is a dictionary that
stores all of the particle values for each parameter in the model.
Parameters:
batch_feed (dict): A dictionary that maps TensorFlow placeholders to
provided values. For instance, this might be mappings of feature
and target placeholders to batch values. Notice that this feed
dictionary will be internally augmented to include the current
feed values for the model parameters for each particle.
"""
raise NotImplementedError()
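# --- Hedged illustration (added; not part of the original package). The sampler
# above delegates to `self.kernel.kernel_and_grad(theta_array)` and
# `self.gd.update(phi)`, both supplied elsewhere in this repository. A minimal
# RBF kernel satisfying that interface, using the common median-heuristic
# bandwidth, might look like the sketch below; the class name and details are
# assumptions made purely for illustration.
class _SketchRBFKernel(object):
    def kernel_and_grad(self, theta):
        # Pairwise squared distances between particles.
        sq_dists = np.sum((theta[:, None, :] - theta[None, :, :]) ** 2, axis=-1)
        n = theta.shape[0]
        # Median heuristic for the bandwidth (small constant avoids division by zero).
        h = np.median(sq_dists) / np.log(n + 1.) + 1e-8
        K = np.exp(-sq_dists / h)
        # dK[i] = sum_j grad_{theta_j} k(theta_j, theta_i) for this RBF kernel.
        dK = (2. / h) * (K.sum(axis=1)[:, None] * theta - K.dot(theta))
        return K, dK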
| 2.859375 | 3 |
openerp/addons/l10n_lu/wizard/pdf_ext.py | ntiufalara/openerp7 | 3 | 105860 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:<EMAIL>
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
fields names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
import os
from openerp import tools
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
out.write(TAIL)
def extract_keys_from_pdf(filename):
# what about using 'pdftk filename dump_data_fields' and parsing the output ?
os.system('pdftk %s generate_fdf output /tmp/toto.fdf' % filename)
lines = file('/tmp/toto.fdf').readlines()
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
write_fields(file('/tmp/toto.fdf', 'w'), fields)
os.system('pdftk %s fill_form /tmp/toto.fdf output %s flatten' % (infile, outfile))
def testfill_pdf(infile, outfile):
    keys = extract_keys_from_pdf(infile)
    # write_fields() expects a mapping of field name -> value
    fields = {}
    for key in keys:
        fields[key] = key
    fill_pdf(infile, outfile, fields)
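# Hedged usage sketch (added for illustration): fill_pdf() expects a mapping of
# field names to values and shells out to pdftk, so the file names below are
# placeholders and pdftk must be installed for this to run.
#
# fields = {'partner_name': 'Acme S.A.', 'vat_number': 'LU12345678'}
# fill_pdf('declaration_template.pdf', '/tmp/declaration_filled.pdf', fields)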
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 1.671875 | 2 |
frappe-bench/apps/erpnext/erpnext/shopping_cart/test_shopping_cart.py | Semicheche/foa_frappe_docker | 0 | 105988 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import nowdate, add_months
from erpnext.shopping_cart.cart import _get_cart_quotation, update_cart, get_party
from erpnext.tests.utils import create_test_contact_and_address
# test_dependencies = ['Payment Terms Template']
class TestShoppingCart(unittest.TestCase):
"""
Note:
Shopping Cart == Quotation
"""
def setUp(self):
frappe.set_user("Administrator")
create_test_contact_and_address()
self.enable_shopping_cart()
def tearDown(self):
frappe.set_user("Administrator")
self.disable_shopping_cart()
def test_get_cart_new_user(self):
self.login_as_new_user()
# test if lead is created and quotation with new lead is fetched
quotation = _get_cart_quotation()
self.assertEqual(quotation.quotation_to, "Customer")
self.assertEqual(quotation.contact_person,
frappe.db.get_value("Contact", dict(email_id="<EMAIL>")))
self.assertEqual(quotation.lead, None)
self.assertEqual(quotation.contact_email, frappe.session.user)
return quotation
def test_get_cart_customer(self):
self.login_as_customer()
# test if quotation with customer is fetched
quotation = _get_cart_quotation()
self.assertEqual(quotation.quotation_to, "Customer")
self.assertEqual(quotation.customer, "_Test Customer")
self.assertEqual(quotation.lead, None)
self.assertEqual(quotation.contact_email, frappe.session.user)
return quotation
def test_add_to_cart(self):
self.login_as_customer()
# remove from cart
self.remove_all_items_from_cart()
# add first item
update_cart("_Test Item", 1)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[0].item_code, "_Test Item")
self.assertEqual(quotation.get("items")[0].qty, 1)
self.assertEqual(quotation.get("items")[0].amount, 10)
# add second item
update_cart("_Test Item 2", 1)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[1].item_code, "_Test Item 2")
self.assertEqual(quotation.get("items")[1].qty, 1)
self.assertEqual(quotation.get("items")[1].amount, 20)
self.assertEqual(len(quotation.get("items")), 2)
def test_update_cart(self):
# first, add to cart
self.test_add_to_cart()
# update first item
update_cart("_Test Item", 5)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[0].item_code, "_Test Item")
self.assertEqual(quotation.get("items")[0].qty, 5)
self.assertEqual(quotation.get("items")[0].amount, 50)
self.assertEqual(quotation.net_total, 70)
self.assertEqual(len(quotation.get("items")), 2)
def test_remove_from_cart(self):
# first, add to cart
self.test_add_to_cart()
# remove first item
update_cart("_Test Item", 0)
quotation = self.test_get_cart_customer()
self.assertEqual(quotation.get("items")[0].item_code, "_Test Item 2")
self.assertEqual(quotation.get("items")[0].qty, 1)
self.assertEqual(quotation.get("items")[0].amount, 20)
self.assertEqual(quotation.net_total, 20)
self.assertEqual(len(quotation.get("items")), 1)
def test_tax_rule(self):
self.login_as_customer()
quotation = self.create_quotation()
from erpnext.accounts.party import set_taxes
tax_rule_master = set_taxes(quotation.customer, "Customer", \
quotation.transaction_date, quotation.company, None, None, \
quotation.customer_address, quotation.shipping_address_name, 1)
self.assertEqual(quotation.taxes_and_charges, tax_rule_master)
self.assertEqual(quotation.total_taxes_and_charges, 1000.0)
self.remove_test_quotation(quotation)
def create_quotation(self):
quotation = frappe.new_doc("Quotation")
values = {
"doctype": "Quotation",
"quotation_to": "Customer",
"order_type": "Shopping Cart",
"customer": get_party(frappe.session.user).name,
"docstatus": 0,
"contact_email": frappe.session.user,
"selling_price_list": "_Test Price List Rest of the World",
"currency": "USD",
"taxes_and_charges" : "_Test Tax 1 - _TC",
"conversion_rate":1,
"transaction_date" : nowdate(),
"valid_till" : add_months(nowdate(), 1),
"items": [{
"item_code": "_Test Item",
"qty": 1
}],
"taxes": frappe.get_doc("Sales Taxes and Charges Template", "_Test Tax 1 - _TC").taxes,
"company": "_Test Company"
}
quotation.update(values)
quotation.insert(ignore_permissions=True)
return quotation
def remove_test_quotation(self, quotation):
frappe.set_user("Administrator")
quotation.delete()
# helper functions
def enable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.update({
"enabled": 1,
"company": "_Test Company",
"default_customer_group": "_Test Customer Group",
"quotation_series": "_T-Quotation-",
"price_list": "_Test Price List India"
})
# insert item price
if not frappe.db.get_value("Item Price", {"price_list": "_Test Price List India",
"item_code": "_Test Item"}):
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item",
"price_list_rate": 10
}).insert()
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item 2",
"price_list_rate": 20
}).insert()
settings.save()
frappe.local.shopping_cart_settings = None
def disable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.enabled = 0
settings.save()
frappe.local.shopping_cart_settings = None
def login_as_new_user(self):
self.create_user_if_not_exists("<EMAIL>")
frappe.set_user("<EMAIL>")
def login_as_customer(self):
self.create_user_if_not_exists("<EMAIL>",
"_Test Contact For _Test Customer")
frappe.set_user("<EMAIL>")
def remove_all_items_from_cart(self):
quotation = _get_cart_quotation()
quotation.flags.ignore_permissions=True
quotation.delete()
def create_user_if_not_exists(self, email, first_name = None):
if frappe.db.exists("User", email):
return
frappe.get_doc({
"doctype": "User",
"user_type": "Website User",
"email": email,
"send_welcome_email": 0,
"first_name": first_name or email.split("@")[0]
}).insert(ignore_permissions=True)
test_dependencies = ["Sales Taxes and Charges Template", "Price List", "Item Price", "Shipping Rule", "Currency Exchange",
"Customer Group", "Lead", "Customer", "Contact", "Address", "Item", "Tax Rule"]
| 1.609375 | 2 |
gaussian_methods.py | jelena-markovic/compare-selection | 0 | 106116 | import tempfile, os, glob
from scipy.stats import norm as ndist
from traitlets import (HasTraits,
Integer,
Unicode,
Float,
Integer,
Instance,
Dict,
Bool,
default)
import numpy as np
import regreg.api as rr
from selection.algorithms.lasso import lasso, lasso_full, lasso_full_modelQ
from selection.algorithms.sqrt_lasso import choose_lambda
from selection.truncated.gaussian import truncated_gaussian_old as TG
from selection.randomized.lasso import lasso as random_lasso_method, form_targets
from selection.randomized.modelQ import modelQ as randomized_modelQ
from utils import BHfilter
from selection.randomized.base import restricted_estimator
# Rpy
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
methods = {}
class generic_method(HasTraits):
need_CV = False
selectiveR_method = False
wide_ok = True # ok for p>= n?
# Traits
q = Float(0.2)
method_name = Unicode('Generic method')
model_target = Unicode()
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
(self.X,
self.Y,
self.l_theory,
self.l_min,
self.l_1se,
self.sigma_reid) = (X,
Y,
l_theory,
l_min,
l_1se,
sigma_reid)
def select(self):
raise NotImplementedError('abstract method')
@classmethod
def register(cls):
methods[cls.__name__] = cls
def selected_target(self, active, beta):
C = self.feature_cov[active]
Q = C[:,active]
return np.linalg.inv(Q).dot(C.dot(beta))
def full_target(self, active, beta):
return beta[active]
def get_target(self, active, beta):
if self.model_target not in ['selected', 'full']:
raise ValueError('Gaussian methods only have selected or full targets')
if self.model_target == 'full':
return self.full_target(active, beta)
else:
return self.selected_target(active, beta)
# Knockoff selection
class knockoffs_mf(generic_method):
method_name = Unicode('Knockoffs')
knockoff_method = Unicode('Second order')
model_target = Unicode("full")
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_mf.register()
class knockoffs_sigma(generic_method):
factor_method = 'asdp'
method_name = Unicode('Knockoffs')
knockoff_method = Unicode("ModelX (asdp)")
model_target = Unicode("full")
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
numpy2ri.activate()
# see if we've factored this before
have_factorization = False
if not os.path.exists('.knockoff_factorizations'):
os.mkdir('.knockoff_factorizations')
factors = glob.glob('.knockoff_factorizations/*npz')
for factor_file in factors:
factor = np.load(factor_file)
feature_cov_f = factor['feature_cov']
if ((feature_cov_f.shape == feature_cov.shape) and
(factor['method'] == cls.factor_method) and
np.allclose(feature_cov_f, feature_cov)):
have_factorization = True
print('found factorization: %s' % factor_file)
cls.knockoff_chol = factor['knockoff_chol']
if not have_factorization:
print('doing factorization')
cls.knockoff_chol = factor_knockoffs(feature_cov, cls.factor_method)
numpy2ri.deactivate()
def select(self):
numpy2ri.activate()
rpy.r.assign('chol_k', self.knockoff_chol)
rpy.r('''
knockoffs = function(X) {
mu = rep(0, ncol(X))
mu_k = X # sweep(X, 2, mu, "-") %*% SigmaInv_s
X_k = mu_k + matrix(rnorm(ncol(X) * nrow(X)), nrow(X)) %*%
chol_k
return(X_k)
}
''')
numpy2ri.deactivate()
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=knockoffs)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_sigma.register()
def factor_knockoffs(feature_cov, method='asdp'):
numpy2ri.activate()
rpy.r.assign('Sigma', feature_cov)
rpy.r.assign('method', method)
rpy.r('''
# Compute the Cholesky -- from create.gaussian
diag_s = diag(switch(method, equi = create.solve_equi(Sigma),
sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))
if (is.null(dim(diag_s))) {
diag_s = diag(diag_s, length(diag_s))
}
SigmaInv_s = solve(Sigma, diag_s)
Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s
chol_k = chol(Sigma_k)
''')
knockoff_chol = np.asarray(rpy.r('chol_k'))
SigmaInv_s = np.asarray(rpy.r('SigmaInv_s'))
diag_s = np.asarray(rpy.r('diag_s'))
np.savez('.knockoff_factorizations/%s.npz' % (os.path.split(tempfile.mkstemp()[1])[1],),
method=method,
feature_cov=feature_cov,
knockoff_chol=knockoff_chol)
return knockoff_chol
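# Hedged NumPy sketch (added for illustration; not used by the methods below).
# The R snippet above factors the knockoff covariance
#     Sigma_k = 2 * diag(s) - diag(s) Sigma^{-1} diag(s).
# Assuming the vector `s` has already been chosen (the step performed in R by
# knockoff::create.solve_equi/sdp/asdp), the same factor could be computed as:
def _factor_knockoffs_numpy_sketch(feature_cov, s):
    diag_s = np.diag(s)
    SigmaInv_s = np.linalg.solve(feature_cov, diag_s)
    Sigma_k = 2 * diag_s - diag_s.dot(SigmaInv_s)
    # R's chol() returns the upper-triangular factor; transpose to match it.
    return np.linalg.cholesky(Sigma_k).T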
class knockoffs_sigma_equi(knockoffs_sigma):
knockoff_method = Unicode('ModelX (equi)')
factor_method = 'equi'
knockoffs_sigma_equi.register()
class knockoffs_orig(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Candes & Barber')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, statistic=stat.glmnet_lambdadiff, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
V = np.asarray(V, np.int)
return V, V
except:
return [], []
knockoffs_orig.register()
class knockoffs_fixed(generic_method):
    wide_ok = False # requires at least n>p
method_name = Unicode("Knockoffs")
knockoff_method = Unicode('Fixed')
model_target = Unicode('full')
def select(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('Y', self.Y)
rpy.r.assign('q', self.q)
rpy.r('V=knockoff.filter(X, Y, fdr=q, knockoffs=create.fixed)$selected')
rpy.r('if (length(V) > 0) {V = V-1}')
V = rpy.r('V')
numpy2ri.deactivate()
return np.asarray(V, np.int), np.asarray(V, np.int)
except:
return [], []
knockoffs_fixed.register()
# Liu, Markovic, Tibs selection
class parametric_method(generic_method):
confidence = Float(0.95)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
generic_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self._fit = False
def select(self):
if not self._fit:
self.method_instance.fit()
self._fit = True
active_set, pvalues = self.generate_pvalues()
if len(pvalues) > 0:
selected = [active_set[i] for i in BHfilter(pvalues, q=self.q)]
return selected, active_set
else:
return [], active_set
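# Hedged note (illustration only): BHfilter is imported from this repository's
# utils module; the selection step above assumes it returns the indices kept by
# a Benjamini-Hochberg step at level q, along the lines of the sketch below.
def _BHfilter_sketch(pvalues, q=0.2):
    pvalues = np.asarray(pvalues)
    m = pvalues.shape[0]
    order = np.argsort(pvalues)
    passed = np.nonzero(pvalues[order] <= q * (np.arange(m) + 1.) / m)[0]
    if passed.shape[0] == 0:
        return np.array([], np.int)
    # Reject the k smallest p-values, where k is the largest index that passed.
    return order[:passed.max() + 1]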
class liu_theory(parametric_method):
sigma_estimator = Unicode('relaxed')
method_name = Unicode("Liu")
lambda_choice = Unicode("theory")
model_target = Unicode("full")
dispersion = Float(0.)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
n, p = X.shape
if n < p:
self.method_name = 'ROSI'
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
if not self._fit:
self.method_instance.fit()
self._fit = True
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
if len(L.active) > 0:
if self.sigma_estimator == 'reid' and n < p:
dispersion = self.sigma_reid**2
elif self.dispersion != 0:
dispersion = self.dispersion
else:
dispersion = None
S = L.summary(compute_intervals=compute_intervals, dispersion=dispersion)
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
active_set = np.array(S['variable'])
lower, upper = np.asarray(S['lower_confidence']), np.asarray(S['upper_confidence'])
return active_set, lower, upper
else:
return [], [], []
liu_theory.register()
class liu_aggressive(liu_theory):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
liu_aggressive.register()
class liu_modelQ_pop_aggressive(liu_aggressive):
method_name = Unicode("Liu (ModelQ population)")
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full_modelQ(self.feature_cov * n, self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
liu_modelQ_pop_aggressive.register()
class liu_modelQ_semi_aggressive(liu_aggressive):
method_name = Unicode("Liu (ModelQ semi-supervised)")
B = 10000 # how many samples to use to estimate E[XX^T]
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
cls._chol = np.linalg.cholesky(feature_cov)
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
# draw sample of X for semi-supervised method
_chol = self._chol
p = _chol.shape[0]
Q = 0
batch_size = int(self.B/10)
for _ in range(10):
X_semi = np.random.standard_normal((batch_size, p)).dot(_chol.T)
Q += X_semi.T.dot(X_semi)
Q += self.X.T.dot(self.X)
Q /= (10 * batch_size + self.X.shape[0])
n, p = self.X.shape
self._method_instance = lasso_full_modelQ(Q * self.X.shape[0], self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
liu_modelQ_semi_aggressive.register()
class liu_sparseinv_aggressive(liu_aggressive):
method_name = Unicode("ROSI")
"""
Force the use of the debiasing matrix.
"""
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
self._method_instance.sparse_inverse = True
return self._method_instance
liu_sparseinv_aggressive.register()
class liu_aggressive_reid(liu_aggressive):
sigma_estimator = Unicode('Reid')
pass
liu_aggressive_reid.register()
class liu_CV(liu_theory):
need_CV = True
lambda_choice = Unicode("CV")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_min * np.ones(X.shape[1])
liu_CV.register()
class liu_1se(liu_theory):
need_CV = True
lambda_choice = Unicode("1se")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
liu_1se.register()
class liu_sparseinv_1se(liu_1se):
method_name = Unicode("ROSI")
"""
Force the use of the debiasing matrix.
"""
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
self._method_instance.sparse_inverse = True
return self._method_instance
liu_sparseinv_1se.register()
class liu_sparseinv_1se_known(liu_1se):
method_name = Unicode("ROSI - known")
dispersion = Float(1.)
"""
Force the use of the debiasing matrix.
"""
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso_full.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
self._method_instance.sparse_inverse = True
return self._method_instance
liu_sparseinv_1se_known.register()
class liu_R_theory(liu_theory):
selectiveR_method = True
method_name = Unicode("Liu (R code)")
def generate_pvalues(self):
try:
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('y', self.Y)
rpy.r.assign('sigma_reid', self.sigma_reid)
rpy.r('y = as.numeric(y)')
rpy.r.assign('lam', self.lagrange[0])
rpy.r('''
p = ncol(X);
n = nrow(X);
sigma_est = 1.
if (p >= n) {
sigma_est = sigma_reid
} else {
sigma_est = sigma(lm(y ~ X - 1))
}
penalty_factor = rep(1, p);
lam = lam / sqrt(n); # lambdas are passed a sqrt(n) free from python code
soln = selectiveInference:::solve_problem_glmnet(X, y, lam, penalty_factor=penalty_factor, loss="ls")
PVS = selectiveInference:::inference_group_lasso(X, y,
soln, groups=1:ncol(X),
lambda=lam, penalty_factor=penalty_factor,
sigma_est, loss="ls", algo="Q",
construct_ci=FALSE)
active_vars=PVS$active_vars - 1 # for 0-based
pvalues = PVS$pvalues
''')
pvalues = np.asarray(rpy.r('pvalues'))
active_set = np.asarray(rpy.r('active_vars'))
numpy2ri.deactivate()
if len(active_set) > 0:
return active_set, pvalues
else:
return [], []
except:
return [np.nan], [np.nan] # some R failure occurred
liu_R_theory.register()
class liu_R_aggressive(liu_R_theory):
lambda_choice = Unicode('aggressive')
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
liu_R_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
liu_R_aggressive.register()
class lee_full_R_theory(liu_theory):
    wide_ok = False # requires at least n>p
method_name = Unicode("Lee (R code)")
selectiveR_method = True
def generate_pvalues(self):
numpy2ri.activate()
rpy.r.assign('x', self.X)
rpy.r.assign('y', self.Y)
rpy.r('y = as.numeric(y)')
rpy.r.assign('sigma_reid', self.sigma_reid)
rpy.r.assign('lam', self.lagrange[0])
rpy.r('''
sigma_est=sigma_reid
n = nrow(x);
gfit = glmnet(x, y, standardize=FALSE, intercept=FALSE)
lam = lam / sqrt(n); # lambdas are passed a sqrt(n) free from python code
if (lam < max(abs(t(x) %*% y) / n)) {
beta = coef(gfit, x=x, y=y, s=lam, exact=TRUE)[-1]
out = fixedLassoInf(x, y, beta, lam*n, sigma=sigma_est, type='full', intercept=FALSE)
active_vars=out$vars - 1 # for 0-based
pvalues = out$pv
} else {
pvalues = NULL
active_vars = numeric(0)
}
''')
pvalues = np.asarray(rpy.r('pvalues'))
active_set = np.asarray(rpy.r('active_vars'))
numpy2ri.deactivate()
if len(active_set) > 0:
return active_set, pvalues
else:
return [], []
lee_full_R_theory.register()
class lee_full_R_aggressive(lee_full_R_theory):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_full_R_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
lee_full_R_aggressive.register()
# Unrandomized selected
class lee_theory(parametric_method):
model_target = Unicode("selected")
method_name = Unicode("Lee")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = lasso.gaussian(self.X, self.Y, self.lagrange * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
if not self._fit:
self.method_instance.fit()
self._fit = True
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
if len(L.active) > 0:
S = L.summary(compute_intervals=compute_intervals, alternative='onesided')
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
active_set = np.array(S['variable'])
lower, upper = np.asarray(S['lower_confidence']), np.asarray(S['upper_confidence'])
return active_set, lower, upper
else:
return [], [], []
def point_estimator(self):
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
beta_full = np.zeros(p)
if self.estimator == "LASSO":
beta_full[L.active] = L.soln
else:
beta_full[L.active] = L.onestep_estimator
return L.active, beta_full
lee_theory.register()
class lee_CV(lee_theory):
need_CV = True
lambda_choice = Unicode("CV")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_min * np.ones(X.shape[1])
lee_CV.register()
class lee_1se(lee_theory):
need_CV = True
lambda_choice = Unicode("1se")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
lee_1se.register()
class lee_aggressive(lee_theory):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = 0.8 * l_theory * np.ones(X.shape[1])
lee_aggressive.register()
class lee_weak(lee_theory):
lambda_choice = Unicode("weak")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
lee_theory.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = 2 * l_theory * np.ones(X.shape[1])
lee_weak.register()
class sqrt_lasso(parametric_method):
method_name = Unicode('SqrtLASSO')
kappa = Float(0.7)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = self.kappa * choose_lambda(X)
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
self._method_instance = lasso.sqrt_lasso(self.X, self.Y, self.lagrange)
return self._method_instance
def generate_summary(self, compute_intervals=False):
X, Y, lagrange, L = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
X = X / np.sqrt(n)
if len(L.active) > 0:
S = L.summary(compute_intervals=compute_intervals, alternative='onesided')
return S
def generate_pvalues(self):
S = self.generate_summary(compute_intervals=False)
if S is not None:
active_set = np.array(S['variable'])
pvalues = np.asarray(S['pval'])
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
S = self.generate_summary(compute_intervals=True)
if S is not None:
active_set = np.array(S['variable'])
lower, upper = np.asarray(S['lower_confidence']), np.asarray(S['upper_confidence'])
return active_set, lower, upper
else:
return [], [], []
sqrt_lasso.register()
# Randomized selected
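# In the randomized variants, randomizer_scale sets the added Gaussian noise
# relative to np.std(Y); the *_half and *_quarter subclasses further down
# shrink it to 0.5 and 0.25.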
class randomized_lasso(parametric_method):
method_name = Unicode("Randomized LASSO")
model_target = Unicode("selected")
lambda_choice = Unicode("theory")
randomizer_scale = Float(1)
ndraw = 10000
burnin = 1000
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1])
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
mean_diag = np.mean((self.X ** 2).sum(0))
self._method_instance = random_lasso_method.gaussian(self.X,
self.Y,
feature_weights = self.lagrange * np.sqrt(n),
ridge_term=np.std(self.Y) * np.sqrt(mean_diag) / np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
return self._method_instance
def generate_summary(self, compute_intervals=False):
X, Y, lagrange, rand_lasso = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
        if not self._fit:
            self.method_instance.fit()
            self._fit = True
        signs = rand_lasso.selection_variable['sign']  # avoid refitting the same instance a second time
active_set = np.nonzero(signs)[0]
active = signs != 0
# estimates sigma
        # JM: for transparency it's better not to have this buried deep in the code
X_active = X[:,active_set]
rpy.r.assign('X_active', X_active)
rpy.r.assign('Y', Y)
rpy.r('X_active=as.matrix(X_active)')
rpy.r('Y=as.numeric(Y)')
rpy.r('sigma_est = sigma(lm(Y~ X_active - 1))')
dispersion = rpy.r('sigma_est')
print("dispersion (sigma est for Python)", dispersion)
(observed_target,
cov_target,
cov_target_score,
alternatives) = form_targets(self.model_target,
rand_lasso.loglike,
rand_lasso._W,
active,
**{'dispersion': dispersion})
if active.sum() > 0:
_, pvalues, intervals = rand_lasso.summary(observed_target,
cov_target,
cov_target_score,
alternatives,
level=0.9,
ndraw=self.ndraw,
burnin=self.burnin,
compute_intervals=compute_intervals)
return active_set, pvalues, intervals
else:
return [], [], []
def generate_pvalues(self, compute_intervals=False):
active_set, pvalues, _ = self.generate_summary(compute_intervals=compute_intervals)
if len(active_set) > 0:
return active_set, pvalues
else:
return [], []
def generate_intervals(self):
active_set, _, intervals = self.generate_summary(compute_intervals=True)
if len(active_set) > 0:
return active_set, intervals[:,0], intervals[:,1]
else:
return [], [], []
class randomized_lasso_CV(randomized_lasso):
need_CV = True
lambda_choice = Unicode("CV")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_min * np.ones(X.shape[1])
class randomized_lasso_1se(randomized_lasso):
need_CV = True
lambda_choice = Unicode("1se")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
randomized_lasso.register(), randomized_lasso_CV.register(), randomized_lasso_1se.register()
# More aggressive lambda choice
class randomized_lasso_aggressive(randomized_lasso):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
class randomized_lasso_aggressive_half(randomized_lasso):
lambda_choice = Unicode('aggressive')
randomizer_scale = Float(0.5)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
class randomized_lasso_weak_half(randomized_lasso):
lambda_choice = Unicode('weak')
randomizer_scale = Float(0.5)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 2.
randomized_lasso_weak_half.register()
class randomized_lasso_aggressive_quarter(randomized_lasso):
randomizer_scale = Float(0.25)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
randomized_lasso_aggressive.register(), randomized_lasso_aggressive_half.register(), randomized_lasso_aggressive_quarter.register()
# Randomized selected smaller randomization
class randomized_lasso_half(randomized_lasso):
randomizer_scale = Float(0.5)
class randomized_lasso_half_CV(randomized_lasso_CV):
need_CV = True
randomizer_scale = Float(0.5)
class randomized_lasso_half_1se(randomized_lasso_1se):
need_CV = True
randomizer_scale = Float(0.5)
randomized_lasso_half.register(), randomized_lasso_half_CV.register(), randomized_lasso_half_1se.register()
# selective mle
class randomized_lasso_mle(randomized_lasso_aggressive_half):
method_name = Unicode("Randomized MLE")
randomizer_scale = Float(0.5)
model_target = Unicode("selected")
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = randomized_modelQ(self.feature_cov * n,
self.X,
self.Y,
self.lagrange * np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
return self._method_instance
def generate_pvalues(self):
X, Y, lagrange, rand_lasso = self.X, self.Y, self.lagrange, self.method_instance
n, p = X.shape
        if not self._fit:
            signs = self.method_instance.fit()
            self._fit = True
        else:
            signs = rand_lasso.fit()  # rand_lasso is self.method_instance; previously this refit unconditionally
active_set = np.nonzero(signs)[0]
Z, pvalues = rand_lasso.selective_MLE(target=self.model_target,
solve_args={'min_iter':1000, 'tol':1.e-12})[-3:-1]
print(pvalues, 'pvalues')
print(Z, 'Zvalues')
if len(pvalues) > 0:
return active_set, pvalues
else:
return [], []
randomized_lasso_mle.register()
# Using modelQ for randomized
class randomized_lasso_half_pop_1se(randomized_lasso_half_1se):
method_name = Unicode("Randomized ModelQ (pop)")
randomizer_scale = Float(0.5)
nsample = 15000
burnin = 2000
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = randomized_modelQ(self.feature_cov * n,
self.X,
self.Y,
self.lagrange * np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
return self._method_instance
class randomized_lasso_half_semi_1se(randomized_lasso_half_1se):
method_name = Unicode("Randomized ModelQ (semi-supervised)")
randomizer_scale = Float(0.5)
B = 10000
nsample = 15000
burnin = 2000
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
cls._chol = np.linalg.cholesky(feature_cov)
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
# draw sample of X for semi-supervised method
_chol = self._chol
p = _chol.shape[0]
Q = 0
batch_size = int(self.B/10)
for _ in range(10):
X_semi = np.random.standard_normal((batch_size, p)).dot(_chol.T)
Q += X_semi.T.dot(X_semi)
Q += self.X.T.dot(self.X)
Q /= (10 * batch_size + self.X.shape[0])
n, p = self.X.shape
self._method_instance = randomized_modelQ(Q * n,
self.X,
self.Y,
self.lagrange * np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
return self._method_instance
randomized_lasso_half_pop_1se.register(), randomized_lasso_half_semi_1se.register()
# Using modelQ for randomized
class randomized_lasso_half_pop_aggressive(randomized_lasso_aggressive_half):
method_name = Unicode("Randomized ModelQ (pop)")
randomizer_scale = Float(0.5)
nsample = 10000
burnin = 2000
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
self._method_instance = randomized_modelQ(self.feature_cov * n,
self.X,
self.Y,
self.lagrange * np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
return self._method_instance
class randomized_lasso_half_semi_aggressive(randomized_lasso_aggressive_half):
method_name = Unicode("Randomized ModelQ (semi-supervised)")
randomizer_scale = Float(0.25)
B = 10000
nsample = 15000
burnin = 2000
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
cls._chol = np.linalg.cholesky(feature_cov)
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
# draw sample of X for semi-supervised method
_chol = self._chol
p = _chol.shape[0]
Q = 0
batch_size = int(self.B/10)
for _ in range(10):
X_semi = np.random.standard_normal((batch_size, p)).dot(_chol.T)
Q += X_semi.T.dot(X_semi)
Q += self.X.T.dot(self.X)
Q /= (10 * batch_size + self.X.shape[0])
n, p = self.X.shape
self._method_instance = randomized_modelQ(Q * n,
self.X,
self.Y,
self.lagrange * np.sqrt(n),
randomizer_scale=self.randomizer_scale * np.std(self.Y) * np.sqrt(n))
return self._method_instance
randomized_lasso_half_pop_aggressive.register(), randomized_lasso_half_semi_aggressive.register()
# Randomized sqrt selected
class randomized_sqrtlasso(randomized_lasso):
method_name = Unicode("Randomized SqrtLASSO")
model_target = Unicode("selected")
randomizer_scale = Float(1)
kappa = Float(0.7)
@property
def method_instance(self):
if not hasattr(self, "_method_instance"):
n, p = self.X.shape
lagrange = np.ones(p) * choose_lambda(self.X) * self.kappa
self._method_instance = random_lasso_method.gaussian(self.X,
self.Y,
lagrange,
randomizer_scale=self.randomizer_scale * np.std(self.Y))
return self._method_instance
def generate_summary(self, compute_intervals=False):
X, Y, rand_lasso = self.X, self.Y, self.method_instance
n, p = X.shape
X = X / np.sqrt(n)
if not self._fit:
self.method_instance.fit()
self._fit = True
signs = self.method_instance.selection_variable['sign']
active_set = np.nonzero(signs)[0]
active = signs != 0
(observed_target,
cov_target,
cov_target_score,
alternatives) = form_targets(self.model_target,
rand_lasso.loglike,
rand_lasso._W,
active)
_, pvalues, intervals = rand_lasso.summary(observed_target,
cov_target,
cov_target_score,
alternatives,
ndraw=self.ndraw,
burnin=self.burnin,
compute_intervals=compute_intervals)
if len(pvalues) > 0:
return active_set, pvalues, intervals
else:
return [], [], []
class randomized_sqrtlasso_half(randomized_sqrtlasso):
randomizer_scale = Float(0.5)
randomized_sqrtlasso.register(), randomized_sqrtlasso_half.register()
class randomized_sqrtlasso_bigger(randomized_sqrtlasso):
kappa = Float(0.8)
class randomized_sqrtlasso_bigger_half(randomized_sqrtlasso):
kappa = Float(0.8)
randomizer_scale = Float(0.5)
randomized_sqrtlasso_bigger.register(), randomized_sqrtlasso_bigger_half.register()
# Randomized full
class randomized_lasso_full(randomized_lasso):
model_target = Unicode('full')
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1])
class randomized_lasso_full_CV(randomized_lasso_full):
need_CV = True
lambda_choice = Unicode("CV")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso_full.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_min * np.ones(X.shape[1])
class randomized_lasso_full_1se(randomized_lasso_full):
need_CV = True
lambda_choice = Unicode("1se")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso_full.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
randomized_lasso_full.register(), randomized_lasso_full_CV.register(), randomized_lasso_full_1se.register()
# Randomized full smaller randomization
class randomized_lasso_full_half(randomized_lasso_full):
randomizer_scale = Float(0.5)
class randomized_lasso_full_half_CV(randomized_lasso_full_CV):
randomizer_scale = Float(0.5)
class randomized_lasso_full_half_1se(randomized_lasso_full_1se):
need_CV = True
randomizer_scale = Float(0.5)
randomized_lasso_full_half.register(), randomized_lasso_full_half_CV.register(), randomized_lasso_full_half_1se.register()
# Aggressive choice of lambda
class randomized_lasso_full_aggressive(randomized_lasso_full):
lambda_choice = Unicode("aggressive")
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
randomized_lasso_full.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_theory * np.ones(X.shape[1]) * 0.8
class randomized_lasso_full_aggressive_half(randomized_lasso_full_aggressive):
randomizer_scale = Float(0.5)
randomized_lasso_full_aggressive.register(), randomized_lasso_full_aggressive_half.register()
class randomized_lasso_R_theory(randomized_lasso):
method_name = Unicode("Randomized LASSO (R code)")
    selectiveR_method = True  # match the attribute name used by the other R-backed methods
def generate_summary(self, compute_intervals=False):
numpy2ri.activate()
rpy.r.assign('X', self.X)
rpy.r.assign('y', self.Y)
rpy.r('y = as.numeric(y)')
rpy.r.assign('q', self.q)
rpy.r.assign('lam', self.lagrange[0])
rpy.r.assign("randomizer_scale", self.randomizer_scale)
rpy.r.assign("compute_intervals", compute_intervals)
rpy.r('''
n = nrow(X)
p = ncol(X)
lam = lam * sqrt(n)
mean_diag = mean(apply(X^2, 2, sum))
ridge_term = sqrt(mean_diag) * sd(y) / sqrt(n)
result = randomizedLasso(X, y, lam, ridge_term=ridge_term,
noise_scale = randomizer_scale * sd(y) * sqrt(n), family='gaussian')
active_set = result$active_set
if (length(active_set)==0){
active_set = -1
} else{
sigma_est = sigma(lm(y ~ X[,active_set] - 1))
cat("sigma est for R", sigma_est,"\n")
targets = selectiveInference:::compute_target(result, 'partial', sigma_est = sigma_est,
construct_pvalues=rep(TRUE, length(active_set)),
construct_ci=rep(compute_intervals, length(active_set)))
out = randomizedLassoInf(result,
targets=targets,
sampler = "norejection",
level=0.9,
burnin=1000,
nsample=10000)
active_set=active_set-1
pvalues = out$pvalues
intervals = out$ci
}
''')
        active_set = np.asarray(rpy.r('active_set'), int)  # np.int is deprecated in newer numpy
        print(active_set)
        if active_set[0] == -1:
numpy2ri.deactivate()
return [], [], []
pvalues = np.asarray(rpy.r('pvalues'))
intervals = np.asarray(rpy.r('intervals'))
numpy2ri.deactivate()
return active_set, pvalues, intervals
randomized_lasso_R_theory.register()
class data_splitting_1se(parametric_method):
method_name = Unicode('Data splitting')
selection_frac = Float(0.5)
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
parametric_method.__init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid)
self.lagrange = l_1se * np.ones(X.shape[1])
n, p = self.X.shape
n1 = int(self.selection_frac * n)
X1, X2 = self.X1, self.X2 = self.X[:n1], self.X[n1:]
Y1, Y2 = self.Y1, self.Y2 = self.Y[:n1], self.Y[n1:]
pen = rr.weighted_l1norm(np.sqrt(n1) * self.lagrange, lagrange=1.)
loss = rr.squared_error(X1, Y1)
problem = rr.simple_problem(loss, pen)
soln = problem.solve()
self.active_set = np.nonzero(soln)[0]
self.signs = np.sign(soln)[self.active_set]
self._fit = True
def generate_pvalues(self):
X2, Y2 = self.X2[:,self.active_set], self.Y2
if len(self.active_set) > 0:
s = len(self.active_set)
X2i = np.linalg.inv(X2.T.dot(X2))
beta2 = X2i.dot(X2.T.dot(Y2))
resid2 = Y2 - X2.dot(beta2)
n2 = X2.shape[0]
sigma2 = np.sqrt((resid2**2).sum() / (n2 - s))
Z2 = beta2 / np.sqrt(sigma2**2 * np.diag(X2i))
signed_Z2 = self.signs * Z2
pvalues = 1 - ndist.cdf(signed_Z2)
return self.active_set, pvalues
else:
return [], []
data_splitting_1se.register()
| 1.570313 | 2 |
app/main.py | nox-noctua-consulting/peon-ui | 1 | 106244 | #!/usr/bin/python3
# IMPORTS
import logging
from modules import devMode
from website import create_app
# VARIABLES
app = create_app()
# MAIN
if __name__ == '__main__':
logging.basicConfig(filename='/var/log/peon/webui.log', filemode='a', format='%(asctime)s %(thread)d [%(levelname)s] - %(message)s', level=logging.INFO)
devMode()
    app.run(host='0.0.0.0', port=80, debug=True)  # app.run blocks and returns None, so logging its result recorded nothing
tk_gui_benchmarks.py | zacsimile/random | 0 | 106372 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Benchmark different GUI image draw times in tkinter
Environment setup instructions:
conda create -n gui-test tk matplotlib pillow vispy
pip install pyopengltk
"""
import time
import tkinter as tk
import numpy as np
from PIL import Image, ImageTk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import vispy
from vispy import scene
from vispy.app import use_app
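# Each *_gui_test below times only the conversion/draw of the array into the
# toolkit's canvas; window construction and the event loop sit outside the
# timed region, so the numbers compare the raw image-draw paths.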
def pil_gui_test(arr):
# https://stackoverflow.com/questions/52459277/convert-a-c-or-numpy-array-to-a-tkinter-photoimage-with-a-minimum-number-of-copi
root = tk.Tk()
start = time.time()
img = ImageTk.PhotoImage(Image.fromarray(arr))
stop = time.time()
print(f"Pillow run took {stop-start} s")
lbl = tk.Label(root, image=img)
lbl.pack()
root.mainloop()
def matplotlib_gui_test(arr):
# https://matplotlib.org/3.1.0/gallery/user_interfaces/embedding_in_tk_sgskip.html
root = tk.Tk()
f = Figure()
canvas = FigureCanvasTkAgg(f,root)
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
start = time.time()
f.add_subplot(111).imshow(arr)
canvas.draw()
stop = time.time()
print(f"Matplotlib run took {stop-start} s")
lbl = tk.Label(root)
lbl.pack()
root.mainloop()
def vispy_gui_test(arr):
# https://github.com/vispy/vispy/issues/2168
# https://vispy.org/gallery/scene/image.html
root = tk.Tk()
app = use_app("tkinter")
canvas = vispy.scene.SceneCanvas(keys='interactive', show=True, parent=root, app=app)
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
# Set 2D camera (the camera will scale to the contents in the scene)
view.camera = scene.PanZoomCamera(aspect=1)
view.camera.flip = (0, 1, 0)
view.camera.zoom(1.0)
# TODO: This isn't setting the window size correctly.
# Need to manually expand the window to see the image
canvas.native.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
# Create the image
start = time.time()
image = scene.visuals.Image(arr, interpolation='nearest',
parent=view.scene, method='subdivide')
view.camera.set_range()
stop = time.time()
print(f"Vispy run took {stop-start} s")
app.run()
if __name__ == "__main__":
# generate image array to plot
    arr = np.random.randint(0, 255, size=(100, 100, 3), dtype=np.uint8)  # explicit low/high; same [0, 255) range as before
pil_gui_test(arr)
matplotlib_gui_test(arr)
vispy_gui_test(arr)
| 2.453125 | 2 |
polls/models.py | enesebastian/djangoapp | 0 | 106500 | import datetime
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractUser
from django.core.validators import MinValueValidator, MaxValueValidator
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
class CustomUser(AbstractUser):
    full_name = models.CharField(error_messages={'required': 'Enter a name!'}, max_length=100, blank=False)
    phone_number = models.CharField(error_messages={'required': 'We need a way to reach you!'}, max_length=100, blank=False)
    age = models.IntegerField(validators=[MinValueValidator(18),
                              MaxValueValidator(80)], error_messages={'required': 'Minors not allowed!'}, null=True, blank=True)
    address = models.CharField(error_messages={'required': 'A precautionary measure!'}, max_length=100, blank=False)
class Imprumut(models.Model):
username = models.CharField(max_length=200,blank=True)
    suma = models.IntegerField(validators=[MinValueValidator(100000),
                               MaxValueValidator(20000000)], error_messages={'required': 'The minimum amount is $100,000 / maximum $20,000,000'}, null=True, blank=True)
perioada = models.PositiveSmallIntegerField()
luni = models.IntegerField()
bifa = models.BooleanField()
| 1.828125 | 2 |
practice/kmp.py | haandol/dojo | 0 | 106628 | # https://www.geeksforgeeks.org/kmp-algorithm-for-pattern-searching/
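# The failure function lps[i] holds the length of the longest proper prefix of
# pat[:i+1] that is also a suffix of it; on a mismatch the search resumes at
# pat[lps[l-1]] instead of restarting, giving O(n + m) time overall.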
def get_lps(pat):
n = len(pat)
lps = [0] * n # longest proper prefix which is also suffix
i = 1
l = 0
while i < n:
if pat[i] == pat[l]:
l += 1
lps[i] = l
i += 1
else:
if 0 < l:
l = lps[l-1]
else:
i += 1
return lps
def find(text, pat):
res = []
n = len(text)
m = len(pat)
i = 0
l = 0
lps = get_lps(pat)
while i < n:
if text[i] == pat[l]:
i += 1
l += 1
else:
if 0 < l:
l = lps[l-1]
else:
i += 1
if l == m:
res.append(i-m)
l = lps[l-1]
return res
if __name__ == '__main__':
text = 'THIS IS A TEST TEXT'
pat = 'TEST'
    assert [10] == find(text, pat)
text = 'AABAACAADAABAABA'
pat = 'AABA'
    assert [0, 9, 12] == find(text, pat)
    assert [0, 1, 2, 3] == get_lps('AAAA')
    assert [0, 1, 2, 0] == get_lps('AAAB')
    assert [0, 1, 0, 1] == get_lps('AABA')
    assert [0, 0, 1, 2] == get_lps('ABAB')
    assert [0, 1, 0, 1, 2] == get_lps('AACAA')
model_correction/code/prepare_subsystem_for_Yeast8_map.py | hongzhonglu/yeast-model-update | 2 | 106756 | # -*- coding: utf-8 -*-
# -*- python 3 -*-
# -*- hongzhong Lu -*-
import os
os.chdir('/Users/luho/PycharmProjects/model/model_correction/code')
exec(open("./find_subsystem_Yeast8_using_code.py").read())
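# the exec'd script above is assumed to provide the helpers used below
# (singleMapping, multiMapping, RemoveDuplicated, saveExcel) as well as the
# gem_dataframe and rxn_gene tables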
# the same reaction can be formatted differently by different software tools,
# so the reaction list is built with the R function to keep it consistent
subsystem_map = pd.read_excel('../result/subsystem_manual check results.xlsx')
gem_dataframe['subsystem_map'] = singleMapping(subsystem_map['Subsystem_for yeast map'],subsystem_map['Abbreviation'],gem_dataframe['rxnID'])
gem_dataframe['removed_duplicate_subsystem'] = singleMapping(subsystem_map['removed_duplicate_subsystem'],subsystem_map['Abbreviation'],gem_dataframe['rxnID'])
gem_dataframe['evidence'] = singleMapping(subsystem_map['evidence'],subsystem_map['Abbreviation'],gem_dataframe['rxnID'])
#add the subsystem obtained based on geneID
for i in range(0, len(gem_dataframe['subsystem_map'])):
    if gem_dataframe['subsystem_map'][i] is None:
        gem_dataframe['subsystem_map'][i] = gem_dataframe['subsystem_xref'][i]
#add the subsystem obtained based on the keggID
for i in range(0, len(gem_dataframe['subsystem_map'])):
    if gem_dataframe['subsystem_map'][i] == '':  # `is ''` tested identity, not string equality
        gem_dataframe['subsystem_map'][i] = gem_dataframe['subsystem_rxnID'][i]
#add the information from manual check results for these reactions connected with new genes
subsystem_manual = pd.read_excel('../data/subsytem_for new genes added into Yeast8.xlsx')
subsystem_manual['inf'] = subsystem_manual.loc[:,'subpathway'] + ' @@ ' + subsystem_manual.loc[:,'note']
rxn_gene['subsystem_manual'] = multiMapping(subsystem_manual['inf'],subsystem_manual['gene'],rxn_gene['gene'],sep=" // ")
gem_dataframe['subsytem_manual_newGene'] = multiMapping(rxn_gene['subsystem_manual'] ,rxn_gene['rxnID'] ,gem_dataframe['rxnID'],sep=" // ")
gem_dataframe['subsytem_manual_newGene'] = RemoveDuplicated(gem_dataframe['subsytem_manual_newGene'].tolist())
#add the information from reaction notes for these reactions from biolog experiments
evidences_biolog = pd.read_excel('../data/classification for new reactions from biolog_result.xlsx')
evidences_biolog['inf'] = evidences_biolog.loc[:,'source'] + ' @@ ' + evidences_biolog.loc[:,'external_ID']
gem_dataframe['note'] = multiMapping(evidences_biolog['inf'], evidences_biolog['rxnID'] ,gem_dataframe['rxnID'],sep=" // ")
saveExcel(gem_dataframe,"../result/subsystem for yeast8 map.xlsx")
#refine the subsystem for the yeast map based on the reaction number and manual check results
subsystem_map_v2 = pd.read_excel('../result/subsystem for yeast8 map_V2.xlsx')
| 1.585938 | 2 |
plotter.py | jdmillard/rocket-altitude | 0 | 106884 | from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import time
class livePlotter:
"""
Class for plotting methods.
"""
def __init__(self, rocket, final_time, plot_real_time):
# store some inputs
self.plot_real_time = plot_real_time
self.tf = final_time
''' setup real time plot using pyqtgraph '''
self.app = QtGui.QApplication([])
# create the widget ("Graphics Window" allows stacked plots)
self.win = pg.GraphicsWindow(title="Live Plotting")
self.win.resize(1500,1000) # set window size
self.win.move(50,50) # set window monitor position
self.win.setWindowTitle('Altitude Controller Truth')
# enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
# set some pen types
pen_green = pg.mkPen(color=(50, 255, 50, 255), width=2)
pen_green2 = pg.mkPen(color=(50, 255, 50, 255), width=1)
pen_blue = pg.mkPen(color=(50, 50, 255, 255), width=2, symbol='t')
pen_blue2 = pg.mkPen(color=(50, 50, 255, 255), width=1)
# FIRST SUBPLOT OBJECT
self.p1 = self.win.addPlot(title="Altitude vs. Time")
self.p1.setXRange(0,final_time,padding=0)
self.p1.setYRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
self.p1.setLabel('left', "Altitude (m)")
self.p1.setLabel('bottom', "Time (s)") # , units='s'
self.p1.showGrid(x=True, y=True)
self.meas1 = self.p1.plot(pen=pen_blue, name='Curve 1')
# SECOND SUBPLOT OBJECT
self.p2 = self.win.addPlot(title="Velocity vs. Time")
self.p2.setXRange(0,final_time,padding=0)
self.p2.setYRange(0,rocket.hd_0*1.1,padding=0)
self.p2.setLabel('left', "h_dot (m/s)")
self.p2.setLabel('bottom', "Time (s)")
self.p2.showGrid(x=True, y=True)
self.meas2 = self.p2.plot(pen=pen_blue, name='Curve 2')
# THIRD SUBPLOT OBJECT
self.p3 = self.win.addPlot(title="h_dot vs. h")
self.p3.setXRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
self.p3.setYRange(0,rocket.hd_0*1.1,padding=0)
self.p3.setLabel('left', "h_dot (m/s)")
self.p3.setLabel('bottom', "h (m)")
self.p3.showGrid(x=True, y=True)
self.p3.addLegend(offset=[-10,10])
self.meas3 = self.p3.plot(pen=pen_blue, name='Simulated Trajectory')
self.t_ref = self.p3.plot(pen=pen_green2, name='Reference Trajectory')
self.t_ref.setData(rocket.h_ref, rocket.hd_ref)
self.win.nextRow()
# FOURTH SUBPLOT OBJECT
self.p4 = self.win.addPlot(title="Theta Control Input")
self.p4.setXRange(0,final_time,padding=0)
self.p4.setYRange(0,rocket.th_max*1.1,padding=0)
self.p4.setLabel('left', "theta (deg)")
self.p4.setLabel('bottom', "time (s)")
self.p4.showGrid(x=True, y=True)
self.p4.addLegend(offset=[-10,10])
self.meas4 = self.p4.plot(pen=pen_blue, name='Current Theta')
self.meas4a = self.p4.plot(pen=pen_green2, name='Desired Theta')
# FIFTH SUBPLOT OBJECT
self.p5 = self.win.addPlot(title="Error vs. Time")
#self.p5.setLogMode(False,True)
self.p5.setXRange(0,final_time,padding=0)
#self.p5.setYRange( , ,padding=0)
self.p5.setLabel('left', "Velocity Error (m/s)")
self.p5.setLabel('bottom', "Time (s)")
self.p5.showGrid(x=True, y=True)
self.meas5 = self.p5.plot(pen=pen_green, name='Curve 6')
# SIXTH SUBPLOT OBJECT
self.p6 = self.win.addPlot(title="Error vs. Height")
self.p6.setXRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
#self.p6.setYRange(rocket.h*0.9,rocket.h_f*1.1,padding=0)
self.p6.setLabel('left', "Velocity Error (m/s)")
self.p6.setLabel('bottom', "h (m)")
self.p6.showGrid(x=True, y=True)
self.meas6 = self.p6.plot(pen=pen_green, name='Curve 6')
# show the plot by calling an update
# it is needed twice (to force display on first iteration) - not sure why
# either method below works, but the app handle method is better practice
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
# start timer
self.time0 = time.time()
# method for updating data
def updateItems(self, rocket, sim_time, current_time):
# override the waiting constraint
if self.plot_real_time:
actual_time = current_time - self.time0
else:
actual_time = sim_time
if self.plot_real_time or rocket.hd <= 0 or sim_time==self.tf:
# plot no faster than actual time
# NOTE: simulation can get slower than real time
if actual_time < sim_time:
# pause to wait for actual time to catch up
time.sleep(sim_time-actual_time)
# get time and h for the rocket
x = rocket.t_all[0:rocket.i]
y = rocket.h_all[0:rocket.i]
self.meas1.setData(x,y)
# get time and h_dot for the rocket
#x = rocket.t_all[0:rocket.i] # x is already this
y = rocket.hd_all[0:rocket.i]
self.meas2.setData(x,y)
# get h and h_dot for the rocket
x = rocket.h_all[0:rocket.i]
#y = rocket.hd_all[0:rocket.i] # y is already this
self.meas3.setData(x,y)
# get time and theta for the air brake
x = rocket.t_all[0:rocket.i]
y = rocket.th_all[0:rocket.i]
self.meas4.setData(x,y)
# get time and theta_cmd for the air brake
#x = rocket.t_all[0:rocket.i]
y = rocket.th_cmd_all[0:rocket.i]
self.meas4a.setData(x,y)
# get time and e_hd for the rocket
#x = rocket.t_all[0:rocket.i]
y = rocket.e_hd[0:rocket.i]
self.meas5.setData(x,y)
# get h and e_hd for the rocket
x = rocket.h_all[0:rocket.i]
#y = rocket.e_hd[0:rocket.i]
self.meas6.setData(x,y)
# update the plotted data
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
# hold plot when rocket reaches maximum height
if rocket.hd <= 0 or sim_time==self.tf:
print("simulation finished")
print("rocket altitude:", rocket.h, "m")
print("simulation time:", sim_time, "s")
#print("real time: ", current_time - self.time0, " s")
while 1:
self.app.processEvents() #pg.QtGui.QApplication.processEvents()
self.app.exec_() # hold final plot
#time.sleep(5)
    # method for generating a 2d ellipse boundary for a given covariance
    def generateEllipse(self, P, n_sigma=3, n_points=100):
        # minimal sketch: eigen-decompose the 2x2 covariance and scale a unit
        # circle by the n_sigma standard-deviation axes (returns 2 x n_points)
        vals, vecs = np.linalg.eigh(np.asarray(P)[:2, :2])
        t = np.linspace(0, 2 * np.pi, n_points)
        return vecs.dot(n_sigma * np.sqrt(np.maximum(vals, 0.0))[:, None] * np.stack((np.cos(t), np.sin(t))))
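    # usage sketch (centering is an assumption, not part of the original stub):
    #   pts = self.generateEllipse(P); plot pts[0] + cx against pts[1] + cy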
| 2.234375 | 2 |
selfdrive/mapd/lib/WayRelation.py | arneschwarck/openpilot | 4 | 107012 | from selfdrive.mapd.lib.geo import DIRECTION, distance_and_bearing, absoule_delta_with_direction, bearing_delta, bearing
from common.numpy_fast import interp
from selfdrive.config import Conversions as CV
import numpy as np
import re
_ACCEPTABLE_BEARING_DELTA_V = [40., 20., 10., 5.]
_ACCEPTABLE_BEARING_DELTA_BP = [30., 100., 200., 300.]
_COUNTRY_LIMITS_KPH = {
'DE': {
'urban': 50.,
'rural': 100.,
'motorway': 0.,
'living_street': 7.,
'bicycle_road': 30.
}
}
class WayRelation():
"""A class that represent the relationship of an OSM way and a given `location` and `bearing` of a driving vehicle.
"""
def __init__(self, way, location=None, bearing=None):
self.way = way
self.reset_location_variables()
self.direction = DIRECTION.NONE
self._speed_limit = None
# Create a numpy array with nodes data to support calculations.
self._nodes_np = np.radians(np.array([[nd.lat, nd.lon] for nd in way.nodes], dtype=float))
# Define bounding box to ease the process of locating a node in a way.
# [[min_lat, min_lon], [max_lat, max_lon]]
self.bbox = np.row_stack((np.amin(self._nodes_np, 0), np.amax(self._nodes_np, 0)))
if location is not None and bearing is not None:
self.update(location, bearing)
def __repr__(self):
return f'(id: {self.id}, name: {self.name}, ref: {self.ref}, ahead: {self.ahead_idx}, \
behind: {self.behind_idx}, {self.direction}, active: {self.active})'
def reset_location_variables(self):
self.location = None
self.bearing = None
self.active = False
self.ahead_idx = None
self.behind_idx = None
self._active_way_bearing = None
@property
def id(self):
return self.way.id
def update(self, location, bearing):
"""Will update and validate the associated way with a given `location` and `bearing`.
Specifically it will find the nodes behind and ahead of the current location and bearing.
If no proper fit to the way geometry, the way relation is marked as invalid.
"""
self.reset_location_variables()
        # Ignore if location is not inside the way's bounding box
if not self.is_location_in_bbox(location):
return
# TODO: Do this with numpy. Calculate distance and bearing to all nodes and then process array to find
# best match if any.
for idx, node in enumerate(self.way.nodes):
distance_to_node, bearing_to_node = distance_and_bearing(location, (node.lat, node.lon))
delta, direction = absoule_delta_with_direction(bearing_delta(bearing, bearing_to_node))
if abs(delta) > interp(distance_to_node, _ACCEPTABLE_BEARING_DELTA_BP, _ACCEPTABLE_BEARING_DELTA_V):
continue
if direction == DIRECTION.AHEAD:
self.ahead_idx = idx
self.distance_to_node_ahead = distance_to_node
if self.behind_idx is not None:
break
elif direction == DIRECTION.BEHIND:
self.behind_idx = idx
if self.ahead_idx is not None:
break
# Validate
if self.ahead_idx is None or self.behind_idx is None or abs(self.ahead_idx - self.behind_idx) > 1:
self.reset_location_variables()
return
self.active = True
self.location = location
self.bearing = bearing
self._speed_limit = None
self.direction = DIRECTION.FORWARD if self.ahead_idx - self.behind_idx > 0 else DIRECTION.BACKWARD
def update_direction_from_starting_node(self, start_node_id):
self._speed_limit = None
if self.way.nodes[0].id == start_node_id:
self.direction = DIRECTION.FORWARD
elif self.way.nodes[-1].id == start_node_id:
self.direction = DIRECTION.BACKWARD
else:
self.direction = DIRECTION.NONE
def is_location_in_bbox(self, location):
"""Indicates if a given location is contained in the bounding box surrounding the way.
self.bbox = [[min_lat, min_lon], [max_lat, max_lon]]
"""
radians = np.radians(np.array(location, dtype=float))
is_g = np.greater_equal(radians, self.bbox[0, :])
is_l = np.less_equal(radians, self.bbox[1, :])
return np.all(np.concatenate((is_g, is_l)))
@property
def speed_limit(self):
if self._speed_limit is not None:
return self._speed_limit
# Get string from corresponding tag
limit_string = self.way.tags.get("maxspeed")
if limit_string is None:
if self.direction == DIRECTION.FORWARD:
limit_string = self.way.tags.get("maxspeed:forward")
elif self.direction == DIRECTION.BACKWARD:
limit_string = self.way.tags.get("maxspeed:backward")
# When limit is set to 0. is considered not existing. Use 0. as default value.
limit = 0.
# https://wiki.openstreetmap.org/wiki/Key:maxspeed
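        # The two regexes below accept plain numeric limits ("50", "30 mph") and
        # country-coded values such as "DE:urban" or "DE:zone:30"; anything else
        # keeps the default of 0., i.e. no known limit.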
if limit_string is not None:
# Look for matches of speed by default in kph, or in mph when explicitly noted.
v = re.match(r'^\s*([0-9]{1,3})\s*?(mph)?\s*$', limit_string)
if v is not None:
conv = CV.MPH_TO_MS if v[2] is not None and v[2] == "mph" else CV.KPH_TO_MS
limit = conv * float(v[1])
else:
# Look for matches of speed with country implicit values.
v = re.match(r'^\s*([A-Z]{2}):([a-z_]+):?([0-9]{1,3})?(\s+)?(mph)?\s*', limit_string)
if v is not None:
if v[2] == "zone" and v[3] is not None:
conv = CV.MPH_TO_MS if v[5] is not None and v[5] == "mph" else CV.KPH_TO_MS
limit = conv * float(v[3])
elif v[1] in _COUNTRY_LIMITS_KPH and v[2] in _COUNTRY_LIMITS_KPH[v[1]]:
limit = _COUNTRY_LIMITS_KPH[v[1]][v[2]] * CV.KPH_TO_MS
self._speed_limit = limit
return self._speed_limit
@property
def ref(self):
return self.way.tags.get("ref", None)
@property
def name(self):
return self.way.tags.get("name", None)
@property
def active_bearing(self):
"""Returns the exact bearing of the portion of way we are currentluy located at.
"""
if self._active_way_bearing is not None:
return self._active_way_bearing
if not self.active:
return None
ahead_node = self.way.nodes[self.ahead_idx]
behind_node = self.way.nodes[self.behind_idx]
self._active_way_bearing = bearing((behind_node.lat, behind_node.lon), (ahead_node.lat, ahead_node.lon))
return self._active_way_bearing
def active_bearing_delta(self, bearing):
"""Returns the delta between the given bearing and the exact
        bearing of the portion of the way we are currently located at.
"""
if self.active_bearing is None:
return None
return bearing_delta(bearing, self.active_bearing)
@property
def node_behind(self):
return self.way.nodes[self.behind_idx] if self.behind_idx is not None else None
@property
def node_ahead(self):
return self.way.nodes[self.ahead_idx] if self.ahead_idx is not None else None
@property
def last_node(self):
"""Returns the last node on the way considering the traveling direction
"""
if self.direction == DIRECTION.FORWARD:
return self.way.nodes[-1]
if self.direction == DIRECTION.BACKWARD:
return self.way.nodes[0]
return None
def edge_on_node(self, node_id):
"""Indicates if the associated way starts or ends in the node with `node_id`
"""
return self.way.nodes[0].id == node_id or self.way.nodes[-1].id == node_id
def next_wr(self, way_relations):
"""Returns a tuple with the next way relation (if any) based on `location` and `bearing` and
the `way_relations` list excluding the found next way relation. (to help with recursion)
"""
if self.direction not in [DIRECTION.FORWARD, DIRECTION.BACKWARD]:
return None, way_relations
possible_next_wr = list(filter(lambda wr: wr.id != self.id and wr.edge_on_node(self.last_node.id), way_relations))
possibles = len(possible_next_wr)
if possibles == 0:
return None, way_relations
if possibles == 1 or (self.ref is None and self.name is None):
next_wr = possible_next_wr[0]
else:
next_wr = next((wr for wr in possible_next_wr if wr.has_name_or_ref(self.name, self.ref)), possible_next_wr[0])
next_wr.update_direction_from_starting_node(self.last_node.id)
updated_way_relations = list(filter(lambda wr: wr.id != next_wr.id, way_relations))
return next_wr, updated_way_relations
def has_name_or_ref(self, name, ref):
if ref is not None and self.ref is not None and self.ref == ref:
return True
if name is not None and self.name is not None and self.name == name:
return True
return False
| 2.359375 | 2 |
processors/mods_processor.py | UB-Dortmund/mms | 0 | 107140 | <reponame>UB-Dortmund/mms
# The MIT License
#
# Copyright 2015 <NAME> <<EMAIL>>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import datetime
import logging
import uuid
from lxml import etree
try:
import app_secrets
import local_app_secrets as secrets
except ImportError:
import app_secrets
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-4s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
)
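# mods2csl maps MODS XML records to CSL-JSON items (the schema consumed by
# citeproc-style processors); mods2wtfjson fetches one record by PPN from the
# GBV SRU endpoint and maps it to the app-internal "wtf" dict format.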
def mods2csl(record):
csl_json = []
MODS = 'http://www.loc.gov/mods/v3'
NSDICT = {'m': MODS}
for mods in record.xpath('//m:mods', namespaces=NSDICT):
csl = {}
try:
theid = str(uuid.uuid4())
if mods.xpath('./m:recordInfo/m:recordIdentifier', namespaces=NSDICT):
theid = mods.xpath('./m:recordInfo/m:recordIdentifier', namespaces=NSDICT)[0].text
csl.setdefault('type', 'book')
csl_title = ''
csl.setdefault('id', theid)
# logging.info(etree.tostring(record))
if mods.xpath('./m:titleInfo/m:nonSort', namespaces=NSDICT):
csl_title = mods.xpath('./m:titleInfo/m:nonSort', namespaces=NSDICT)[0].text + ' '
csl_title += '%s' % mods.xpath('./m:titleInfo/m:title', namespaces=NSDICT)[0].text
if mods.xpath('./m:titleInfo/m:subTitle', namespaces=NSDICT):
csl_title += ' : %s' % mods.xpath('./m:titleInfo/m:subTitle', namespaces=NSDICT)[0].text
csl.setdefault('title', csl_title)
if mods.xpath('./m:name[@type="personal"]', namespaces=NSDICT):
for name in mods.xpath('./m:name[@type="personal"]', namespaces=NSDICT):
csl_tmp = {}
if name.xpath('./m:namePart[@type="family"]', namespaces=NSDICT):
csl_tmp.setdefault('family', name.xpath('./m:namePart[@type="family"]', namespaces=NSDICT)[0].text)
if name.xpath('./m:namePart[@type="given"]', namespaces=NSDICT):
csl_tmp.setdefault('given', name.xpath('./m:namePart[@type="given"]', namespaces=NSDICT)[0].text)
role = ''
                    if name.xpath('./m:role/m:roleTerm[@authority="marcrelator"]', namespaces=NSDICT):
                        role = name.xpath('./m:role/m:roleTerm[@authority="marcrelator"]', namespaces=NSDICT)[0].text
if csl_tmp.get('family'):
if role == 'edt':
csl.setdefault('editor', []).append(csl_tmp)
else:
csl.setdefault('author', []).append(csl_tmp)
if mods.xpath('./m:originInfo/m:place/m:placeTerm[@type="text"]', namespaces=NSDICT):
csl.setdefault('publisher-place', mods.xpath('./m:originInfo/m:place/m:placeTerm[@type="text"]', namespaces=NSDICT)[0].text)
if mods.xpath('./m:originInfo/m:publisher', namespaces=NSDICT):
csl.setdefault('publisher', mods.xpath('./m:originInfo/m:publisher', namespaces=NSDICT)[0].text)
if mods.xpath('./m:originInfo/m:dateIssued', namespaces=NSDICT):
year = mods.xpath('./m:originInfo/m:dateIssued', namespaces=NSDICT)[0].text.replace('[', '').replace(']', '').replace('c', '')
                if year.isdigit():  # the text node is a str, so type(year) == int never matched
                    csl.setdefault('issued', {}).setdefault('date-parts', [[int(year)]])
if mods.xpath('./m:language/m:languageTerm', namespaces=NSDICT):
csl.setdefault('language', mods.xpath('./m:language/m:languageTerm', namespaces=NSDICT)[0].text)
# if mods.xpath('./m:physicalDescription/m:extent', namespaces=NSDICT):
# csl.setdefault('page', mods.xpath('./m:physicalDescription/m:extent', namespaces=NSDICT)[0].text)
if mods.xpath('./m:abstract', namespaces=NSDICT):
csl.setdefault('abstract', mods.xpath('./m:abstract', namespaces=NSDICT)[0].text)
csl_json.append(csl)
except Exception:
continue
return {'items': csl_json}
def mods2wtfjson(ppn=''):
wtf = {}
record = etree.parse(
'http://sru.gbv.de/gvk?version=1.1&operation=searchRetrieve&query=%s=%s&maximumRecords=10&recordSchema=mods'
% ('pica.ppn', ppn))
# logging.info(etree.tostring(record))
MODS = 'http://www.loc.gov/mods/v3'
NSDICT = {'m': MODS}
mods = record.xpath('//m:mods', namespaces=NSDICT)[0]
wtf.setdefault('id', str(uuid.uuid4()))
timestamp = str(datetime.datetime.now())
wtf.setdefault('created', timestamp)
wtf.setdefault('changed', timestamp)
wtf.setdefault('editorial_status', 'new')
wtf.setdefault('pubtype', 'Monograph')
wtf.setdefault('title', mods.xpath('./m:titleInfo/m:title', namespaces=NSDICT)[0].text)
if mods.xpath('./m:titleInfo/m:subTitle', namespaces=NSDICT):
wtf.setdefault('subtitle', mods.xpath('./m:titleInfo/m:subTitle', namespaces=NSDICT)[0].text)
persons = []
if mods.xpath('./m:name[@type="personal"]', namespaces=NSDICT):
for name in mods.xpath('./m:name[@type="personal"]', namespaces=NSDICT):
tmp = {}
if name.get('authority') and name.get('authority') == 'gnd' and name.get('valueURI'):
tmp.setdefault('gnd', name.get('valueURI').split('gnd/')[1])
tmp.setdefault('name', '%s, %s' % (name.xpath('./m:namePart[@type="family"]', namespaces=NSDICT)[0].text, name.xpath('./m:namePart[@type="given"]', namespaces=NSDICT)[0].text))
persons.append(tmp)
wtf.setdefault('person', persons)
if mods.xpath('./m:originInfo/m:place/m:placeTerm[@type="text"]', namespaces=NSDICT):
wtf.setdefault('place', mods.xpath('./m:originInfo/m:place/m:placeTerm[@type="text"]', namespaces=NSDICT)[0].text)
if mods.xpath('./m:originInfo/m:publisher', namespaces=NSDICT):
wtf.setdefault('publisher', mods.xpath('./m:originInfo/m:publisher', namespaces=NSDICT)[0].text)
if mods.xpath('./m:originInfo/m:dateIssued', namespaces=NSDICT):
year = mods.xpath('./m:originInfo/m:dateIssued', namespaces=NSDICT)[0].text.replace('[', '').replace(']', '').replace('c', '')
        if year.isdigit():  # the text node is a str, so type(year) == int never matched
            wtf.setdefault('issued', year)
if mods.xpath('./m:language/m:languageTerm', namespaces=NSDICT):
wtf.setdefault('language', mods.xpath('./m:language/m:languageTerm', namespaces=NSDICT)[0].text)
if mods.xpath('./m:physicalDescription/m:extent', namespaces=NSDICT):
wtf.setdefault('pages', mods.xpath('./m:physicalDescription/m:extent', namespaces=NSDICT)[0].text)
if mods.xpath('./m:abstract', namespaces=NSDICT):
abstract = {}
abstract.setdefault('content', mods.xpath('./m:abstract', namespaces=NSDICT)[0].text)
abstract.setdefault('shareable', True)
wtf.setdefault('abstract', []).append(abstract)
if mods.xpath('./m:note', namespaces=NSDICT):
notes = mods.xpath('./m:note', namespaces=NSDICT)
for note in notes:
wtf.setdefault('note', []).append(note.text)
if mods.xpath('./m:subject', namespaces=NSDICT):
keywords = []
subjects = mods.xpath('./m:subject', namespaces=NSDICT)
for subject in subjects:
for topic in subject:
keywords.append(topic.text)
wtf.setdefault('keyword', keywords)
if mods.xpath('./m:classification', namespaces=NSDICT):
classifications = mods.xpath('./m:classification', namespaces=NSDICT)
for classification in classifications:
tmp = {}
if classification.get('authority') and classification.get('authority') == 'ddc':
tmp.setdefault('id', classification.text)
tmp.setdefault('label', '')
wtf.setdefault('ddc_subject', []).append(tmp)
if mods.xpath('./m:identifier[@type]', namespaces=NSDICT):
ids = mods.xpath('./m:identifier[@type]', namespaces=NSDICT)
for myid in ids:
wtf.setdefault(str(myid.get('type')).upper(), []).append(myid.text)
if mods.xpath('./m:relatedItem', namespaces=NSDICT):
items = mods.xpath('./m:relatedItem', namespaces=NSDICT)
for item in items:
if item.get('type') and (item.get('type') == 'series' or item.get('type') == 'host'):
tmp = {}
for subitem in item:
if subitem.tag == '{http://www.loc.gov/mods/v3}titleInfo':
tmp.setdefault('is_part_of', subitem[0].text)
tmp.setdefault('volume', '')
wtf.setdefault('is_part_of', []).append(tmp)
# logging.debug('wtf_json: %s' % wtf)
return wtf
| 1.351563 | 1 |
Tools/compiler/compiler/misc.py | marcosptf/cpython-2.0.1 | 5 | 107268 | <gh_stars>1-10
import types
def flatten(tup):
elts = []
for elt in tup:
if type(elt) == types.TupleType:
elts = elts + flatten(elt)
else:
elts.append(elt)
return elts
class Set:
def __init__(self):
self.elts = {}
def __len__(self):
return len(self.elts)
def add(self, elt):
self.elts[elt] = elt
def elements(self):
return self.elts.keys()
def has_elt(self, elt):
return self.elts.has_key(elt)
def remove(self, elt):
del self.elts[elt]
class Stack:
def __init__(self):
self.stack = []
self.pop = self.stack.pop
def __len__(self):
return len(self.stack)
def push(self, elt):
self.stack.append(elt)
def top(self):
return self.stack[-1]
| 2.875 | 3 |
crafting/IRecipeInterface.py | uuk0/mcpython-4 | 2 | 107396 | """mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
import globals as G
class IRecipeInterface:
@staticmethod
def get_name() -> str:
raise NotImplementedError()
| 1.117188 | 1 |
tensorflow/contrib/slim/python/slim/data/dataset.py | connectthefuture/tensorflow | 101 | 107524 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of a Dataset.
A Dataset is a collection of several components: (1) a list of data sources
(2) a Reader class that can read those sources and returns possibly encoded
samples of data (3) a decoder that decodes each sample of data provided by the
reader (4) the total number of samples and (5) an optional dictionary mapping
the list of items returned to descriptions of those items.
Data can be loaded from a dataset specification using a dataset_data_provider:
dataset = CreateMyDataset(...)
provider = dataset_data_provider.DatasetDataProvider(
dataset, shuffle=False)
image, label = provider.get(['image', 'label'])
See slim.data.dataset_data_provider for additional examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Dataset(object):
"""Represents a Dataset specification."""
def __init__(self, data_sources, reader, decoder, num_samples,
items_to_descriptions, **kwargs):
"""Initializes the dataset.
Args:
data_sources: A list of files that make up the dataset.
reader: The reader class, a subclass of BaseReader such as TextLineReader
or TFRecordReader.
decoder: An instance of a data_decoder.
num_samples: The number of samples in the dataset.
items_to_descriptions: A map from the items that the dataset provides to
the descriptions of those items.
**kwargs: Any remaining dataset-specific fields.
"""
kwargs['data_sources'] = data_sources
kwargs['reader'] = reader
kwargs['decoder'] = decoder
kwargs['num_samples'] = num_samples
kwargs['items_to_descriptions'] = items_to_descriptions
self.__dict__.update(kwargs)
| 2.03125 | 2 |
test/lang/c/test_synthesis.py | rakati/ppci-mirror | 161 | 107652 | import unittest
import io
from ppci import ir
from ppci.irutils import verify_module
from ppci.lang.c import CBuilder
from ppci.lang.c.options import COptions
from ppci.arch.example import ExampleArch
from ppci.lang.c import CSynthesizer
class CSynthesizerTestCase(unittest.TestCase):
def test_hello(self):
""" Convert C to Ir, and then this IR to C """
src = r"""
void printf(char*);
void main(int b) {
printf("Hello" "world\n");
}
"""
arch = ExampleArch()
builder = CBuilder(arch.info, COptions())
f = io.StringIO(src)
ir_module = builder.build(f, None)
assert isinstance(ir_module, ir.Module)
verify_module(ir_module)
synthesizer = CSynthesizer()
synthesizer.syn_module(ir_module)
if __name__ == "__main__":
unittest.main()
| 1.289063 | 1 |
app.py | chatchalai/flask-boilerplate | 0 | 107780 | <reponame>chatchalai/flask-boilerplate
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
import postgresql_api
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
CORS(app)
@app.route('/')
def root():
return "Working"
@app.route('/hello')
def new_one_function():
username = request.args.get('name')
print(username)
return "chat" + username
@app.route('/mypage')
def mypage():
username = request.args.get("name")
return render_template('home.html',name = username)
@app.route('/std_list')
def std_list():
student_list= postgresql_api.get_student_data()
return render_template('table.html',student_list=student_list)
if __name__ == "__main__":
app.run(debug = True, host="0.0.0.0", port=5000) | 1.234375 | 1 |
src/EKF.py | noskill/JRMOT_ROS | 112 | 107908 | # vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg
import pdb
"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
1: 3.8415,
2: 5.9915,
3: 7.8147,
4: 9.4877,
5: 11.070,
6: 12.592,
7: 14.067,
8: 15.507,
9: 16.919}
chi2inv90 = {
1: 2.706,
2: 4.605,
3: 6.251,
4: 7.779,
5: 9.236,
6: 10.645,
7: 12.017,
8: 13.363,
9: 14.684}
chi2inv975 = {
1: 5.025,
2: 7.378,
3: 9.348,
4: 11.143,
5: 12.833,
6: 14.449,
7: 16.013,
8: 17.535,
9: 19.023}
chi2inv10 = {
1: .016,
2: .221,
3: .584,
4: 1.064,
5: 1.610,
6: 2.204,
7: 2.833,
8: 3.490,
9: 4.168}
chi2inv995 = {
1: 0.0000393,
2: 0.0100,
3: .0717,
4: .207,
5: .412,
6: .676,
7: .989,
8: 1.344,
9: 1.735}
chi2inv75 = {
1: 1.323,
2: 2.773,
3: 4.108,
4: 5.385,
5: 6.626,
6: 7.841,
7: 9.037,
8: 10.22,
9: 11.39}
def squared_mahalanobis_distance(mean, covariance, measurements):
# cholesky factorization used to solve for
# z = d * inv(covariance)
# so z is also the solution to
# covariance * z = d
d = measurements - mean
# cholesky_factor = np.linalg.cholesky(covariance)
# z = scipy.linalg.solve_triangular(
# cholesky_factor, d.T, lower=True, check_finite=False,
# overwrite_b=True)
squared_maha = np.linalg.multi_dot([d, np.linalg.inv(covariance),
d.T]).diagonal()
return squared_maha
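# Example gating check (sketch): accept a 4-D measurement for association only
# if its squared Mahalanobis distance is under the 0.95 chi-square quantile:
#   gate_ok = squared_mahalanobis_distance(mean, cov, measurements) < chi2inv95[4]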
class EKF(object):
"""
Generic extended kalman filter class
"""
def __init__(self):
pass
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the new track.
Unobserved velocities are initialized to 0 mean.
"""
pass
def predict_mean(self, mean):
# Updates predicted state from previous state (function g)
# Calculates motion update Jacobian (Gt)
# Returns (g(mean), Gt)
pass
def get_process_noise(self, mean, covariance):
# Returns Rt the motion noise covariance
pass
def predict_covariance(self, mean, covariance):
pass
def project_mean(self, mean):
# Measurement prediction from state (function h)
# Calculations sensor update Jacobian (Ht)
# Returns (h(mean), Ht)
pass
def project_cov(self, mean, covariance):
pass
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The mean vector of the object state at the previous
time step.
covariance : ndarray
The covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
# Perform prediction
covariance = self.predict_covariance(mean, covariance)
mean = self.predict_mean(mean)
return mean, covariance
def get_innovation_cov(self, covariance):
pass
def project(self, mean, covariance):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector
covariance : ndarray
The state's covariance matrix
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
# Measurement uncertainty scaled by estimated height
return self.project_mean(mean), self.project_cov(mean, covariance)
def update(self, mean, covariance, measurement_t, marginalization=None, JPDA=False):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, a, h), where (x, y)
is the center position, a the aspect ratio, and h the height of the
bounding box.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
predicted_measurement, innovation_cov = self.project(mean, covariance)
# cholesky factorization used to solve for kalman gain since
# K = covariance * update_mat.T * inv(innovation_cov)
# so K is also the solution to
# innovation_cov * K = covariance * update_mat.T
try:
chol_factor, lower = scipy.linalg.cho_factor(
innovation_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._observation_mat.T).T,
check_finite=False).T
except:
# in case cholesky factorization fails, revert to standard solver
kalman_gain = np.linalg.solve(innovation_cov, np.dot(covariance, self._observation_mat.T).T).T
if JPDA:
# marginalization
innovation = np.zeros((self.ndim))
cov_soft = np.zeros((self.ndim, self.ndim))
for measurement_idx, measurement in enumerate(measurement_t):
p_ij = marginalization[measurement_idx + 1] # + 1 for dummy
y_ij = measurement - predicted_measurement
innovation += y_ij * p_ij
cov_soft += p_ij * np.outer(y_ij, y_ij)
cov_soft = cov_soft - np.outer(innovation, innovation)
P_star = covariance - np.linalg.multi_dot((
kalman_gain, innovation_cov, kalman_gain.T))
p_0 = marginalization[0]
P_0 = p_0 * covariance + (1 - p_0) * P_star
new_covariance = P_0 + np.linalg.multi_dot((kalman_gain, cov_soft, kalman_gain.T))
else:
innovation = measurement_t - predicted_measurement
new_covariance = covariance - np.linalg.multi_dot((
kalman_gain, innovation_cov, kalman_gain.T))
new_mean = mean + np.dot(innovation, kalman_gain.T)
return new_mean, new_covariance
| 1.960938 | 2 |
gameevents/db_create.py | danilovbarbosa/sg-gameevents | 0 | 108036 | '''
Created on 15 Oct 2015
@author: mbrandaoca
'''
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from gameevents_app import db, create_app
import os.path
import sys
app = create_app()
with app.app_context():
db.create_all()
#Add the admin user
from gameevents_app.models.client import Client
#Generate random password
from random import choice
import string
chars = string.ascii_letters + string.digits
length = 16
randompass = ''.join(choice(chars) for _ in range(length))
admin = Client('administrator', randompass, "admin")
db.session.add(admin)
try:
db.session.commit()
sys.stdout.write("Created administrator client: %s, with random apikey %s \n" % (admin.clientid, randompass) )
except Exception as e:
sys.stdout.write(e)
db.session.rollback()
db.session.flush() # for resetting non-commited .add()
#
# if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
# api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
# api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# else:
# api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO)) | 1.234375 | 1 |
src/main/python/shippy/data_volume.py | codesplicer/shippy | 0 | 108164 | <reponame>codesplicer/shippy<gh_stars>0
# shippy
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
shippy.data_volume
===================
Builds and manages a docker data volume with the provided sourcecode
"""
import logging
import docker
from docker import APIClient
from copy import deepcopy
LOGGER = logging.getLogger(__name__)
DOCKERFILE_TEMPLATE = """\
FROM busybox
# Add our user and group
RUN addgroup -S user && adduser -G user -D user
RUN mkdir -p {mountpoint}
RUN chown -R user:user {mountpoint}
RUN chmod -R 777 {mountpoint}
ADD . {mountpoint}
VOLUME {mountpoint}
USER user
LABEL version={source_sha}
LABEL maintainer="<NAME> (<EMAIL>)"
CMD ["echo", "Data container for app"]
"""
class DataVolume:
def __init__(self, sourcecode_path, sha, config):
"""
Constructor
:param sourcecode_path: (str) Path to unpacked sourcecode for the given hash
:param sha: (str) Commit hash to work on
:param config: (dict) Configuration object as parsed by shippy.config
"""
self.sourcecode_path = sourcecode_path
self.sha = sha
self.config = deepcopy(config)
self.cli = APIClient(base_url='unix://var/run/docker.sock')
self.volume_name = self._generate_name()
self.volume_image_tag = self._generate_tag()
def _generate_name(self):
"""
Generates a name for the data volume
:return: (str) Volume name
"""
volume_name = "shippy_{app_name}_data_{sha}".format(app_name=self.config["app_name"], sha=self.sha)
return volume_name
def _generate_tag(self):
"""
Generates image tag for the data volume
:return: (str) Volume image tag
"""
tag = "shippy_{app_name}:{sha}".format(app_name=self.config["app_name"], sha=self.sha)
return tag
def get_name(self):
"""
Returns name of the docker volume
:return:(str) Name of the docker volume name
"""
return self.volume_name
def get_tag(self):
"""
Returns name of the docker image tag
:return:(str) Name of the docker volume tag
"""
return self.volume_image_tag
def _render_template(self):
"""
Renders dockerfile template
:return: (str) Rendered template
"""
data = {
"mountpoint": self.config["application_source_mountpoint"],
"source_archivedir": self.sourcecode_path,
"source_sha": self.sha
}
template = DOCKERFILE_TEMPLATE.format(**data)
return template
def _write_dockerfile(self):
"""
Writes a dockerfile into the source path
:return: None
"""
template = self._render_template()
dockerfile_path = "{sourcepath}/Dockerfile".format(sourcepath=self.sourcecode_path)
LOGGER.info("Writing dockerfile to: %s", dockerfile_path)
with open(dockerfile_path, "w") as f:
try:
f.write(template)
except OSError as e:
LOGGER.error("Could not write dockerfile")
LOGGER.error(e)
raise SystemExit(1)
def build(self):
"""
Builds a docker data volume
:return: None
"""
'''
1. copy sourcecode path into volume
2. create named docker volume.
In order to facilitate lookup for cleanup, we need to
be able to search based on standard naming convention (or image label)
'''
# Write dockerfile
self._write_dockerfile()
LOGGER.info("Creating docker data volume")
try:
response = self.cli.build(path=self.sourcecode_path, rm=True, tag=self.volume_name)
except docker.errors.BuildError as e:
LOGGER.error("Problem building docker image")
LOGGER.error(e)
raise SystemExit(1)
for line in response:
decoded = line.decode("utf-8")
if "error" in decoded:
LOGGER.error(decoded)
raise docker.errors.BuildError
def remove(self):
"""
Deletes the docker image
:return: None
"""
LOGGER.info("Removing image: %s", self.volume_name)
        response = [line for line in self.cli.remove_image(image=self.volume_name, force=True)]
LOGGER.info(response)
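    # Illustrative usage sketch (added; not part of the original module). The
    # paths, sha and config values below are assumptions made only for this
    # example; in practice the config dict comes from shippy's configuration layer.
    #
    #   config = {"app_name": "myapp",
    #             "application_source_mountpoint": "/opt/app/src"}
    #   volume = DataVolume("/tmp/myapp-src", "abc123", config)
    #   volume.build()    # writes a Dockerfile into the source dir and builds the image
    #   volume.remove()   # deletes the image once it is no longer needed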
| 1.515625 | 2 |
setup.py | AzureHJ/CHAID | 1 | 108292 | <filename>setup.py
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/Rambatino/CHAID
"""
import re
from os import path
from setuptools import setup, find_packages
def get_version():
"""
Read version from __init__.py
"""
version_regex = re.compile(
'__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
)
here = path.abspath(path.dirname(__file__))
init_location = path.join(here, "CHAID/__init__.py")
with open(init_location) as init_file:
for line in init_file:
match = version_regex.search(line)
if not match:
raise Exception(
"Couldn't read version information from '{0}'".format(init_location)
)
return match.group('version')
setup(
name='CHAID',
version=get_version(),
description='A CHAID tree building algorithm',
long_description="This package provides a python implementation of the Chi-Squared Automatic Inference Detection (CHAID) decision tree",
url='https://github.com/Rambatino/CHAID',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='CHAID pandas numpy scipy statistics statistical analysis',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['cython', 'numpy', 'pandas', 'treelib', 'pytest', 'scipy', 'savReaderWriter', 'graphviz', 'plotly', 'colorlover'],
extras_require={
'test': ['codecov', 'tox', 'tox-pyenv', 'detox', 'pytest', 'pytest-cov'],
}
)
| 1.429688 | 1 |
code/old code/optimizeCRF.py | ujjwal95/tappe_project | 1 | 108420 | import pandas as pd
import numpy
import matplotlib
import sklearn_crfsuite
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn_crfsuite import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import RandomizedSearchCV
from sklearn_crfsuite import scorers
from sklearn.externals import joblib
from glob import glob
import scipy.stats
import operator
import sys
import os
# FBcols = ["FB%d" % d for d in range(4096)]
# GGcols = ["GG%d" % d for d in range(512)]
# elmocols = ["ELMO%d" % d for d in range(1024)]
features = ["GS%d" % d for d in range(4096)] + ['wordCount','chartStart','charEnd']
# labelNames = ['semanticType','Symptom','PMH','MEDS','ALLG','FAMHx','SOCHx','pysch','lifestyle','substanceUse','PE','FORM','supportProvision','transition']
labelNames = ['supportProvision']
files = glob("/Users/karanjani/Desktop/csvWithVecs/TrainCSV_Updated/*.csv")
#MAYBE CREATE A LIST FOR featurelabels so you can add what you wish to the FB vectors?
for name in labelNames:
featureMaster = []
labelMaster = []
for file in files[:10]:
df = pd.read_csv(file)
df = df.dropna(axis=0, how='any')
df = df[df.speakerID == 'doctor']
#DROP ALL LABELS + ANY FEATURES YOU DON'T WANT TO INCLUDE
dfX = df[features]
# dfX = df.drop(['labelType','stringList','transition'], axis=1)
#CREATE LIST OF LIST OF DICTS OF FEATURES
list_of_FeatureDicts = dfX.to_dict(orient='records')
featureMaster += [list_of_FeatureDicts]
#CREATE LIST OF LIST OF STRINGS OF LABELS
labels = df[name].values.tolist()
labelMaster += [labels]
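    # Note (added for clarity): sklearn-crfsuite expects X as a list of
    # sequences, where each sequence is a list of per-token feature dicts, and
    # y as a parallel list of label sequences. Each CSV file above contributes
    # one such sequence, e.g. (values invented for illustration only):
    #
    #   featureMaster = [[{'GS0': 0.1, 'wordCount': 3}, {'GS0': 0.2, 'wordCount': 5}]]
    #   labelMaster = [['supportProvision', 'none']]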
X_train, X_valid, Y_train, Y_valid = train_test_split(featureMaster, labelMaster, test_size = 0.2)
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
max_iterations=100,
all_possible_transitions=True)
params_space = {'c1': scipy.stats.expon(scale=0.5),'c2': scipy.stats.expon(scale=0.05)}
f1_scorer = make_scorer(metrics.flat_f1_score,average='weighted', labels=numpy.unique(name))
rs = RandomizedSearchCV(crf, params_space,
cv=2,
verbose=1,
n_jobs=-1,
n_iter=10,
scoring=f1_scorer)
rs.fit(X_train, Y_train)
print('best params:', rs.best_params_)
print('best CV score:', rs.best_score_)
print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))
| 1.773438 | 2 |
src/squad/compute/curves.py | douglasdaly/spot-robot | 0 | 108548 | <filename>src/squad/compute/curves.py
import math
from typing import Callable, Iterable, List, Optional, Tuple, Union
import numpy as np
from squad.constants import LIMIT_LT_1
def _bezier_domain(t: float) -> float:
"""Constrains the value to the valid Bezier domain."""
return max(min(t, LIMIT_LT_1), 0.0)
def bezier_function(
points: Union[Iterable[float], Iterable[Iterable[float]]],
weights: Optional[Iterable[float]] = None,
) -> Callable[[float], Tuple[float, ...]]:
"""Creates a new rational Bezier function from the given points.
Parameters
----------
points : Iterable[float] or Iterable[Tuple[float, ...]]
The control points to use to construct the new Bezier function.
weights : Iterable[float], optional
The corresponding weights to use for the given `points` (if
any). If not provided the points will be equally-weighted.
Returns
-------
Callable[[float], Tuple[float, ...]]
The rational Bezier function constructed from the given control
`points` and (optional) weights.
"""
control_points: List[Tuple[float, ...]] = []
for p in points:
if isinstance(p, float):
control_points.append((p,))
else:
control_points.append(tuple(p))
cp_arr = np.array(control_points, dtype=float)
if weights is None:
wt_arr = np.ones(cp_arr.shape[0], dtype=float)
    else:
        # One weight per control point; np.repeat along axis=1 on a 1-D array
        # would raise, so simply coerce the weights to a float array.
        wt_arr = np.array(weights, dtype=float)
    # Binomial coefficients of the degree-(n-1) Bernstein basis, where n is
    # the number of control points.
    lookup_tbl = np.array(
        [math.comb(cp_arr.shape[0] - 1, i) for i in range(cp_arr.shape[0])],
        dtype=float,
    )
def _bezier_fn(t: float) -> Tuple[float, ...]:
"""Rational Bezier curve function.
Parameters
----------
t : float
The point on the curve to compute the coordinates for (s.t.
0 <= t <= 1).
Returns
-------
Tuple[float, ...]
The coordinates of the point on the curve specified by `t`.
"""
t = _bezier_domain(t)
n = cp_arr.shape[0]
num = np.zeros(cp_arr.shape[1], dtype=float)
den = np.zeros_like(num)
for i in range(n):
            t_bn = lookup_tbl[i] * ((1.0 - t) ** (n - 1 - i)) * (t ** i)
num += t_bn * (cp_arr[i] * wt_arr[i])
den += t_bn * wt_arr[i]
return tuple(num / den)
return _bezier_fn
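# Illustrative usage sketch (added; not part of the original module):
#
#   curve = bezier_function([(0.0, 0.0), (0.5, 1.0), (1.0, 0.0)])
#   curve(0.0)    # ~(0.0, 0.0), the first control point
#   curve(0.5)    # a point pulled towards the middle control point
#   curve(0.999)  # ~(1.0, 0.0); t is clamped just below 1.0 internally
#
# Passing `weights` biases the curve towards the more heavily weighted
# control points while keeping the same endpoints.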
def linear_interpolation_function(
initial_point: Union[float, Tuple[float, ...]],
final_point: Union[float, Tuple[float, ...]],
) -> Callable[[float], Tuple[float, ...]]:
"""Creates a linear interpolation function between two points.
Parameters
----------
initial_point : float or Tuple[float, ...]
The initial point to start the interpolation from.
final_point : float or Tuple[float, ...]
The final/target point to end the interpolation at.
Returns
-------
Callable[[float], Tuple[float, ...]]
The interpolation function between the two points.
"""
if isinstance(initial_point, float):
pt_s = (initial_point,)
else:
pt_s = initial_point
if isinstance(final_point, float):
pt_f = (final_point,)
else:
pt_f = final_point
d_pts = tuple(f - s for s, f in zip(pt_s, pt_f))
pt_iter = tuple(zip(pt_s, d_pts))
def _linear_interpolation_fn(t: float) -> Tuple[float, ...]:
"""Linear interpolation function from initial to final points.
Parameters
----------
t : float
The point on the curve to compute the coordinates for (s.t.
0.0 <= t <= 1.0).
Returns
-------
Tuple[float, ...]
The coordinate(s) of the point specified by `t`.
"""
return tuple(s + (t * d) for s, d in pt_iter)
return _linear_interpolation_fn
| 2.625 | 3 |
my_prepare_ending.py | wangbingo/Person_reID_baseline_pytorch | 0 | 108676 | import os, random, shutil
from shutil import copyfile
from IPython import embed
# You only need to change this line to your dataset download path
download_path = '../train'
#---------------------------------------
#train_val
train_save_path = download_path + '/pytorch/train'
val_save_path = download_path + '/pytorch/val'
if not os.path.isdir(train_save_path):
os.mkdir(train_save_path)
os.mkdir(val_save_path)
print('cp train_all train begin........')
os.system('cp -r ../train/pytorch/train_all/* ../train/pytorch/train/') # tested ok.
print('cp train_all train completed........')
split_rate = 0.1
dir_numbers = len(os.listdir(train_save_path)) # 19658
pick_numbers = int(dir_numbers * split_rate) #
dir_samples = random.sample(os.listdir(train_save_path), pick_numbers) #
for dir in dir_samples:
shutil.move(train_save_path + '/' + dir, val_save_path + '/' + dir)
print('{} / {} dirs moved from train to val.'.format(len(dir_samples), dir_numbers))
print('train/val datasets generated.')
| 1.9375 | 2 |
ex10_5.py | DexHunter/Think-Python-book-exercise-solutions | 24 | 108804 | <gh_stars>10-100
def is_sorted(l):
if len(l) == 1:
return True
else:
if(l[0] > l[1]):
return False
return is_sorted(l[1:])
print is_sorted([1,2,2])
print is_sorted(['b','a']) | 2.65625 | 3 |
test-crates/pyo3-mixed/test_pyo3_mixed.py | thedrow/maturin | 854 | 108932 | #!/usr/bin/env python3
import pyo3_mixed
def test_get_42():
assert pyo3_mixed.get_42() == 42
| 0.660156 | 1 |
tests/atomic_swap/test_get_public_key.py | Remmeauth/remme-core-cli | 0 | 109060 | """
Provide tests for the command line interface's atomic swap get-public-key commands.
"""
import json
import re
import pytest
from click.testing import CliRunner
from cli.constants import (
DEV_CONSENSUS_GENESIS_NODE_IP_ADDRESS_FOR_TESTING,
FAILED_EXIT_FROM_COMMAND_CODE,
PASSED_EXIT_FROM_COMMAND_CODE,
PUBLIC_KEY_REGEXP,
)
from cli.entrypoint import cli
from cli.utils import dict_to_pretty_json
def test_get_public_key():
"""
Case: get the public key of the atomic swap.
Expect: a public key is returned.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'atomic-swap',
'get-public-key',
'--node-url',
DEV_CONSENSUS_GENESIS_NODE_IP_ADDRESS_FOR_TESTING,
])
public_key = json.loads(result.output).get('result').get('public_key')
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert re.match(pattern=PUBLIC_KEY_REGEXP, string=public_key) is not None
def test_get_public_key_without_node_url(mocker):
"""
Case: get the public key of the atomic swap without passing node URL.
Expect: a public key is returned from node on localhost.
"""
public_key = '03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad'
mock_swap_get_public_key = mocker.patch('cli.node.service.loop.run_until_complete')
mock_swap_get_public_key.return_value = public_key
runner = CliRunner()
result = runner.invoke(cli, [
'atomic-swap',
'get-public-key',
])
expected_result = {
'result': {
'public_key': public_key,
},
}
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert expected_result == json.loads(result.output)
def test_get_public_key_invalid_node_url():
"""
Case: get the public key of the atomic swap by passing an invalid node URL.
Expect: the following node URL is invalid error message.
"""
invalid_node_url = 'domainwithoutextention'
runner = CliRunner()
result = runner.invoke(cli, [
'atomic-swap',
'get-public-key',
'--node-url',
invalid_node_url,
])
expected_error = {
'errors': {
'node_url': [
f'The following node URL `{invalid_node_url}` is invalid.',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
@pytest.mark.parametrize('node_url_with_protocol', ['http://masternode.com', 'https://masternode.com'])
def test_get_public_key_node_url_with_protocol(node_url_with_protocol):
"""
Case: get the public key of the atomic swap by passing node URL with an explicit protocol.
Expect: the following node URL contains a protocol error message.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'atomic-swap',
'get-public-key',
'--node-url',
node_url_with_protocol,
])
expected_error = {
'errors': {
'node_url': [
f'Pass the following node URL `{node_url_with_protocol}` without protocol (http, https, etc.).',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
def test_get_public_key_non_existing_node_url():
"""
Case: get the public key of the atomic swap by passing the non-existing node URL.
Expect: check if node running at the URL error message.
"""
non_existing_node_url = 'non-existing-node.com'
runner = CliRunner()
result = runner.invoke(cli, [
'atomic-swap',
'get-public-key',
'--node-url',
non_existing_node_url,
])
expected_error = {
'errors': f'Please check if your node running at http://{non_existing_node_url}:8080.',
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
| 1.46875 | 1 |
Accessor.py | gudnm/dhash | 3 | 109188 | <gh_stars>1-10
class Accessor(object):
""""""
pass
class WriteThru(Accessor):
"""Implement write-thru access pattern."""
pass
class WriteAround(Accessor):
"""Implement write-around access pattern."""
pass
class WriteBack(Accessor):
"""Implement write-back access pattern."""
pass
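# Illustrative sketch (added; not part of the original module) of what a
# write-thru accessor could look like. The `cache`/`store` attributes and
# their `put` methods are assumptions made only for this example.
#
#   class WriteThru(Accessor):
#       def write(self, key, value):
#           self.cache.put(key, value)   # update the cache...
#           self.store.put(key, value)   # ...and the backing store synchronously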
| 1.859375 | 2 |
geometric_algebra_attention/keras/Multivector2Vector.py | klarh/geometric_algebra_attention | 5 | 109316 | <reponame>klarh/geometric_algebra_attention
from tensorflow import keras
from .. import base
class Multivector2Vector(base.Multivector2Vector, keras.layers.Layer):
__doc__ = base.Multivector2Vector.__doc__
def call(self, inputs, mask=None):
return self._evaluate(inputs)
keras.utils.get_custom_objects()['Multivector2Vector'] = Multivector2Vector
| 1.875 | 2 |
train/Deprecated/train_gpu.py | louisletoumelin/wind_downscaling_cnn | 0 | 109444 | """
Created on Tue Jul 28 12:19:11 2020
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Flatten, BatchNormalization, LeakyReLU, LSTM, UpSampling2D, Conv2DTranspose, ZeroPadding2D, Cropping2D, UpSampling1D
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras import backend as K
import horovod.tensorflow.keras as hvd
############
# Parameters
############
date='14_12'
# Training data dimensions
n_rows = 79
n_col = 69
# Data format (channels_last)
input_shape = (n_rows, n_col, 1)
output_shape = (n_rows, n_col, 3)
input_dir = "//home/mrmn/letoumelinl/train"
output_dir = "//scratch/mrmn/letoumelinl/ARPS/"
# Number of filters per convolution layer
n_conv_features = 32
# Loss function: mse = mean squared error
loss="mse"
############
# GPU
############
GPU = True
if GPU:
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# Horovod: initialize Horovod.
hvd.init()
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
############
# Functions
############
def root_mse(y_true, y_pred):
return(K.sqrt(K.mean(K.square(y_true - y_pred))))
def build_model(input_shape):
model = Sequential()
model.add(ZeroPadding2D(padding=((0, 1), (0, 1)), input_shape=input_shape))
# CONVOLUTION
model.add(Conv2D(n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(Conv2D(n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2D(2*n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(Conv2D(2*n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2D(4*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(Conv2D(4*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2D(8*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(Conv2D(8*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Dropout(0.25))
# DECONVOLUTION
model.add(Conv2DTranspose(8*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(Conv2DTranspose(8*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(UpSampling2D(size=(2, 2)))
model.add(ZeroPadding2D(padding=((0, 0), (0, 1))))
model.add(Conv2DTranspose(4*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(Conv2DTranspose(4*n_conv_features, (3, 3), activation='relu', padding="same"))
model.add(UpSampling2D(size=(2, 2)))
model.add(ZeroPadding2D(padding=((0, 0), (0, 1))))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2DTranspose(2*n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(Conv2DTranspose(2*n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(UpSampling2D(size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2DTranspose(n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(Conv2DTranspose(n_conv_features, (5, 5), activation='relu', padding="same"))
model.add(Dropout(0.25))
    # Output matrix
model.add(Conv2DTranspose(3, (5, 5), activation='linear', padding="same"))
model.add(Cropping2D(cropping=((0, 1), (0, 1))))
    # Network summary
model.summary()
return(model)
def train_model(model, root_mse, input_dir, txt):
    #model.compile(loss=loss, optimizer=RMSprop(lr=0.001, decay=0.0001), metrics=['mae', root_mse])
    # Save the freshly initialised weights once so that each fold below can
    # reload them and start training from the same initial state.
    model.save_weights('weights.h5')
for fold in range(8):
print('Fold' + str(fold))
model.load_weights('weights.h5')
# Horovod: adjust learning rate based on number of GPUs.
scaled_lr = 0.001 * hvd.size()
opt = tf.optimizers.RMSprop(scaled_lr)
# Horovod: add Horovod DistributedOptimizer.
opt = hvd.DistributedOptimizer(
opt)
model.compile(loss=loss,
optimizer=opt,
metrics=['mae', root_mse],
experimental_run_tf_function=False)
        # Path to the data (already preprocessed for numpy)
filepath = output_dir + "fold{}/".format(fold)
        # Load the data
print("LOADING DATA")
if txt:
TOPO_TRAIN = np.loadtxt(filepath + "train/topo.txt", dtype=np.float32)
WIND_TRAIN = np.loadtxt(filepath + "train/wind.txt", dtype=np.float32)
TOPO_VALID = np.loadtxt(filepath + "validation/topo.txt", dtype=np.float32)
WIND_VALID = np.loadtxt(filepath + "validation/wind.txt", dtype=np.float32)
else:
TOPO_TRAIN = np.load(filepath + "train/topo.npy")
WIND_TRAIN = np.load(filepath + "train/wind.npy")
TOPO_VALID = np.load(filepath + "validation/topo.npy")
WIND_VALID = np.load(filepath + "validation/wind.npy")
        # Print the array dimensions
print("Before reshaping")
print("Training shape: ")
print(TOPO_TRAIN.shape)
print(WIND_TRAIN.shape)
print("Validation shape: ")
print(TOPO_VALID.shape)
print(WIND_VALID.shape)
        # Reshape the raw (x*x) data to the Keras input format
x_train = TOPO_TRAIN.reshape((TOPO_TRAIN.shape[0], * input_shape))
y_train = WIND_TRAIN.reshape((WIND_TRAIN.shape[0], * output_shape))
x_val = TOPO_VALID.reshape((TOPO_VALID.shape[0], * input_shape))
y_val = WIND_VALID.reshape((WIND_VALID.shape[0], * output_shape))
print("\n\nAfter reshaping:\n")
print("Training shape: ")
print(x_train.shape)
print(y_train.shape)
print("Validation shape: ")
print(np.shape(x_val))
print(np.shape(y_val))
        # Normalise the features
train_mean, train_std = np.mean(x_train), np.std(x_train)
x_train = (x_train - train_mean)/train_std
x_val = (x_val - train_mean)/train_std
        # Define the callbacks used
filepath="checkpoint.hdf5"
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
#
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
hvd.callbacks.LearningRateWarmupCallback(initial_lr=scaled_lr, warmup_epochs=3, verbose=1),
ReduceLROnPlateau(monitor='loss', factor=0.1, patience=5, min_lr=1e-10, verbose=1)
]
# Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if hvd.rank() == 0:
callbacks.append(ModelCheckpoint(filepath,
monitor='loss',
verbose=0,
save_best_only=True,
mode='min'))
# Horovod: write logs on worker 0.
verbose = 1 if hvd.rank() == 0 else 0
        # TRAIN THE NETWORK
history = model.fit(x_train,
y_train,
batch_size=32,
steps_per_epoch=500 // hvd.size(),
epochs=150,
verbose=verbose,
validation_data=(x_val, y_val),
callbacks=callbacks)
        # Save the model
model.save(output_dir+'model_'+date+'_fold_{}.h5'.format(fold))
np.save(output_dir+'model_'+date+'_fold_{}_history.npy'.format(fold), history.history)
return(model, history)
if GPU:
start = time.perf_counter()
model = build_model(input_shape)
_, history = train_model(model, root_mse, input_dir, False)
finish = time.perf_counter()
print(f'\nFinished in {round((finish-start)/60, 2)} minute(s)')
| 1.945313 | 2 |
trade_remedies_caseworker/core/models.py | uktrade/trade-remedies-caseworker | 1 | 109572 | <gh_stars>1-10
from .constants import SECURITY_GROUPS_TRA_ADMINS, SECURITY_GROUPS_TRA_TOP_ADMINS
class TransientUser:
"""
A TransientUser object mimics a Django auth User but does not
    persist anywhere. Instead, it is created on the fly by the
APIUserMiddleware middleware using session data.
"""
def __init__(self, **kwargs):
self.is_authenticated = True
self.transient_user = True
self.permissions = {}
for key, value in kwargs.items():
setattr(self, key, value)
def has_group(self, groups):
if not isinstance(groups, list):
groups = [groups]
return any([grp in self.groups for grp in groups])
def has_perms(self, permissions):
return any([prm in self.permissions for prm in permissions])
def has_perm(self, permission):
return permission in self.permissions
@property
def is_admin(self):
return any([grp in self.groups for grp in SECURITY_GROUPS_TRA_ADMINS])
@property
def is_top_admin(self):
"""
To resolve TR-2639, adding this check for higher admin access.
This is because lead inv. is considered admin currently. This can be changed
but the implications are not clear.
"""
return any([grp in self.groups for grp in SECURITY_GROUPS_TRA_TOP_ADMINS])
def reload(self, request):
"""
Reload the user from the API
"""
user = get_user(request.user.token, self.id) # noqa: F821
request.session["user"] = user
request.session.modified = True
self.init_fields(**user)
return user
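    # Illustrative sketch (added; not part of the original module): the
    # middleware builds this object from the session payload roughly as
    # follows. The field and group names are assumptions for this example only.
    #
    #   user = TransientUser(id=session_user["id"],
    #                        token=session_user["token"],
    #                        groups=session_user["groups"],
    #                        permissions=session_user.get("permissions", {}))
    #   user.has_group("TRA Administrator")   # group membership check
    #   user.has_perm("workflow_editor")      # single permission check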
| 1.453125 | 1 |
analizador_lexico.py | JPalmaUCT/2FasesCompilador | 0 | 109700 | import json
#The key is the lexeme (what is read from the program) and the value is the token
#example lexeme: = ; token: assignment operator
#here the token value could also carry a description instead of just repeating the lexeme
#example 'ari': 'ari / if conditional'
#Several are still missing, check the table and add them please
reservadas = { 'ari':'if', #if
'chayri':'else', #else
'kawsachiy':'while', #while
'jaykaxpax':'for', #for
'imbabura':'function', #function
'harkay':'break', #break
'apachimuy' : 'import', #import
'kutichipuy' : 'return', #return
'pachan': 'tipo_entero', #int
'killaywaska' : 'tipo_string', #string
'pasaqlla': 'tipo_flotante', #float
'huknin':'tipo_booleano', #boolean
'chiqap':'valor_booleano', #true
'llulla':'valor_booleano', #false
'chitiripuy' : 'tipo_alerta', #alerta. Nuevo tipo de dato para nuestro contexto
'chaa' : 'alerta', #verde
'karwa':'alerta', #amarillo
'antipuka':'alerta', #rojo
'anchuna' : 'sen_evacuar', #evacuar
'kakuy':'sen_no_evacuar', #no evacuar
#'apachimuy': 'apachimuy / decision', #decision
'rikhuchiy':'imprimir', #print
'puncu': 'input', #input
'tapuyAriq':'funcion_medir_volcan', #medirVolcan
'apu':'operador_or',
'alqa':'operador_and'
}
operadores = {'=': 'operador_asignacion',
'+': 'operador_suma',
'-' : 'operador_resta',
'/' : 'operador_division',
'*': 'operador_multiplicacion',
'++' : 'operador_incremento',
'--' : 'operador_decremento'}
comparadores = {'<':'comparador',
'<=':'comparador',
'>':'comparador',
'>=':'comparador',
'==':'comparador',
'!=':'comparador'}
delimitadores = {'(':'parentesis_apertura',
')':'parentesis_cierre',
'{':'delimitador_apertura',
'}':'delimitador_cierre',
';':'fin_sentencia'}
#get the possible lexemes of each block (an array)
#for example delimitadores_lexema would be: ['(',')','{','}']
operadores_lexema = operadores.keys()
comparadores_lexema = comparadores.keys()
reservadas_lexema = reservadas.keys()
delimitadores_lexema = delimitadores.keys()
"""#jaja
for i in reservadas:
print (i)
for i in operadores:
print (i)
for i in comparadores:
print (i)
for i in delimitadores:
print (i)"""
#Quechua letters that are allowed
permitidos = ['a','c','h','i','j','k','l','m','n','ntilde','p','q', 'r', 's', 't', 'u','w','y',
'A','C','H','I','J','K','L','M','N','NTILDE','P','Q', 'R', 'S', 'T', 'U','W','Y','_']
numeros = ['0','1','2','3','4','5','6','7','8','9']
#checks whether the lexeme read from the file is an identifier (a letter followed by letters or digits)
def es_identificador(lexema):
esIdentificador = True
    inicial = lexema[0] #first character, check that it is a letter
if not inicial in permitidos:
esIdentificador = False
if len(lexema) > 1:
for i in range(1,len(lexema)):
if not lexema[i] in permitidos and not lexema[i] in numeros:
esIdentificador = False
return esIdentificador
#checks whether the lexeme read from the file is a floating point number (it must contain a .)
def es_flotante(lexema):
comprobacion = False
for dig in lexema:
if dig == ".":
comprobacion = True
if comprobacion:
try:
float(lexema)
except:
comprobacion = False
return comprobacion
#checks whether the lexeme read from the file is an integer
def es_entero(lexema):
return lexema.isdigit()
"""def es_cadena(lexema):
return type(lexema).__name__ == "str"""
#tabla will contain every token detected in the file
estructura = {}
tabla = []
#each token (an element returned by crearToken) is appended to tabla
def crearToken(token,lexema,linea):
myToken = {}
myToken["token"] = token
myToken["lexema"] = lexema
myToken["linea"] = linea
return myToken
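#Illustrative example (added): crearToken("identificador", "runa", 3) returns
#{"token": "identificador", "lexema": "runa", "linea": 3}, which is exactly
#what gets appended to tabla below.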
def eliminarEspaciosyComentarios(codigo):
for i in range(len(codigo)):
codigo[i] = codigo[i].strip()
cod_sin_espacio = []
for lex in codigo:
if lex != "":
cod_sin_espacio.append(lex)
indice = len(codigo)
for i in range(len(cod_sin_espacio)):
if len(cod_sin_espacio[i]) >= 2:
if cod_sin_espacio[i][0] =='/' and cod_sin_espacio[i][1] =='/':
print(indice)
indice = i
print("new")
print(indice)
cod_sin_espacio = cod_sin_espacio[:indice]
return cod_sin_espacio
#Open the file in read mode
f=open("programa", "r")
i =f.read()
linea = 0
program = i.split('\n') #split by newline into an array like ['the','whole','program','here']
identificado = False
for line in program:
    #split the line by spaces
    codigo = line.split(' ')
    #Remove blank entries and comments from the code
    codigo = eliminarEspaciosyComentarios(codigo)
    #Blank entries are removed
#for i in range(len(codigo)):
#codigo[i] = codigo[i].strip()
linea += 1
for lexema in codigo:
if lexema in operadores_lexema:
myToken = crearToken(operadores[lexema],lexema,linea)
identificado = True
if lexema in reservadas_lexema:
myToken = crearToken(reservadas[lexema],lexema,linea)
identificado = True
if lexema in comparadores_lexema:
myToken = crearToken(comparadores[lexema],lexema,linea)
identificado = True
if lexema in delimitadores_lexema:
myToken = crearToken(delimitadores[lexema],lexema,linea)
identificado = True
if es_entero(lexema):
myToken = crearToken("numero_entero",lexema,linea)
identificado = True
if es_flotante(lexema):
myToken = crearToken("numero_flotante",lexema,linea)
identificado = True
        #If the lexeme was not identified
        if not identificado:
            #Check whether it is an identifier
            if es_identificador(lexema):
                myToken = crearToken("identificador",lexema,linea)
                tabla.append(myToken)
            #If it definitely cannot be identified
            else:
                print ("error, no se que es:", lexema, "en la linea", linea)
        #If the lexeme was identified
        else:
            #add it to the token table
tabla.append(myToken)
identificado = False
estructura["Tokens"] = tabla
#Show the token table
#for token in tabla:
# print (token)
with open('tokens.json', 'w') as json_file:
json.dump(estructura, json_file)
#details:
#for now every token must be separated by a space in the source file, example:
#this is fine: a = 20 / 3.8 (because the tokens are separated)
#this is wrong: a = 20/ 3.8 (because 20 is read as 20/ and the lexer does not know what that is)
#this could still be fixed
#en el ejemplo no sabe lo que es x porque no esta en el alfabero (asi debe ser) | 1.640625 | 2 |
iotoolkit/mapillary_vistas_toolkit.py | vghost2008/wml | 6 | 109828 | import json
import wml_utils as wmlu
import numpy as np
import os
import cv2 as cv
import sys
import random
from iotoolkit.labelme_toolkit import get_labels_and_bboxes
def get_files(dir_path, sub_dir_name):
img_dir = os.path.join(dir_path, sub_dir_name,'images')
label_dir = os.path.join(dir_path, sub_dir_name,"v2.0","polygons")
res = []
json_files = wmlu.recurse_get_filepath_in_dir(label_dir,suffix=".json")
for jf in json_files:
base_name = wmlu.base_name(jf)
igf = os.path.join(img_dir, base_name + ".jpg")
if os.path.exists(igf):
res.append((igf, jf))
else:
print(f"ERROR: Find {igf} faild, json file is {jf}")
return res
class MapillaryVistasData(object):
def __init__(self, label_text2id=None, shuffle=False, sub_dir_name="training",ignored_labels=[],label_map={},
allowed_labels_fn=None):
self.files = None
self.label_text2id = label_text2id
self.shuffle = shuffle
self.sub_dir_name = sub_dir_name
self.ignored_labels = ignored_labels
self.label_map = label_map
self.allowed_labels_fn = None if allowed_labels_fn is None or (isinstance(allowed_labels_fn,list ) and len(allowed_labels_fn)==0) else allowed_labels_fn
if self.allowed_labels_fn is not None and isinstance(self.allowed_labels_fn,list):
self.allowed_labels_fn = lambda x:x in allowed_labels_fn
def read_data(self, dir_path):
self.files = get_files(dir_path, self.sub_dir_name)
if self.shuffle:
random.shuffle(self.files)
def __len__(self):
return len(self.files)
def get_items(self,beg=0,end=None,filter=None):
'''
:return:
binary_masks [N,H,W], value is 0 or 1,
full_path,img_size,category_ids,category_names,boxes,binary_masks,area,is_crowd,num_annotations_skipped
'''
if end is None:
end = len(self.files)
if beg is None:
beg = 0
for i, (img_file, json_file) in enumerate(self.files[beg:end]):
if filter is not None and not filter(img_file,json_file):
continue
print(img_file,json_file)
sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
sys.stdout.flush()
image, annotations_list = self.read_json(json_file)
labels_names, bboxes = get_labels_and_bboxes(image, annotations_list)
masks = [ann["segmentation"] for ann in annotations_list]
if len(masks) > 0:
try:
masks = np.stack(masks, axis=0)
except:
print("ERROR: stack masks faild.")
masks = None
if self.label_text2id is not None:
labels = [self.label_text2id(x) for x in labels_names]
else:
labels = None
yield img_file, [image['height'], image['width']], labels, labels_names, bboxes, masks, None, None, None
def get_boxes_items(self):
'''
:return:
full_path,img_size,category_ids,boxes,is_crowd
'''
for i, (img_file, json_file) in enumerate(self.files):
sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
sys.stdout.flush()
image, annotations_list = self.read_json(json_file,use_semantic=False)
labels_names, bboxes = get_labels_and_bboxes(image, annotations_list)
labels = [self.label_text2id(x) for x in labels_names]
#file, img_size,category_ids, labels_text, bboxes, binary_mask, area, is_crowd, _
yield img_file, [image['height'], image['width']], labels, labels_names,bboxes, None,None,None,None
def read_json(self,file_path,use_semantic=True):
annotations_list = []
image = {}
with open(file_path, "r", encoding="gb18030") as f:
print(file_path)
data_str = f.read()
try:
json_data = json.loads(data_str)
img_width = int(json_data["width"])
img_height = int(json_data["height"])
image["height"] = int(img_height)
image["width"] = int(img_width)
image["file_name"] = wmlu.base_name(file_path)
for shape in json_data["objects"]:
#label = shape["label"].split("--")[-1]
label = shape["label"]
if self.ignored_labels is not None and label in self.ignored_labels:
continue
if self.allowed_labels_fn is not None and not self.allowed_labels_fn(label):
continue
if self.label_map is not None and label in self.label_map:
label = self.label_map[label]
mask = np.zeros(shape=[img_height, img_width], dtype=np.uint8)
all_points = np.array([shape["polygon"]]).astype(np.int32)
if len(all_points) < 1:
continue
points = np.transpose(all_points[0])
x, y = np.vsplit(points, 2)
x = np.reshape(x, [-1])
y = np.reshape(y, [-1])
x = np.minimum(np.maximum(0, x), img_width - 1)
y = np.minimum(np.maximum(0, y), img_height - 1)
xmin = np.min(x)
xmax = np.max(x)
ymin = np.min(y)
ymax = np.max(y)
if use_semantic:
segmentation = cv.drawContours(mask, all_points, -1, color=(1), thickness=cv.FILLED)
annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
"segmentation": segmentation,
"category_id": label,
"points_x": x,
"points_y": y})
else:
annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
"category_id": label,
"points_x": x,
"points_y": y})
except:
print(f"Read file {os.path.basename(file_path)} faild.")
pass
if use_semantic:
'''
Each pixel only belong to one classes, and the latter annotation will overwrite the previous
'''
if len(annotations_list)>2:
mask = 1-annotations_list[-1]['segmentation']
for i in reversed(range(len(annotations_list)-1)):
annotations_list[i]['segmentation'] = np.logical_and(annotations_list[i]['segmentation'], mask)
mask = np.logical_and(mask,1-annotations_list[i]['segmentation'])
return image, annotations_list
if __name__ == "__main__":
id = 0
# data_statistics("/home/vghost/ai/mldata/qualitycontrol/rdatasv3")
import img_utils as wmli
import object_detection_tools.visualization as odv
import matplotlib.pyplot as plt
NAME2ID = {}
ID2NAME = {}
def name_to_id(x):
global id
if x in NAME2ID:
return NAME2ID[x]
else:
NAME2ID[x] = id
ID2NAME[id] = x
id += 1
return NAME2ID[x]
ignored_labels = [
'manhole', 'dashed', 'other-marking', 'static', 'front', 'back',
        'solid', 'catch-basin','utility-pole', 'pole', 'street-light','direction-back', 'direction-front',
'ambiguous', 'other','text','diagonal','left','right','water-valve','general-single','temporary-front',
'wheeled-slow','parking-meter','split-left-or-straight','split-right-or-straight','zigzag',
'give-way-row','ground-animal','phone-booth','give-way-single','garage','temporary-back','caravan','other-barrier',
'chevron','pothole','sand'
]
label_map = {
'individual':'person',
'cyclists':'person',
'other-rider':'person'
}
data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False, ignored_labels=ignored_labels,label_map=label_map)
# data.read_data("/data/mldata/qualitycontrol/rdatasv5_splited/rdatasv5")
# data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatav10_preproc")
# data.read_data("/home/vghost/ai/mldata2/qualitycontrol/rdatasv10_neg_preproc")
data.read_data(wmlu.home_dir("ai/mldata/mapillary_vistas/mapillary-vistas-dataset_public_v2.0"))
def filter(x):
return x in ['general-single', 'parking', 'temporary', 'general-horizontal']
# return x in ['terrain']
# return x in ['car']
# data.read_data("/home/vghost/ai/mldata2/qualitycontrol/x")
for x in data.get_items():
full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
img = wmli.imread(full_path)
def text_fn(classes, scores):
return f"{ID2NAME[classes]}"
if False:
is_keep = [filter(x) for x in category_names]
category_ids = np.array(category_ids)[is_keep]
boxes = np.array(boxes)[is_keep]
binary_mask = np.array(binary_mask)[is_keep]
if len(category_ids) == 0:
continue
wmlu.show_dict(NAME2ID)
odv.draw_bboxes_and_maskv2(
img=img, classes=category_ids, scores=None, bboxes=boxes, masks=binary_mask, color_fn=None,
text_fn=text_fn, thickness=4,
show_text=True,
fontScale=0.8)
plt.figure()
plt.imshow(img)
plt.show()
| 1.726563 | 2 |
lmpcc/scripts/cadrl_client.py | JitskedeVries/amr-lmpcc | 60 | 109956 | #! /usr/bin/env python
import rospy
import sys
# Brings in the SimpleActionClient
import actionlib
import math
import tf2_ros
from geometry_msgs.msg import *
from std_srvs.srv import *
import time
"""Reference Path"""
#x = [1.5, 3.5, 5.5, 7, 5.5, 3.5, 1.5, 0, 1.5]
#y = [0.5, 0.5, 0.5, 2, 3.5, 3.5, 3.5, 2, 0.5]
#theta = [0, 0, 0, 1.57, 3.14, 3.14, 3.14, -1.57, 0]
#global_path:
#x = [1.5, 3.5, 5.5, 7, 8, 10, 13, 11, 10.5, 9, 7, 5.5, 3.5, 1.5, 0, 1.5]
#y = [0.5, 0.5, 0.5, 2, 4, 4, 6, 7.5, 6, 4, 3.5, 3.5, 3.5, 3.5, 2, 0.5]
#theta = [0, 0, 0, 1.57, 0, 0, 1.57, 3.14, -1.57, 3.14, 3.14, 3.14, 3.14, 3.14, -1.57, 0]
# faculty corridor
#global_path:
#x= [50, 55, 60, 65,70,80]
#y= [-0.5, -0.5, -0.5, -0.5,-0.5,-0.5]
#theta= [0,0,0,0,0,0]
#reference_velocity= 0.5
#cadrl test
x= [15]
y= [0]
theta= [0]
reference_velocity= 0.5
distance_threshold = 0.63
loop = True
def cadrl_client(index,pub_global_goal):
# Creates a goal to send to the action server.
goal = PoseStamped()
goal.header.stamp = rospy.get_rostime()
goal.header.frame_id = "odom"
goal.pose.position.x = x[index]
goal.pose.position.y = y[index]
goal.pose.orientation.x = 0
goal.pose.orientation.y = 0
    goal.pose.orientation.z = math.sin(theta[index]*0.5)
    goal.pose.orientation.w = math.cos(theta[index]*0.5)
# Sends the goal to the action server.
pub_global_goal.publish(goal)
def check_if_arrived(i,tfBuffer):
try:
trans = tfBuffer.lookup_transform('odom', 'base_link', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get TF")
return False
if math.sqrt(pow(x[i]-trans.transform.translation.x,2)+pow(y[i]-trans.transform.translation.y,2)) < 1:
return True
else:
return False
def collision_check(tfBuffer):
try:
pos_ped_1 = tfBuffer.lookup_transform('base_link', 'ped_link_1', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_1" )
return False
try:
pos_ped_2 = tfBuffer.lookup_transform('base_link', 'ped_link_2', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_2" )
return False
try:
pos_ped_3 = tfBuffer.lookup_transform('base_link', 'ped_link_3', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_3" )
return False
try:
pos_ped_4 = tfBuffer.lookup_transform('base_link', 'ped_link_4', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_4" )
return False
"""
try:
pos_ped_5 = tfBuffer.lookup_transform('base_link', 'ped_link_5', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_5" )
return False
try:
pos_ped_6 = tfBuffer.lookup_transform('base_link', 'ped_link_6', rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
print ("Could not get ped_link_6" )
return False
"""
ped_distance_1 =math.sqrt(pow(pos_ped_1.transform.translation.x,2)+pow(pos_ped_1.transform.translation.y,2))
#print("ped_distance_1: " +str(ped_distance_1))
if ped_distance_1 < distance_threshold:
print ("Collision with ped_link_1!!!")
return True
#print("ped_distance_1: " +str(ped_distance_1))
if math.sqrt(pow(pos_ped_2.transform.translation.x,2)+pow(pos_ped_2.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_2")
return True
if math.sqrt(pow(pos_ped_3.transform.translation.x,2)+pow(pos_ped_3.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_3")
return True
if math.sqrt(pow(pos_ped_4.transform.translation.x,2)+pow(pos_ped_4.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_4")
return True
else:
return False
"""
if math.sqrt(pow(pos_ped_5.transform.translation.x,2)+pow(pos_ped_5.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_5")
return True
if math.sqrt(pow(pos_ped_6.transform.translation.x,2)+pow(pos_ped_6.transform.translation.y,2)) < distance_threshold:
print ("Collision with ped_link_6")
return True
"""
if __name__ == '__main__':
rospy.init_node('cadrl_base_client_py')
i = 0
pub_global_goal = rospy.Publisher('/nn_jackal/goal',PoseStamped, queue_size=1)
reset_simulation_client_ = rospy.ServiceProxy("/gazebo/reset_world",Empty());
"""ROS Variables"""
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
collision_number = 0
n_events = 0
trials = 0
timeout = 0
mean_time=0
while(i < len(x)):
if trials > 100:
break
try:
# Initializes a rospy node so that the SimpleActionClient can
# publish and subscribe over ROS.
cadrl_client(i,pub_global_goal)
arrived = False
col = False
ti = time.time() # initial time
while (not arrived) and (not col):
#rospy.sleep(0.1)
cadrl_client(i,pub_global_goal)
arrived = check_if_arrived(i,tfBuffer)
if arrived:
break
col = collision_check(tfBuffer)
if col:
collision_number += 1
trials +=1
i=0
reset_simulation_client_()
rospy.sleep(1)
break
tf = time.time()
if tf-ti > 90:
reset_simulation_client_()
i=0
timeout += 1
trials +=1
break
#print("Not arrived in: " + str(tf-ti) + " [s]")
except rospy.ROSInterruptException:
print("Failed")
break
print("next goal pos..."+str(i+1))
i += 1
if i == len(x):
i = 0
n_events += 1
trials +=1
mean_time +=tf-ti
print("Mean time to goal: " + str(mean_time))
print("Number of collisions: " + str(collision_number))
print("Number of successful events: " + str(n_events))
print("Number of trials: " + str(trials))
print("Number of timeout: " + str(timeout))
reset_simulation_client_()
rospy.sleep(1)
if trials > 100:
break
| 1.695313 | 2 |
mlp_france.py | lujiammy/coronavirus-machine-learning | 26 | 110084 | import numpy as np
np.random.seed(1337)
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
model = Sequential()
model.add(Dense(units=50, input_dim=1, activation='relu'))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
model.add(Dense(units=1, activation='linear'))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
import csv
with open('data/france_history.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
rows = [row for row in reader]
fr_corn_y = []
for each_y in rows:
fr_corn_y.append(int(each_y[0]))
dates = len(fr_corn_y)
fr_corn_x = list(range(1, dates + 1))
fr_corn_x = np.array(fr_corn_x)
fr_corn_y = np.array(fr_corn_y)
fr_dates_length = len(fr_corn_x)
fr_absorb = fr_corn_y[fr_dates_length-1]
corn_y_norm = fr_corn_y / fr_absorb
model.fit(fr_corn_x, corn_y_norm, epochs=10000, shuffle=False)
corn_y_predict = model.predict(fr_corn_x)
corn_y_predict = corn_y_predict * fr_absorb
fig_france = plt.figure(figsize=(7, 5))
plt.scatter(fr_corn_x, fr_corn_y, label='Real Confirmed')
plt.plot(fr_corn_x, corn_y_predict, label='Predict Result')
plt.title('France Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show() | 1.929688 | 2 |
src/test/aws/test_clients.py | Dudesons/cassandras3 | 9 | 110212 | <filename>src/test/aws/test_clients.py<gh_stars>1-10
import unittest
from mock import patch
from cassandras3.aws.clients import ClientCache
REGION = 'us-east-1'
class TestClientCache(unittest.TestCase):
def setUp(self):
self.clients = ClientCache(REGION)
@patch('cassandras3.aws.clients.boto3')
def test_s3(self, mock_boto3):
self.clients.s3()
mock_boto3.client.assert_called_once_with('s3', REGION)
@patch('cassandras3.aws.clients.boto3')
def test_s3_cached(self, mock_boto3):
self.clients.s3()
self.clients.s3()
self.assertEqual(1, mock_boto3.client.call_count)
| 1.164063 | 1 |
6_google_trace/tensorflow/exam5.py | nguyenthieu95/machine_learning | 1 | 110340 | <reponame>nguyenthieu95/machine_learning
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 10:17:15 2018
@author: thieunv
Magic method: https://www.youtube.com/watch?v=3ohzBxoFHAY
"""
class Employee:
# class variable
raise_amount = 1.04
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + "." + last + "@company.com"
def fullname(self):
return "{} {}".format(self.first, self.last)
def apply_raise(self):
self.pay = int(self.pay * self.raise_amount)
def __repr__(self): # representation method
return "Employee('{}', '{}', '{}')".format(self.first, self.last, self.pay)
def __str__(self): # string method, stronger than __repr__
return "{} - {}".format(self.fullname(), self.email)
def __add__(self, other):
return self.pay + other.pay
def __len__(self):
return len(self.fullname())
# Instance
e1 = Employee("thieu", "nguyen", 5000)
e2 = Employee("tien", "pham", 3000)
print e1
print repr(e2) # Same: print e2.__repr__()
print str(e2) # Same: print e2.__str__()
## Add method
print(1 + 2)
print(int.__add__(1, 2))
print(str.__add__('a', 'b'))
print(e1 + e2) # Understand how to add using __add__
## Len method
print(len('test'))
print('test'.__len__())
print(len(e1))
| 3.25 | 3 |
doc/userGuide/tutorial/introductory.py | lbl-srg/MPCPy | 96 | 110468 | # -*- coding: utf-8 -*-
"""
This tutorial will introduce the basic concepts and workflow of mpcpy.
By the end, we will train a simple model based on emulated data, and use
the model to optimize the control signal of the system. All required data
files for this tutorial are located in doc/userGuide/tutorial.
The model is a simple RC model of zone thermal response to ambient temperature
and a single heat input. It is written in Modelica:
.. code-block:: modelica
model RC "A simple RC network for example purposes"
Modelica.Blocks.Interfaces.RealInput weaTDryBul(unit="K") "Ambient temperature";
Modelica.Blocks.Interfaces.RealInput Qflow(unit="W") "Heat input";
Modelica.Blocks.Interfaces.RealOutput Tzone(unit="K") "Zone temperature";
Modelica.Thermal.HeatTransfer.Components.HeatCapacitor heatCapacitor(C=1e5)
"Thermal capacitance of zone";
Modelica.Thermal.HeatTransfer.Components.ThermalResistor thermalResistor(R=0.01)
"Thermal resistance of zone";
Modelica.Thermal.HeatTransfer.Sources.PrescribedTemperature preTemp;
Modelica.Thermal.HeatTransfer.Sensors.TemperatureSensor senTemp;
Modelica.Thermal.HeatTransfer.Sources.PrescribedHeatFlow preHeat;
equation
connect(senTemp.T, Tzone)
connect(preHeat.Q_flow, Qflow)
connect(heatCapacitor.port, senTemp.port)
connect(heatCapacitor.port, preHeat.port)
connect(preTemp.port, thermalResistor.port_a)
connect(thermalResistor.port_b, heatCapacitor.port)
connect(preTemp.T, weaTDryBul)
end RC;
Variables and Units
-------------------
First, lets get familiar with variables and units, the basic building blocks of MPCPy.
>>> from mpcpy import variables
>>> from mpcpy import units
Static variables contain data that is not a timeseries:
>>> setpoint = variables.Static('setpoint', 20, units.degC)
>>> print(setpoint) # doctest: +NORMALIZE_WHITESPACE
Name: setpoint
Variability: Static
Quantity: Temperature
Display Unit: degC
The unit assigned to the variable is the display unit.
However, each display unit quantity has a base unit that is used to store
the data in memory. This makes it easy to convert between units
when necessary. For example, the degC display unit has a quantity temperature,
which has base unit in Kelvin.
>>> # Get the data in display units
>>> setpoint.display_data()
20.0
>>> # Get the data in base units
>>> setpoint.get_base_data()
293.15
>>> # Convert the display unit to degF
>>> setpoint.set_display_unit(units.degF)
>>> setpoint.display_data() # doctest: +NORMALIZE_WHITESPACE
68.0
Timeseries variables contain data in the form of a ``pandas`` Series with a
datetime index:
>>> # Create pandas Series object
>>> import pandas as pd
>>> data = [0, 5, 10, 15, 20]
>>> index = pd.date_range(start='1/1/2017', periods=len(data), freq='H')
>>> ts = pd.Series(data=data, index=index, name='power_data')
Now we can do the same thing with the timeseries variable as we did with the
static variable:
>>> # Create mpcpy variable
>>> power_data = variables.Timeseries('power_data', ts, units.Btuh)
>>> print(power_data) # doctest: +NORMALIZE_WHITESPACE
Name: power_data
Variability: Timeseries
Quantity: Power
Display Unit: Btuh
>>> # Get the data in display units
>>> power_data.display_data()
2017-01-01 00:00:00+00:00 0.0
2017-01-01 01:00:00+00:00 5.0
2017-01-01 02:00:00+00:00 10.0
2017-01-01 03:00:00+00:00 15.0
2017-01-01 04:00:00+00:00 20.0
Freq: H, Name: power_data, dtype: float64
>>> # Get the data in base units
>>> power_data.get_base_data()
2017-01-01 00:00:00+00:00 0.000000
2017-01-01 01:00:00+00:00 1.465355
2017-01-01 02:00:00+00:00 2.930711
2017-01-01 03:00:00+00:00 4.396066
2017-01-01 04:00:00+00:00 5.861421
Freq: H, Name: power_data, dtype: float64
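
For reference, the base-unit values above follow from the standard power
conversion factor 1 Btuh = 0.29307107 W; the arithmetic (not an MPCPy call) is:

::

    5 Btuh * 0.29307107 W/Btuh = 1.465355 W
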
>>> # Convert the display unit to kW
>>> power_data.set_display_unit(units.kW)
>>> power_data.display_data()
2017-01-01 00:00:00+00:00 0.000000
2017-01-01 01:00:00+00:00 0.001465
2017-01-01 02:00:00+00:00 0.002931
2017-01-01 03:00:00+00:00 0.004396
2017-01-01 04:00:00+00:00 0.005861
Freq: H, Name: power_data, dtype: float64
There is additional functionality with the units that may be useful, such as
setting new data and getting the units. Consult the documentation on these
classes for more information.
Collect model weather and control signal data
---------------------------------------------
Now, we would like to collect the weather data and control signal inputs
for our model. We do this using exodata objects:
>>> from mpcpy import exodata
Let's take our weather data from an EPW file. We instantiate the weather
exodata object by supplying the path to the EPW file:
>>> weather = exodata.WeatherFromEPW('USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw')
Note that using the weather exodata object assumes that weather inputs to our
model are named a certain way. Consult the documentation on the weather
exodata class for more information. In this case, the ambient dry bulb
temperature input in our model is named weaTDryBul.
Let's take our control input signal from a CSV file. The CSV file looks like:
::
Time,Qflow_csv
01/01/17 12:00 AM,3000
01/01/17 01:00 AM,3000
01/01/17 02:00 AM,3000
...
01/02/17 10:00 PM,3000
01/02/17 11:00 PM,3000
01/03/17 12:00 AM,3000
We instantiate the control exodata object by supplying the path to the CSV file
as well as a map of the names of the columns to the input of our model.
We also assume that the data in the CSV file is given in the local time of the
weather file, and so we supply this optional parameter, tz_name, upon
instantiation as well. If no time zone is supplied, it is assumed to be UTC.
>>> variable_map = {'Qflow_csv' : ('Qflow', units.W)}
>>> control = exodata.ControlFromCSV('ControlSignal.csv',
... variable_map,
... tz_name = weather.tz_name)
Now we are ready to collect the exogenous data from our data sources for a
given time period.
>>> start_time = '1/1/2017'
>>> final_time = '1/3/2017'
>>> weather.collect_data(start_time, final_time) # doctest: +ELLIPSIS
-etc-
>>> control.collect_data(start_time, final_time)
Use the ``display_data()`` and ``get_base_data()`` functions for the weather
and control objects to get the data in the form of a pandas dataframe. Note
that the data is given in UTC time.
>>> control.display_data() # doctest: +ELLIPSIS
Qflow
Time
2017-01-01 06:00:00+00:00 3000.0
2017-01-01 07:00:00+00:00 3000.0
2017-01-01 08:00:00+00:00 3000.0
-etc-
Simulate as Emulated System
---------------------------
The model has parameters for the resistance and capacitance set in the
modelica code. For the purposes of this tutorial, we will assume that the
model with these parameter values represents the actual system. We now wish to
collect measurements from this 'actual system.' For this, we use the systems
module of mpcpy.
>>> from mpcpy import systems
First, we instantiate our system model by supplying a measurement dictionary,
information about where the model resides, and information about model exodata.
The measurement dictionary holds information about and data from the variables
being measured. We start with defining the variables we are interested in
measuring and their sample rate. In this case, we have two, the output of
the model, called 'Tzone' and the control input called 'Qflow'.
Note that 'heatCapacitor.T' would also be valid instead of 'Tzone'.
>>> measurements = {'Tzone' : {}, 'Qflow' : {}}
>>> measurements['Tzone']['Sample'] = variables.Static('sample_rate_Tzone',
... 3600,
... units.s)
>>> measurements['Qflow']['Sample'] = variables.Static('sample_rate_Qflow',
... 3600,
... units.s)
The model information is given by a tuple containing the path to the
Modelica (.mo) file, the path of the model within the .mo file, and a list of
paths of any required libraries other than the Modelica Standard.
For this example, there are no additional libraries.
>>> moinfo = ('Tutorial.mo', 'Tutorial.RC', {})
Ultimately, the Modelica model is compiled into an FMU. If the emulation model
is already an FMU, then an fmupath can be specified instead of the Modelica
information tuple. For more information, see the documentation on the systems
class.
We can now instantiate the system emulation object with our measurement
dictionary, model information, collected exogenous data, and time zone:
>>> emulation = systems.EmulationFromFMU(measurements,
... moinfo = moinfo,
... weather_data = weather.data,
... control_data = control.data,
... tz_name = weather.tz_name)
Finally, we can collect the measurements from our emulation over a specified
time period and display the results as a pandas dataframe. The
``collect_measurements()`` function updates the measurement dictionary with
timeseries data in the ``'Measured'`` field for each variable.
>>> # Collect the data
>>> emulation.collect_measurements('1/1/2017', '1/2/2017') # doctest: +ELLIPSIS
-etc-
>>> # Display the results
>>> emulation.display_measurements('Measured').applymap('{:.2f}'.format) # doctest: +ELLIPSIS
Qflow Tzone
Time
2017-01-01 06:00:00+00:00 3000.00 293.15
2017-01-01 07:00:00+00:00 3000.00 291.01
2017-01-01 08:00:00+00:00 3000.00 291.32
-etc-
Estimate Parameters
-------------------
Now assume that we do not know the parameters of the model. Or, that we have
measurements from a real or emulated system, and would like to estimate
parameters of our model to fit the measurements. For this, we use the models
module from mpcpy.
>>> from mpcpy import models
In this case, we have a Modelica model with two parameters that we would like
to train based on the measured data from our system; the resistance
and capacitance.
We first need to collect some information about our parameters and do so using
a parameters exodata object. The parameter information is stored in a CSV file
that looks like:
::
Name,Free,Value,Minimum,Maximum,Covariance,Unit
heatCapacitor.C,True,40000,1.00E+04,1.00E+06,1000,J/K
thermalResistor.R,True,0.002,0.001,0.1,0.0001,K/W
The name is the name of the parameter in the model. The Free field indicates
if the parameter is free to be changed during the estimation method or not.
The Value is the current value of the parameter. If the parameter is to be
estimated, this would be an initial guess. If the parameter's Free field is
set to False, then the value is set to the parameter upon simulation. The
Minimum and Maximum fields set the minimum and maximum value allowed by the
parameter during estimation. The Covariance field sets the covariance of
the parameter, and is only used for unscented Kalman filtering. Finally, the
Unit field specifies the unit of the parameter using the name string of
MPCPy unit classes.
>>> parameters = exodata.ParameterFromCSV('Parameters.csv')
>>> parameters.collect_data()
>>> parameters.display_data() # doctest: +NORMALIZE_WHITESPACE
Covariance Free Maximum Minimum Unit Value
Name
heatCapacitor.C 1000 True 1e+06 10000 J/K 40000
thermalResistor.R 0.0001 True 0.1 0.001 K/W 0.002
Now, we can instantiate the model object by defining the estimation method,
validation method, measurement dictionary, model information, parameter data,
and exogenous data. In this case, we use JModelica optimization to perform
the parameter estimation and will validate the parameter estimation by
calculating the root mean square error (RMSE) between measurements from the
model and emulation.
>>> model = models.Modelica(models.JModelicaParameter,
... models.RMSE,
... emulation.measurements,
... moinfo = moinfo,
... parameter_data = parameters.data,
... weather_data = weather.data,
... control_data = control.data,
... tz_name = weather.tz_name)
Let's simulate the model to see how far off we are with our initial parameter
guesses. The ``simulate()`` function updates the measurement dictionary with
timeseries data in the ``'Simulated'`` field for each variable.
>>> # Simulate the model
>>> model.simulate('1/1/2017', '1/2/2017') # doctest: +ELLIPSIS
-etc-
>>> # Display the results
>>> model.display_measurements('Simulated').applymap('{:.2f}'.format) # doctest: +ELLIPSIS
Qflow Tzone
Time
2017-01-01 06:00:00+00:00 3000.00 293.15
2017-01-01 07:00:00+00:00 3000.00 266.95
2017-01-01 08:00:00+00:00 3000.00 267.44
-etc-
Now, we are ready to estimate the parameters to better fit the emulated
measurements. In addition to a training period, we must supply a list of
measurement variables for which to minimize the error between the simulated
and measured data. In this case, we only have one, ``'Tzone'``. The
``parameter_estimate()`` function updates the Value field for the parameter
data in the model.
>>> model.parameter_estimate('1/1/2017', '1/2/2017', ['Tzone']) # doctest: +ELLIPSIS
-etc-
Let's validate the estimation on the training period. The ``validate()``
method will simulate the model over the specified time period, calculate the
RMSE between the simulated and measured data, and generate a plot in the
working directory that shows the simulated and measured data for each
measurement variable.
>>> # Perform validation
>>> model.validate('1/1/2017', '1/2/2017', 'validate_tra', plot=1) # doctest: +ELLIPSIS
-etc-
>>> # Get RMSE
>>> print("%.3f" % model.RMSE['Tzone'].display_data()) # doctest: +NORMALIZE_WHITESPACE
0.041
Now let's validate on a different period of exogenous data:
>>> # Define validation period
>>> start_time_val = '1/2/2017'
>>> final_time_val = '1/3/2017'
>>> # Collect new measurements
>>> emulation.collect_measurements(start_time_val, final_time_val) # doctest: +ELLIPSIS
-etc-
>>> # Assign new measurements to model
>>> model.measurements = emulation.measurements
>>> # Perform validation
>>> model.validate(start_time_val, final_time_val, 'validate_val', plot=1) # doctest: +ELLIPSIS
-etc-
>>> # Get RMSE
>>> print("%.3f" % model.RMSE['Tzone'].display_data()) # doctest: +NORMALIZE_WHITESPACE
0.047
Finally, let's view the estimated parameter values:
>>> for key in model.parameter_data.keys():
... print(key, "%.2f" % model.parameter_data[key]['Value'].display_data())
('heatCapacitor.C', '119828.30')
('thermalResistor.R', '0.01')
Optimize Control
----------------
We are now ready to optimize control of our system heater using our calibrated
MPC model. Specifically, we would like to maintain a comfortable temperature
in our zone with the minimum amount of heater energy. We can do this by using
the optimization module of MPCPy.
>>> from mpcpy import optimization
First, we need to collect some constraint data to add to our optimization
problem. In this case, we will constrain the heating input to between
0 and 4000 W, and the temperature to a comfortable range, between
20 and 25 degC. We collect constraint data from a CSV using a constraint
exodata object. The constraint CSV looks like:
::
Time,Qflow_min,Qflow_max,T_min,T_max
01/01/17 12:00 AM,0,4000,20,25
01/01/17 01:00 AM,0,4000,20,25
01/01/17 02:00 AM,0,4000,20,25
...
01/02/17 10:00 PM,0,4000,20,25
01/02/17 11:00 PM,0,4000,20,25
01/03/17 12:00 AM,0,4000,20,25
The constraint exodata object is used to determine which column of data matches
with which model variable and whether it is a less-than-or-equal-to (LTE) or
greater-than-or-equal-to (GTE) constraint:
>>> # Define variable map
>>> variable_map = {'Qflow_min' : ('Qflow', 'GTE', units.W),
... 'Qflow_max' : ('Qflow', 'LTE', units.W),
... 'T_min' : ('Tzone', 'GTE', units.degC),
... 'T_max' : ('Tzone', 'LTE', units.degC)}
>>> # Instantiate constraint exodata object
>>> constraints = exodata.ConstraintFromCSV('Constraints.csv',
... variable_map,
... tz_name = weather.tz_name)
>>> # Collect data
>>> constraints.collect_data('1/1/2017', '1/3/2017')
>>> # Get data
>>> constraints.display_data() # doctest: +ELLIPSIS
Qflow_GTE Qflow_LTE Tzone_GTE Tzone_LTE
Time
2017-01-01 06:00:00+00:00 0.0 4000.0 20.0 25.0
2017-01-01 07:00:00+00:00 0.0 4000.0 20.0 25.0
2017-01-01 08:00:00+00:00 0.0 4000.0 20.0 25.0
-etc-
We can now instantiate an optimization object using our calibrated MPC model,
selecting an optimization problem type and solver package, and specifying
which of the variables in the model to treat as the objective variable.
In this case, we choose an energy minimization problem (integral of variable
over time horizon) to be solved using JModelica, and Qflow to be the variable
we wish to minimize the integral of over the time horizon.
>>> opt_problem = optimization.Optimization(model,
... optimization.EnergyMin,
... optimization.JModelica,
... 'Qflow',
... constraint_data = constraints.data)
The information provided is used to automatically generate a .mop (optimization
model file for JModelica) and transfer the optimization problem using JModelica.
Using the ``optimize()`` function optimizes the variables defined in the control
data of the model object and updates their timeseries data with the optimal
solution for the time period specified. Note that other than the constraints,
the exogenous data within the model object is used, and the control interval
is assumed to be the same as the measurement sampling rate of the model. Use
the ``get_optimization_options()`` and ``set_optimization_options()`` functions
to see and change the options for the optimization solver; for instance, the
number of control points, maximum iteration number, tolerance, or maximum CPU time.
See the documentation for these functions for more information.
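
As a rough sketch only (the option dictionary keys shown are assumptions about
the underlying JModelica collocation options and are not verified against
MPCPy), usage follows the pattern:

.. code-block:: python

    opts = opt_problem.get_optimization_options()
    opts['n_e'] = 24                         # number of collocation elements (assumed key)
    opts['IPOPT_options']['max_iter'] = 500  # solver iteration limit (assumed key)
    opt_problem.set_optimization_options(opts)
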
>>> opt_problem.optimize('1/2/2017', '1/3/2017') # doctest: +ELLIPSIS
-etc-
We can get the optimization solver statistics in the form of
(return message, # of iterations, objective value, solution time in seconds):
>>> opt_problem.get_optimization_statistics() # doctest: +ELLIPSIS
('Solve_Succeeded', 12, -etc-)
We can retrieve the optimal control solution and verify that the
constraints were satisfied. The intermediate points are a result of the
direct collocation method used by JModelica.
>>> opt_problem.display_measurements('Simulated').applymap('{:.2f}'.format) # doctest: +ELLIPSIS
Qflow Tzone
Time
2017-01-02 06:00:00+00:00 669.93 298.15
2017-01-02 06:09:18.183693+00:00 1512.95 293.15
2017-01-02 06:38:41.816307+00:00 2599.01 293.15
2017-01-02 07:00:00+00:00 1888.28 293.15
-etc-
Finally, we can simulate the model using the optimized control trajectory.
Note that the ``model.control_data`` dictionary is updated by the
``opt_problem.optimize()`` function.
>>> model.control_data['Qflow'].display_data().loc[pd.to_datetime('1/2/2017 06:00:00'):pd.to_datetime('1/3/2017 06:00:00')].map('{:.2f}'.format) # doctest: +ELLIPSIS
2017-01-02 06:00:00+00:00 669.93
2017-01-02 06:09:18.183693+00:00 1512.95
2017-01-02 06:38:41.816307+00:00 2599.01
2017-01-02 07:00:00+00:00 1888.28
-etc-
>>> model.simulate('1/2/2017', '1/3/2017') # doctest: +ELLIPSIS
-etc-
>>> model.display_measurements('Simulated').applymap('{:.2f}'.format) # doctest: +ELLIPSIS
Qflow Tzone
Time
2017-01-02 06:00:00+00:00 669.93 293.15
2017-01-02 07:00:00+00:00 1888.28 291.41
2017-01-02 08:00:00+00:00 2277.67 293.03
-etc-
Note there is some mismatch between the simulated model output temperature
and the model output temperature of the raw optimal control solution.
This is due to the interpolation of control input results during simulation
not aligning with the collocation polynomials and timestep determined by the
optimization solver. We can solve the optimization problem again, this
time updating the ``model.control_data`` with a greater time resolution of 1
second. Some mismatch will still occur due to the optimization solution
using collocation being an approximation of the true dynamic model.
>>> opt_problem.optimize('1/2/2017', '1/3/2017', res_control_step=1.0) # doctest: +ELLIPSIS
-etc-
>>> model.control_data['Qflow'].display_data().loc[pd.to_datetime('1/2/2017 06:00:00'):pd.to_datetime('1/3/2017 06:00:00')].map('{:.2f}'.format) # doctest: +ELLIPSIS
2017-01-02 06:00:00+00:00 669.93
2017-01-02 06:00:01+00:00 671.66
2017-01-02 06:00:02+00:00 673.38
-etc-
>>> model.simulate('1/2/2017', '1/3/2017') # doctest: +ELLIPSIS
-etc-
>>> model.display_measurements('Simulated').applymap('{:.2f}'.format) # doctest: +ELLIPSIS
Qflow Tzone
Time
2017-01-02 06:00:00+00:00 669.93 293.15
2017-01-02 07:00:00+00:00 1888.28 292.67
2017-01-02 08:00:00+00:00 2277.67 293.13
-etc-
"""
if __name__ == "__main__":
import doctest
doctest.ELLIPSIS_MARKER = '-etc-'
(n_fails, n_tests) = doctest.testmod()
if n_fails:
print('\nTutorial finished with {0} fails.'.format(n_fails));
else:
print('\nTutorial finished OK.')
| 2.59375 | 3 |
app/python/scalrpy/msg_sender.py | ePlusPS/scalr | 0 | 110596 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, 2014 Scalr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gevent import monkey
monkey.patch_all()
import os
import sys
cwd = os.path.dirname(os.path.abspath(__file__))
scalrpy_dir = os.path.join(cwd, '..')
sys.path.insert(0, scalrpy_dir)
import time
import socket
import requests
from scalrpy.util import helper
from scalrpy.util import dbmanager
from scalrpy.util import cryptotool
from scalrpy.util import application
from scalrpy import LOG
from scalrpy import exceptions
helper.patch_gevent()
app = None
class MsgSender(application.ScalrIterationApplication):
nothing_todo_sleep = 5
max_processing_messages = 500
def __init__(self, argv=None):
self.description = "Scalr messaging application"
super(MsgSender, self).__init__(argv=argv)
self.config.update({
'cratio': 120,
'pool_size': 250,
'interval': 1,
})
self.iteration_timeout = 120
self._db = None
self._pool = None
self._processing_messages = set()
def configure(self):
helper.update_config(
self.scalr_config.get('msg_sender', {}), self.config)
helper.validate_config(self.config)
socket.setdefaulttimeout(self.config['instances_connection_timeout'])
self._db = dbmanager.ScalrDB(self.config['connections']['mysql'])
self._pool = helper.GPool(pool_size=self.config['pool_size'])
self.max_processing_messages = 2 * self.config['pool_size']
def _encrypt(self, server_id, crypto_key, data, headers=None):
assert server_id, 'server_id'
assert crypto_key, 'scalarizr.key'
assert data, 'data to encrypt'
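        # "des_ede3_cbc" is Triple DES (EDE3) in CBC mode, the cipher Scalarizr expects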
crypto_algo = dict(name="des_ede3_cbc", key_size=24, iv_size=8)
data = cryptotool.encrypt_scalarizr(crypto_algo, data, cryptotool.decrypt_key(crypto_key))
headers = headers or dict()
headers['X-Signature'], headers['Date'] = cryptotool.sign(data, crypto_key)
headers['X-Server-Id'] = server_id
msg = "Server: {0}, key: {1} ... {2}".format(server_id, crypto_key[0:5], crypto_key[-5:])
LOG.debug(msg)
return data, headers
def get_messages(self):
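        # Select unsent (status = 0) v2 outbound messages whose retry backoff has
        # elapsed: each failed attempt delays the next try by `cratio` seconds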
query = (
"SELECT messageid, server_id, event_id, message_format, "
"handle_attempts ,message, message_name, status "
"FROM messages "
"WHERE type = 'out' "
"AND status = 0 "
"AND messageid IS NOT NULL "
"AND messageid != '' "
"AND message_version = 2 "
"AND UNIX_TIMESTAMP(dtlasthandleattempt)+handle_attempts*{cratio}<UNIX_TIMESTAMP() "
"ORDER BY dtadded ASC "
"LIMIT {limit}"
).format(cratio=self.config['cratio'], limit=self.config['pool_size'])
return self._db.execute(query)
def get_servers(self, messages):
servers_id = list(set([_['server_id'] for _ in messages if _['server_id']]))
if not servers_id:
return ()
statuses = [
'Running',
'Initializing',
'Importing',
'Temporary',
'Pending terminate',
'Pending suspend',
]
query = (
"SELECT server_id, farm_id, farm_roleid farm_role_id, remote_ip, local_ip, platform "
"FROM servers "
"WHERE server_id IN ({0}) AND status IN ({1})"
).format(str(servers_id)[1:-1], str(statuses)[1:-1])
servers = self._db.execute(query)
props = ['scalarizr.ctrl_port', 'scalarizr.key']
self._db.load_server_properties(servers, props)
for server in servers:
if 'scalarizr.ctrl_port' not in server:
server['scalarizr.ctrl_port'] = 8013
if 'scalarizr.key' not in server:
server['scalarizr.key'] = None
self._db.load_vpc_settings(servers)
return servers
def make_request(self, message, server):
data, headers = self._encrypt(
server['server_id'],
server['scalarizr.key'],
message['message'])
instances_connection_policy = self.scalr_config.get(server['platform'], {}).get(
'instances_connection_policy', self.scalr_config['instances_connection_policy'])
ip, port, proxy_headers = helper.get_szr_ctrl_conn_info(
server, instances_connection_policy)
headers.update(proxy_headers)
if not ip:
msg = "Unable to determine ip"
raise Exception(msg)
if message['message_format'] == 'json':
headers['Content-type'] = 'application/json'
url = 'http://%s:%s/%s' % (ip, port, 'control')
request = {
'url': url,
'data': data,
'headers': headers,
}
return request
def update(self, message):
try:
if message['status'] == 1:
if message['event_id']:
query = (
"UPDATE events "
"SET msg_sent = msg_sent + 1 "
"WHERE event_id = '{0}'"
).format(message['event_id'])
self._db.execute(query, retries=1)
if message['message_name'] == 'ExecScript':
query = "DELETE FROM messages WHERE messageid = '{0}'".format(message['messageid'])
self._db.execute(query, retries=1)
return
query = (
"UPDATE messages "
"SET status=1, message='', handle_attempts=handle_attempts+1, dtlasthandleattempt=NOW() "
"WHERE messageid='{0}'").format(message['messageid'])
else:
query = (
"UPDATE messages "
"SET status={0}, handle_attempts=handle_attempts+1, dtlasthandleattempt=NOW() "
"WHERE messageid='{1}'").format(message['status'], message['messageid'])
self._db.execute(query, retries=1)
finally:
if message['messageid'] in self._processing_messages:
self._processing_messages.remove(message['messageid'])
def process_message(self, message, server):
try:
try:
request = self.make_request(message, server)
except:
message['status'] = 3
msg = "Make request failed, reason: {error}".format(error=helper.exc_info())
raise Exception(msg)
if not request['url']:
message['status'] = 3
msg = "Wrong request: {request}".format(request=request)
raise Exception(msg)
msg = "Send message: {message_id}, request: {request}"
msg = msg.format(
message_id=message['messageid'],
request={'url': request['url'], 'headers': request['headers']})
LOG.debug(msg)
r = requests.post(
request['url'],
data=request['data'],
headers=request['headers'],
timeout=self.config['instances_connection_timeout'])
if r.status_code != 201:
msg = "Bad response code: {code}".format(code=r.status_code)
raise Exception(msg)
message['status'] = 1
msg = "Delivery Ok, message: {message_id}"
msg = msg.format(message_id=message['messageid'])
LOG.debug(msg)
except:
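            # Give up on a message (status 3) once it has already failed twice before this attempt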
if message['status'] == 0 and int(message['handle_attempts']) >= 2:
message['status'] = 3
msg = "Delivery failed, message: {message_id}, server: {server}, reason: {error}"
server['scalarizr.key'] = '******'
msg = msg.format(
message_id=message['messageid'], server=server, error=helper.exc_info())
LOG.warning(msg)
self.update(message)
def do_iteration(self):
while len(self._processing_messages) > self.max_processing_messages:
time.sleep(1)
messages = self.get_messages()
if not messages:
time.sleep(self.nothing_todo_sleep)
return
servers = self.get_servers(messages)
servers_map = dict((server['server_id'], server) for server in servers)
for message in messages:
try:
if message['messageid'] in self._processing_messages:
continue
self._processing_messages.add(message['messageid'])
if message['server_id'] not in servers_map:
msg = (
"Server '{server_id}' doesn't exist or not in right status, set message "
"status to 3"
).format(server_id=message['server_id'])
LOG.warning(msg)
message['status'] = 3
self._pool.wait()
self._pool.apply_async(self.update, (message,))
else:
server = servers_map[message['server_id']]
self._pool.wait()
self._pool.apply_async(self.process_message, (message, server))
except:
msg = "Unable to process message: {message_id}, reason: {error}"
msg = msg.format(message_id=message['messageid'], error=helper.exc_info())
LOG.warning(msg)
def on_iteration_error(self):
self._pool.kill()
def main():
global app
app = MsgSender()
try:
app.load_config()
app.configure()
app.run()
except exceptions.AlreadyRunningError:
LOG.info(helper.exc_info(where=False))
except (SystemExit, KeyboardInterrupt):
pass
except:
LOG.exception('Oops')
if __name__ == '__main__':
main()
| 1.34375 | 1 |
_pytest/test_extractors.py | chrismaeda/rasa_nlu | 0 | 110724 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_nlu.training_data import TrainingData
def test_crf_extractor(spacy_nlp):
from rasa_nlu.extractors.crf_entity_extractor import CRFEntityExtractor
ext = CRFEntityExtractor()
examples = [
{
"text": "anywhere in the west",
"intent": "restaurant_search",
"entities": [{"start": 16, "end": 20, "value": "west", "entity": "location"}]
},
{
"text": "central indian restaurant",
"intent": "restaurant_search",
"entities": [{"start": 0, "end": 7, "value": "central", "entity": "location"}]
}]
ext.train(TrainingData(training_examples=examples), spacy_nlp, True, ext.crf_features)
crf_format = ext._from_text_to_crf('anywhere in the west', spacy_nlp)
assert ([word[0] for word in crf_format] == ['anywhere', 'in', 'the', 'west'])
feats = ext._sentence_to_features(crf_format)
assert ('BOS' in feats[0])
assert ('EOS' in feats[-1])
assert ('0:low:in' in feats[1])
ext.extract_entities('anywhere in the west', spacy_nlp)
def test_crf_json_from_BILOU(spacy_nlp):
from rasa_nlu.extractors.crf_entity_extractor import CRFEntityExtractor
ext = CRFEntityExtractor()
ext.BILOU_flag = True
sentence = u"I need a home cleaning close-by"
r = ext._from_crf_to_json(spacy_nlp(sentence), ['O', 'O', 'O', 'B-what', 'L-what', 'B-where', 'I-where', 'L-where'])
assert len(r) == 2, "There should be two entities"
assert r[0] == {u'start': 9, u'end': 22, u'value': u'home cleaning', u'entity': u'what'}
assert r[1] == {u'start': 23, u'end': 31, u'value': u'close-by', u'entity': u'where'}
def test_crf_json_from_non_BILOU(spacy_nlp):
from rasa_nlu.extractors.crf_entity_extractor import CRFEntityExtractor
ext = CRFEntityExtractor()
ext.BILOU_flag = False
sentence = u"I need a home cleaning close-by"
r = ext._from_crf_to_json(spacy_nlp(sentence), ['O', 'O', 'O', 'what', 'what', 'where', 'where', 'where'])
assert len(r) == 5, "There should be five entities" # non BILOU will split multi-word entities - hence 5
assert r[0] == {u'start': 9, u'end': 13, u'value': u'home', u'entity': u'what'}
assert r[1] == {u'start': 14, u'end': 22, u'value': u'cleaning', u'entity': u'what'}
assert r[2] == {u'start': 23, u'end': 28, u'value': u'close', u'entity': u'where'}
assert r[3] == {u'start': 28, u'end': 29, u'value': u'-', u'entity': u'where'}
assert r[4] == {u'start': 29, u'end': 31, u'value': u'by', u'entity': u'where'}
| 1.554688 | 2 |
extrabacon-2.0/improved/shellcode_8_4(6)5.py | JS-Burns/CVE-2016-6366 | 171 | 110852 | ##
## this file autogenerated
## 8.4(6)5
##
jmp_esp_offset = "192.168.3.11"
saferet_offset = "172.16.17.32"
fix_ebp = "72"
pmcheck_bounds = "0.176.88.9"
pmcheck_offset = "96.186.88.9"
pmcheck_code = "192.168.3.11"
admauth_bounds = "0.32.8.8"
admauth_offset = "240.33.8.8"
admauth_code = "172.16.31.10"
# "8.4(6)5" = ["192.168.3.11","172.16.17.32","72","0.176.88.9","172.16.58.3","192.168.3.11","0.32.8.8","240.33.8.8","172.16.31.10"], | 0.5625 | 1 |
plot_data/veusz_plot_mahalanobis.py | ososinski/video_blender | 1 | 110980 |
import veusz.embed
import numpy as np
import argparse
import os
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
        # return open(arg, 'r')  # return an open file handle
parser = argparse.ArgumentParser(description='Plot data array [FILE] as an [OUT].png @ [dpi] dpi.')
parser.add_argument('file', metavar="FILE", help='the files to process', type=lambda x: is_valid_file(parser, x))
parser.add_argument('-o', '--out', dest='output',default="OUT",
help='output file name (default: [FILE_STEM].png)')
parser.add_argument('--cMap', dest='colourMap',default="grey",
help='veusz colourMap [grey]')
parser.add_argument('-s', '--scaling', dest='scaling',default="log",
help='veusz colour scaling [log]')
parser.add_argument('--crop', nargs=4, dest='bb', metavar="N",default=[0,0,0,0],
help='image boundary [left right top bottom]', type=int)
parser.add_argument('--range', nargs=2, dest='minmax', metavar="N",default=[0,0],
help='value range [MIN MAX]', type=float)
parser.add_argument('--dpi', dest='dpi', type=int, default=100, help='the png dpi [100]')
parser.add_argument('-d', '--display', dest='display', action='store_true', default=False, help='Display the result')
args = parser.parse_args()
filename = args.file
args.path = os.path.dirname(os.path.realpath((args.file)))
if args.output is "OUT":
stem,ext = os.path.splitext(args.file)
args.output = "%s_plot.png"%(stem)
else:
pass
#print (args)
#print (filename)
#data = np.loadtxt(open(args.file[0], "rb"),dtype=float, delimiter=' ')
data = np.genfromtxt(args.file,dtype=float,delimiter=" ")
#print data.shape
embed = veusz.embed.Embedded("veusz")
page = embed.Root.Add("page")
page.width.val = "%.02fin"%(data.shape[1]/10.0)
page.height.val = "%.02fin"%(data.shape[0]/10.0)
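# With shape/10 inches per axis and the default 100 dpi, each data cell maps to
# a 10x10 pixel block in the exported image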
graph = page.Add("graph", autoadd=False)
x_axis = graph.Add("axis")
y_axis = graph.Add("axis")
# this stops intelligent axis extending
#embed.Set('x/autoExtend', False)
#embed.Set('x/autoExtendZero', False)
image_plot = graph.Add("image")
if(args.minmax[0]!=0 or args.minmax[1]!=0):
image_plot.min.val=args.minmax[0]
image_plot.max.val=args.minmax[1]
graph.leftMargin.val = "0cm"
graph.rightMargin.val = "0cm"
graph.topMargin.val = "0cm"
graph.bottomMargin.val = "0cm"
embed.ImportFile2D(args.file, "img_2d")
embed.SetData2D("img", data)
image_plot.colorScaling.val = args.scaling
image_plot.colorMap.val = args.colourMap
image_plot.data.val = "img_2d"
x_axis.MinorTicks.hide.val = True
x_axis.MajorTicks.hide.val = True
#x_axis.autoMirror.val = False #mirror line on the other side
x_axis.Line.hide.val = True
x_axis.autoRange.val = "exact"
x_axis.mode.val = "labels"
#x_axis.datascale.val = 1
y_axis.MinorTicks.hide.val = True
y_axis.MajorTicks.hide.val = True
#y_axis.autoMirror.val = False #mirror line on the other side
y_axis.Line.hide.val = True
y_axis.autoRange.val = "exact"
y_axis.mode.val = "labels"
graph.Border.hide.val = True
#resize for different crop // the 'if' because we all make top/bottom value mistakes
if(args.bb[0]!=0 or args.bb[1]!=0 or args.bb[2]!=0 or args.bb[3]!=0):
page.width.val = "%fin"%(abs(args.bb[1]-args.bb[0])/10.0)
if args.bb[0]<args.bb[1] :
x_axis.min.val = args.bb[0]
x_axis.max.val = args.bb[1]
else:
x_axis.min.val = args.bb[1]
x_axis.max.val = args.bb[0]
page.height.val = "%fin"%(abs(args.bb[3]-args.bb[2])/10.0)
if args.bb[2]<args.bb[3] :
y_axis.min.val = args.bb[2]
y_axis.max.val = args.bb[3]
else:
y_axis.min.val = args.bb[3]
y_axis.max.val = args.bb[2]
#typeface = "Arial"
#for curr_axis in [x_axis, y_axis]:
# curr_axis.Label.font.val = typeface
# curr_axis.TickLabels.font.val = typeface
#embed.Export("poisson.pdf", backcolor="white")
embed.Export(
#"poisson_{dpi:n}.png".format(dpi=args.dpi),
args.output,
backcolor="white",
dpi=args.dpi,
antialias=False
)
if(args.display):
embed.WaitForClose()
else:
embed.Close()
| 2.015625 | 2 |
vit/vit.py | Burf/VisionTransformer-Tensorflow2 | 0 | 111108 | import tensorflow as tf
import numpy as np
class MultiHeadSelfAttention(tf.keras.layers.Layer):
def __init__(self, emb_dim = 768, n_head = 12, out_dim = None, relative_window_size = None, dropout_rate = 0., kernel_initializer = tf.keras.initializers.RandomNormal(mean = 0, stddev = 0.01), **kwargs):
#ScaledDotProductAttention
super(MultiHeadSelfAttention, self).__init__(**kwargs)
self.emb_dim = emb_dim
self.n_head = n_head
if emb_dim % n_head != 0:
raise ValueError("Shoud be embedding dimension % number of heads = 0.")
if out_dim is None:
out_dim = self.emb_dim
self.out_dim = out_dim
if relative_window_size is not None and np.ndim(relative_window_size) == 0:
relative_window_size = [relative_window_size, relative_window_size]
self.relative_window_size = relative_window_size
self.projection_dim = emb_dim // n_head
self.dropout_rate = dropout_rate
self.query = tf.keras.layers.Dense(emb_dim, kernel_initializer = kernel_initializer)
self.key = tf.keras.layers.Dense(emb_dim, kernel_initializer = kernel_initializer)
self.value = tf.keras.layers.Dense(emb_dim, kernel_initializer = kernel_initializer)
self.combine = tf.keras.layers.Dense(out_dim, kernel_initializer = kernel_initializer)
def build(self, input_shape):
if self.relative_window_size is not None:
self.relative_position_bias_table = self.add_weight("relative_position_bias_table", shape = [((2 * self.relative_window_size[0]) - 1) * ((2 * self.relative_window_size[1]) - 1), self.n_head], trainable = self.trainable)
coords_h = np.arange(self.relative_window_size[0])
coords_w = np.arange(self.relative_window_size[1])
coords = np.stack(np.meshgrid(coords_h, coords_w, indexing = "ij")) #2, Wh, Ww
coords = np.reshape(coords, [2, -1])
relative_coords = np.expand_dims(coords, axis = -1) - np.expand_dims(coords, axis = -2) #2, Wh * Ww, Wh * Ww
relative_coords = np.transpose(relative_coords, [1, 2, 0]) #Wh * Ww, Wh * Ww, 2
relative_coords[:, :, 0] += self.relative_window_size[0] - 1 #shift to start from 0
relative_coords[:, :, 1] += self.relative_window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.relative_window_size[1] - 1
relative_position_index = np.sum(relative_coords, -1)
self.relative_position_index = tf.Variable(tf.convert_to_tensor(relative_position_index), trainable = False, name= "relative_position_index")
def attention(self, query, key, value, relative_position_bias = None):
score = tf.matmul(query, key, transpose_b = True)
n_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = score / tf.math.sqrt(n_key)
if relative_position_bias is not None:
scaled_score = scaled_score + relative_position_bias
weight = tf.nn.softmax(scaled_score, axis = -1)
if 0 < self.dropout_rate:
weight = tf.nn.dropout(weight, self.dropout_rate)
out = tf.matmul(weight, value)
return out
def separate_head(self, x):
out = tf.keras.layers.Reshape([-1, self.n_head, self.projection_dim])(x)
out = tf.keras.layers.Permute([2, 1, 3])(out)
return out
def call(self, inputs):
query = self.query(inputs)
key = self.key(inputs)
value = self.value(inputs)
query = self.separate_head(query)
key = self.separate_head(key)
value = self.separate_head(value)
relative_position_bias = None
if self.relative_window_size is not None:
relative_position_bias = tf.gather(self.relative_position_bias_table, tf.reshape(self.relative_position_index, [-1]))
relative_position_bias = tf.reshape(relative_position_bias, [self.relative_window_size[0] * self.relative_window_size[1], self.relative_window_size[0] * self.relative_window_size[1], -1]) #Wh * Ww,Wh * Ww, nH
relative_position_bias = tf.transpose(relative_position_bias, [2, 0, 1]) #nH, Wh * Ww, Wh * Ww
relative_position_bias = tf.expand_dims(relative_position_bias, axis = 0)
attention = self.attention(query, key, value, relative_position_bias)
attention = tf.keras.layers.Permute([2, 1, 3])(attention)
attention = tf.keras.layers.Reshape([-1, self.emb_dim])(attention)
out = self.combine(attention)
return out
def get_config(self):
config = super(MultiHeadSelfAttention, self).get_config()
config["emb_dim"] = self.emb_dim
config["n_head"] = self.n_head
config["out_dim"] = self.out_dim
config["relative_window_size"] = self.relative_window_size
config["projection_dim"] = self.projection_dim
config["dropout_rate"] = self.dropout_rate
return config
class TransformerBlock(tf.keras.layers.Layer):
def __init__(self, emb_dim = 768, n_head = 12, n_feature = 3072, dropout_rate = 0.1, **kwargs):
super(TransformerBlock, self).__init__(**kwargs)
self.emb_dim = emb_dim
self.n_head = n_head
self.n_feature = n_feature
self.dropout_rate = dropout_rate
self.attention = MultiHeadSelfAttention(emb_dim, n_head)
self.feed_forward = [tf.keras.layers.Dense(n_feature, activation = tf.keras.activations.gelu), tf.keras.layers.Dense(emb_dim)]
self.layer_norm = [tf.keras.layers.LayerNormalization(epsilon = 1e-6), tf.keras.layers.LayerNormalization(epsilon = 1e-6)]
if 0 < dropout_rate:
self.dropout = tf.keras.layers.Dropout(dropout_rate)
def call(self, inputs):
out = self.layer_norm[0](inputs)
out = self.attention(out)
if 0 < self.dropout_rate:
out = self.dropout(out)
att_out = self.layer_norm[1](inputs + out)
out = self.feed_forward[0](att_out)
if 0 < self.dropout_rate:
out = self.dropout(out)
out = self.feed_forward[1](out)
if 0 < self.dropout_rate:
out = self.dropout(out)
return att_out + out
def get_config(self):
config = super(TransformerBlock, self).get_config()
config["emb_dim"] = self.emb_dim
config["n_head"] = self.n_head
config["n_feature"] = self.n_feature
config["dropout_rate"] = self.dropout_rate
return config
class VisionTransformer(tf.keras.layers.Layer):
def __init__(self, n_class = 1000, include_top = True, patch_size = 16, distillation = False, emb_dim = 768, n_head = 12, n_feature = 3072, n_layer = 12, dropout_rate = 0.1, ori_input_shape = None, method = "bicubic", **kwargs):
super(VisionTransformer, self).__init__(**kwargs)
self.n_class = n_class
self.include_top = include_top
self.patch_size = patch_size if not isinstance(patch_size, int) else [patch_size, patch_size]
self.distillation = distillation
self.emb_dim = emb_dim
self.n_head = n_head
self.n_feature = n_feature
self.n_layer = n_layer
self.dropout_rate = dropout_rate
self.ori_input_shape = ori_input_shape
self.method = method
self.patch_projection = tf.keras.layers.Dense(emb_dim)
self.encoder = [TransformerBlock(emb_dim, n_head, n_feature, dropout_rate) for _ in range(n_layer)]
if include_top:
self.logits = tf.keras.layers.Dense(n_class, kernel_initializer = "zeros", name = "logits")
if self.distillation:
self.dist_logits = tf.keras.layers.Dense(n_class, kernel_initializer = "zeros", name = "kd_logits")
def build(self, input_shape):
n_patches = (input_shape[-3] // self.patch_size[0]) * (input_shape[-2] // self.patch_size[1])
pos_dim = n_patches + 1
self.class_emb = self.add_weight("class_embedding", shape = (1, 1, self.emb_dim), trainable = self.trainable)
if self.distillation:
pos_dim += 1
self.dist_emb = self.add_weight("kd_embedding", shape = (1, 1, self.emb_dim), trainable = self.trainable)
if self.ori_input_shape is not None:
ori_n_patches = (self.ori_input_shape[0] // self.patch_size[0]) * (self.ori_input_shape[1] // self.patch_size[1])
ori_pos_dim = ori_n_patches + 1
if self.distillation:
ori_pos_dim += 1
self.pos_emb = self.add_weight("position_embedding", shape = (1, ori_pos_dim, self.emb_dim), trainable = self.trainable)
self.pos_emb = self.resize_pos_embedding(self.pos_emb, pos_dim, self.distillation, self.method)
else:
self.pos_emb = self.add_weight("position_embedding", shape = (1, pos_dim, self.emb_dim), trainable = self.trainable)
def extract_patches(self, images, patch_size):
patches = tf.image.extract_patches(images = images, sizes = [1, patch_size[0], patch_size[1], 1], strides = [1, patch_size[0], patch_size[1], 1], rates = [1, 1, 1, 1], padding = "VALID")
n_patch, patch_dim = tf.keras.backend.int_shape(patches)[-2:]
patches = tf.keras.layers.Reshape([n_patch ** 2, patch_dim])(patches)
return patches
def resize_pos_embedding(self, pos_embedding, new_pos_dim, distillation = False, method = "bicubic"):
pos_emb_token = pos_embedding[:, :1]
pos_emb_grid = pos_embedding[:, 1:]
new_pos_dim -= 1
if distillation:
pos_emb_dist_token = pos_embedding[:, -1:]
pos_emb_grid = pos_embedding[:, 1:-1]
new_pos_dim -= 1
pos_dim, emb_dim = tf.keras.backend.int_shape(pos_emb_grid)[1:]
n_patch = np.sqrt(pos_dim).astype(int)
new_n_patch = np.sqrt(new_pos_dim).astype(int)
pos_emb_grid = tf.reshape(pos_emb_grid, [1, n_patch, n_patch, emb_dim])
pos_emb_grid = tf.image.resize(pos_emb_grid, [new_n_patch, new_n_patch], method = method)
pos_emb_grid = tf.reshape(pos_emb_grid, [1, new_n_patch **2, emb_dim])
pos_embedding = [pos_emb_token, pos_emb_grid]
if distillation:
pos_embedding.append(pos_emb_dist_token)
pos_embedding = tf.concat(pos_embedding, axis = 1)
return pos_embedding
def call(self, inputs):
out = self.extract_patches(inputs, self.patch_size)
out = self.patch_projection(out)
batch_size = tf.shape(inputs)[0]
class_emb = tf.broadcast_to(self.class_emb, [batch_size, 1, self.emb_dim])
out = [class_emb, out]
if self.distillation:
dist_emb = tf.broadcast_to(self.dist_emb, [batch_size, 1, self.emb_dim])
out.append(dist_emb)
out = tf.concat(out, axis = 1)
out = out + self.pos_emb
for encoder in self.encoder:
out = encoder(out)
if self.include_top:
pre_logits = out[:, 0] #class token
logits = self.logits(pre_logits)
if self.distillation:
pre_dist_logits = out[:, -1] #distillation token
dist_logits = self.dist_logits(pre_dist_logits)
out = [logits, dist_logits]
else:
out = logits
return out
def get_config(self):
config = super(VisionTransformer, self).get_config()
config["patch_size"] = self.patch_size
config["distillation"] = self.distillation
config["emb_dim"] = self.emb_dim
config["n_head"] = self.n_head
config["n_feature"] = self.n_feature
config["n_layer"] = self.n_layer
config["emb_dim"] = self.emb_dim
config["dropout_rate"] = self.dropout_rate
config["n_class"] = self.n_class
config["include_top"] = self.include_top
config["ori_input_shape"] = self.ori_input_shape
config["method"] = self.method
return config
def train_model(input, logits, kd_logits = None, soft = True, alpha = 0.5, tau = 1.0):
y_true = tf.keras.layers.Input(shape = (None,), name = "y_true", dtype = tf.float32)
kd_true = None
if kd_logits is not None:
kd_true = tf.keras.layers.Input(shape = (None,), name = "kd_true", dtype = tf.float32)
_y_true = tf.keras.layers.Lambda(lambda args: tf.cond(tf.equal(tf.shape(args[0])[-1], 1), true_fn = lambda: tf.one_hot(tf.cast(args[0], tf.int32), tf.shape(args[1])[-1])[:, 0], false_fn = lambda: args[0]))([y_true, logits])
_y_true = tf.cast(_y_true, logits.dtype)
if kd_logits is not None:
_kd_true = tf.keras.layers.Lambda(lambda args: tf.cond(tf.equal(tf.shape(args[0])[-1], 1), true_fn = lambda: tf.one_hot(tf.cast(args[0], tf.int32), tf.shape(args[1])[-1])[:, 0], false_fn = lambda: args[0]))([kd_true, kd_logits])
_kd_true = tf.cast(_kd_true, kd_logits.dtype)
logits = tf.where(tf.equal(logits, 0), tf.keras.backend.epsilon(), logits)
kd_logits = tf.where(tf.equal(kd_logits, 0), tf.keras.backend.epsilon(), kd_logits)
accuracy = tf.keras.metrics.categorical_accuracy(_y_true, logits)
loss = logits_loss = tf.keras.losses.categorical_crossentropy(_y_true, logits)
if kd_logits is not None:
if soft:
kd_loss = tf.keras.losses.kl_divergence(tf.nn.softmax(_kd_true / tau), tf.nn.softmax(kd_logits / tau)) * (tau ** 2)
else:
kd_loss = tf.keras.losses.categorical_crossentropy(_kd_true, kd_logits)
loss = (1 - alpha) * logits_loss + alpha * kd_loss
model = tf.keras.Model([l for l in [input, y_true, kd_true] if l is not None], loss)
model.add_metric(accuracy, name = "accuracy", aggregation = "mean")
model.add_metric(loss, name = "loss", aggregation = "mean")
model.add_loss(loss)
return model
def vit_small(include_top = True, weights = None, input_tensor = None, input_shape = None, classes = 1000, distillation = False):
if input_tensor is None:
img_input = tf.keras.layers.Input(shape = input_shape)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = tf.keras.layers.Input(tensor = input_tensor, shape = input_shape)
else:
img_input = input_tensor
out = VisionTransformer(classes, include_top, patch_size = 16, distillation = distillation, emb_dim = 768, n_head = 8, n_feature = 2304, n_layer = 8, dropout_rate = 0.1, ori_input_shape = None, method = "bicubic")(img_input)
model = tf.keras.Model(img_input, out)
if weights is not None:
model.load_weights(weights)
return model
def vit_base(include_top = True, weights = None, input_tensor = None, input_shape = None, classes = 1000, distillation = False):
if input_tensor is None:
img_input = tf.keras.layers.Input(shape = input_shape)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = tf.keras.layers.Input(tensor = input_tensor, shape = input_shape)
else:
img_input = input_tensor
out = VisionTransformer(classes, include_top, patch_size = 16, distillation = distillation, emb_dim = 768, n_head = 12, n_feature = 3072, n_layer = 12, dropout_rate = 0.1, ori_input_shape = None, method = "bicubic")(img_input)
model = tf.keras.Model(img_input, out)
if weights is not None:
model.load_weights(weights)
return model
def vit_large(include_top = True, weights = None, input_tensor = None, input_shape = None, classes = 1000, distillation = False):
if input_tensor is None:
img_input = tf.keras.layers.Input(shape = input_shape)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = tf.keras.layers.Input(tensor = input_tensor, shape = input_shape)
else:
img_input = input_tensor
out = VisionTransformer(classes, include_top, patch_size = 16, distillation = distillation, emb_dim = 1024, n_head = 16, n_feature = 4096, n_layer = 24, dropout_rate = 0.1, ori_input_shape = None, method = "bicubic")(img_input)
model = tf.keras.Model(img_input, out)
if weights is not None:
model.load_weights(weights)
return model
def vit_huge(include_top = True, weights = None, input_tensor = None, input_shape = None, classes = 1000, distillation = False):
if input_tensor is None:
img_input = tf.keras.layers.Input(shape = input_shape)
else:
if not tf.keras.backend.is_keras_tensor(input_tensor):
img_input = tf.keras.layers.Input(tensor = input_tensor, shape = input_shape)
else:
img_input = input_tensor
out = VisionTransformer(classes, include_top, patch_size = 16, distillation = distillation, emb_dim = 1280, n_head = 16, n_feature = 5120, n_layer = 32, dropout_rate = 0.1, ori_input_shape = None, method = "bicubic")(img_input)
model = tf.keras.Model(img_input, out)
if weights is not None:
model.load_weights(weights)
return model
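
# Example usage (a minimal sketch; the 224x224 input size and the random batch
# are illustrative assumptions, not part of the original file):
#
#   model = vit_base(include_top = True, input_shape = (224, 224, 3), classes = 1000)
#   logits = model(tf.random.uniform([2, 224, 224, 3]))  # -> shape (2, 1000)
#
# With distillation = True the factory returns [logits, dist_logits], which
# train_model() combines into a DeiT-style loss: (1 - alpha) * CE + alpha * KD.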
| 1.90625 | 2 |
fastspider/db/redis_db.py | coco369/fastspider | 6 | 111236 |
# encoding=utf-8
"""
Auth: coco369
Email: <EMAIL>
CreateTime: 2021/08/26
Desc: fastspider core code, redis
"""
import time
import redis
from fastspider.utils.logger import log
from fastspider.settings import common
class RedisDB(object):
    def __init__(self, url=None, ip=None, port=None, password=None, db=1, max_connections=30, **kwargs):
self._url = url or common.REDISDB_URL
self._ip = ip or common.REDISDB_IP
self._port = port or common.REDISDB_PORT
        self._password = password or common.REDISDB_USER_PASS
        self._db = db or common.REDISDB_DB
self._max_connections = max_connections
self._kwargs = kwargs
self._client = None
self._redis = None
self.connect()
def __repr__(self):
if self._url:
return f"<RedisDB url:{self._url}>"
return f"<RedisDB host: {self._ip} port: {self._ip} password: {self._password}>"
@property
def _client(self):
try:
if not self._redis.ping():
                raise ConnectionError("Failed to connect to redis, please check the redis configuration")
except Exception as e:
self._reconnect()
return self._redis
@_client.setter
def _client(self, val):
self._redis = val
def connect(self):
"""
        Connect to redis
"""
if not self._url:
if not self._ip or not self._port:
raise Exception("请在配置 REDISDB_IP, REDISDB_PORT")
if self._ip and self._port:
self._client = redis.StrictRedis(host=self._ip, port=self._port, db=self._db, password=self._password,
decode_responses=True, max_connections=self._max_connections,
**self._kwargs)
else:
self._client = redis.StrictRedis.from_url(url=self._url, decode_responses=True)
return self._client
def _reconnect(self):
"""
        Reconnect to redis
"""
retry_count = 0
while True:
try:
retry_count += 1
log.error(f"redis 连接断开, 重新连接 {retry_count}")
if self.connect():
log.info(f"redis 连接成功")
return True
except (ConnectionError, TimeoutError) as e:
log.error(f"连接失败 e: {e}")
time.sleep(2)
def set(self, key, value, **kwargs):
"""
        redis string type
"""
return self._client.set(key, value, **kwargs)
def set_expire(self, key, seconds):
"""
        Set an expiration time on a key
"""
return self._client.expire(key, seconds)
def zadd(self, table_name, requests, priority=0):
"""
        Store data in a redis sorted set
        :param table_name: name of the sorted set
        :param requests: requests; accepts a list or a single value
        :param priority: priority level; accepts a list or a single value. Optional; if omitted the default priority is 0. A list assigns an execution priority per request: the lower the value, the higher the priority.
:return:
"""
if isinstance(requests, list):
if not isinstance(priority, list):
priority = [priority] * len(requests)
else:
                assert len(priority) == len(requests), "requests and priorities must correspond one-to-one"
            # batch operation
pipeline = self._client.pipeline()
pipeline.multi()
for key, value in zip(requests, priority):
pipeline.execute_command("ZADD", table_name, value, key)
log.info(f"RedisDB 中插入任务成功, 数据格式为: {key}")
return pipeline.execute()
else:
log.info(f"RedisDB 中插入任务成功, 数据格式为: {requests}")
return self._client.execute_command("ZADD", table_name, priority, requests)
def zrem(self, table_name, values):
"""
        Remove elements from a sorted set; elements that do not exist are ignored
        :param table_name: name of the sorted set
        :param values: elements to remove; accepts a list or a single value
:return:
"""
if isinstance(values, list):
self._client.zrem(table_name, *values)
else:
self._client.zrem(table_name, values)
def zcount(self, table_name, priority_min=None, priority_max=None):
"""
        Count the number of elements in a sorted set
        :param table_name: name of the sorted set
        :param priority_min: minimum of the priority range
        :param priority_max: maximum of the priority range
:return:
"""
if priority_min != None and priority_max != None:
return self._client.zcount(table_name, priority_min, priority_max)
else:
return self._client.zcard(table_name)
def zrangebyscore_set_score(
self, table, priority_min, priority_max, score, count=None
):
"""
        @summary: Return the data within the given score range (inclusive) and update its score
        ---------
        @param table: name of the sorted set
        @param priority_min: minimum score
        @param priority_max: maximum score
        @param score: new score value
        @param count: number of items to fetch; empty means all data within the score range
        ---------
        @result:
"""
        # Use a lua script to keep the operation atomic
lua = """
-- local key = KEYS[1]
local min_score = ARGV[1]
local max_score = ARGV[2]
local set_score = ARGV[3]
local count = ARGV[4]
        -- fetch the values
local datas = nil
if count then
datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score, 'withscores','limit', 0, count)
else
datas = redis.call('zrangebyscore', KEYS[1], min_score, max_score, 'withscores')
end
        local real_datas = {} -- data
        -- update the priority
for i=1, #datas, 2 do
local data = datas[i]
local score = datas[i+1]
            table.insert(real_datas, data) -- append the data
redis.call('zincrby', KEYS[1], set_score - score, datas[i])
end
return real_datas
"""
cmd = self._client.register_script(lua)
if count:
res = cmd(keys=[table], args=[priority_min, priority_max, score, count])
else:
res = cmd(keys=[table], args=[priority_min, priority_max, score])
return res
def zremrangebyscore(self, table_name, priority_min, priority_max):
"""
        Remove all members within the given score range from a sorted set
        :param table_name: name of the sorted set
        :param priority_min: minimum priority
        :param priority_max: maximum priority
"""
return self._client.zremrangebyscore(table_name, priority_min, priority_max)
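
# Example usage (a minimal sketch; the host, port and key names are illustrative):
# db = RedisDB(ip="127.0.0.1", port=6379, db=1)
# db.zadd("fastspider:requests", ["task1", "task2"], priority=[0, 5])
# print(db.zcount("fastspider:requests"))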
| 1.601563 | 2 |
examples/sensors/navigation.py | phuanh004/rover | 0 | 111364 |
#robot_simpleping.py
#uses an avoid function for obstacles
from gpiozero import Robot, DistanceSensor
from time import sleep
import random
import os
from dotenv import load_dotenv
load_dotenv()
#Define Motor Driver and encoder GPIO pins
##################################################
# Motor A, Left Side GPIO CONSTANTS
PWML = 21 # PWMA - H-Bridge enable pin
FL = 20 # AI1 - Forward Drive
RL = 16 # AI2 - Reverse Drive
# Motor B, Right Side GPIO CONSTANTS
PWMR = 5 # PWMB - H-Bridge enable pin
FR = 13 # BI1 - Forward Drive
RR = 19 # BI2 - Reverse Drive
ECHO = 23 #echo pin on distance sensor
TRIG = 24 #trigger pin on distance sensor
RDISK = 17 # Encoder disk on the right rear wheel
# initialize distance sensor object for threshold of .3m
sensor = DistanceSensor(ECHO,TRIG, max_distance=1, threshold_distance=.3)
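# gpiozero fires the when_in_range callback once the measured distance drops
# below threshold_distance (0.3 m here)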
TDIST = 0.3
# robot object:pins are forward, reverse left, then right
rover = Robot(
(os.getenv('MOTOR_A_FL'),os.getenv('MOTOR_A_RL'),os.getenv('MOTOR_A_PWML')),
(os.getenv('MOTO_A_FR'),os.getenv('MOTO_A_RR'),os.getenv('MOTO_A_PWMR'))
)
#fwd/bk speed, spin turn speed and curve speeds
FSPD = 1
BSPD = .4
TSPD = BSPD * 1.75
CSPD = 1
#function for obstacle avoidance
def avoid():
print (f"{(sensor.distance * 100):.1f}, cm, too close!")
#print(sensor.distance * 100, " cm, too close!")
rover.stop()
sleep(.5)
rand = random.randint(0,9)
if rand % 2 == 0:
print (f"The number is even, back up to the right")
rover.backward(TSPD,curve_right=CSPD)
else:
print (f"The number is odd, back up to the left")
rover.backward(TSPD,curve_left=CSPD)
sleep(.5)
print ("ok, keep going")
def main():
while True:
if sensor.distance > TDIST:
rover.forward(FSPD)
sensor.when_in_range = avoid #when_in_range is builtin method
sleep(.1)
main() | 2.6875 | 3 |
Chapter02/Pandas_Join.py | PacktPublishing/Bioinformatics-with-Python-Cookbook-third-edition | 9 | 111492 | # # Pandas advanced
import numpy as np
import pandas as pd
# # Code to sample original data
#
# ```
# vdata = pd.read_csv("2021VAERSDATA.csv.gz", encoding="iso-8859-1")
# vdata.sample(frac=0.9).to_csv("vdata_sample.csv.gz", index=False)
# vax = pd.read_csv("2021VAERSVAX.csv.gz", encoding="iso-8859-1")
# vax.sample(frac=0.9).to_csv("vax_sample.csv.gz", index=False)
# ```
vdata = pd.read_csv("vdata_sample.csv.gz") # No encoding
vax = pd.read_csv("vax_sample.csv.gz")
vdata_with_vax = vdata.join(
vax.set_index("VAERS_ID"),
on="VAERS_ID",
how="inner")
len(vdata), len(vax), len(vdata_with_vax)
lost_vdata = vdata.loc[~vdata.index.isin(vdata_with_vax.index)]
lost_vdata
lost_vax = vax[~vax["VAERS_ID"].isin(vdata_with_vax["VAERS_ID"])]
lost_vax
# Left, Right and outer caveats
vdata_with_vax_left = vdata.join(
vax.set_index("VAERS_ID"),
on="VAERS_ID")
vdata_with_vax_left.groupby("VAERS_ID").size().sort_values()
len(vdata_with_vax_left), len(vdata_with_vax_left.VAERS_ID.unique())
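# A left join keeps every row of vdata; a VAERS_ID with several vaccine rows is
# duplicated, which is why the two lengths above differ.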
# +
#vdata_all = pd.read_csv("2021VAERSDATA.csv.gz", encoding="iso-8859-1")
#vax_all = pd.read_csv("2021VAERSVAX.csv.gz", encoding="iso-8859-1")
# -
dead = vdata[vdata.DIED == "Y"]
vax19 = vax[vax.VAX_TYPE == "COVID19"]
vax19_dead = vax19.join(dead.set_index("VAERS_ID"), on="VAERS_ID", how="right")
# join on id, discuss
len(vax19), len(dead), len(vax19_dead)
len(vax19_dead[vax19_dead.VAERS_ID.duplicated()])
len(vax19_dead) - len(dead)
vax19_dead["STATE"] = vax19_dead["STATE"].str.upper()
dead_lot = vax19_dead[["VAERS_ID", "VAX_LOT", "STATE"]].set_index(["VAERS_ID", "VAX_LOT"])
dead_lot_clean = dead_lot[~dead_lot.index.duplicated()]
dead_lot_clean = dead_lot_clean.reset_index()
dead_lot_clean[dead_lot_clean.VAERS_ID.isna()]
baddies = dead_lot_clean.groupby("VAX_LOT").size().sort_values(ascending=False)
for i, (lot, cnt) in enumerate(baddies.items()):
print(lot, cnt, len(dead_lot_clean[dead_lot_clean.VAX_LOT == lot].groupby("STATE")))
if i == 10:
break
| 1.46875 | 1 |
tests/test_turbomoleparser.py | nomad-coe/electronic-parsers | 0 | 111620 |
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from nomad.datamodel import EntryArchive
from electronicparsers.turbomole import TurbomoleParser
def approx(value, abs=0, rel=1e-6):
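    """pytest.approx with a tighter default relative tolerance (1e-6)."""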
return pytest.approx(value, abs=abs, rel=rel)
@pytest.fixture(scope='module')
def parser():
return TurbomoleParser()
def test_aoforce(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/aoforce/vib.out', archive, None)
assert archive.run[0].program.version == '7.2 ( 21285 )'
assert archive.run[0].time_run.date_start.magnitude == 1532973127.689
sec_method = archive.run[0].method[0]
assert sec_method.electronic.method == 'DFT'
assert sec_method.dft.xc_functional.correlation[0].name == 'GGA_C_P86'
assert len(sec_method.atom_parameters) == 4
assert len(sec_method.basis_set[0].atom_centered) == 4
assert sec_method.electronic.van_der_waals_method == 'DFT-D3'
assert sec_method.x_turbomole_controlIn_scf_conv == 8
sec_scc = archive.run[0].calculation[0]
assert sec_scc.energy.total.value.magnitude == approx(-3.58404386e-15)
assert sec_scc.energy.zero_point.value.magnitude == approx(1.02171533e-18)
assert sec_scc.energy.current.value.magnitude == approx(-3.58302215e-15)
assert np.shape(sec_scc.hessian_matrix) == (31, 31, 3, 3)
assert sec_scc.hessian_matrix[3][2][2][0] == approx(-38.1728237)
assert np.shape(sec_scc.x_turbomole_vibrations_normal_modes) == (93, 31, 3)
assert sec_scc.x_turbomole_vibrations_normal_modes[0][4][1] == approx(-0.02383)
assert sec_scc.x_turbomole_vibrations_mode_energies[46] == approx(1005.73)
assert sec_scc.x_turbomole_vibrations_intensities[72] == approx(41.61)
assert sec_scc.x_turbomole_vibrations_infrared_activity[49]
assert not sec_scc.x_turbomole_vibrations_raman_activity[5]
sec_system = archive.run[0].system[0]
assert len(sec_system.atoms.positions) == 31
assert sec_system.atoms.positions[7][1].magnitude == approx(-9.34235013e-11)
assert sec_system.atoms.labels[21] == 'O'
def test_ccsdf12(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/ccsdf12.out', archive, None)
sec_sccs = archive.run[0].calculation
assert len(sec_sccs) == 3
assert sec_sccs[0].energy.total.value.magnitude == approx(-2.99865659e-15)
assert sec_sccs[1].energy.total.value.magnitude == approx(-2.99841134e-15)
assert sec_sccs[2].energy.total.value.magnitude == approx(-2.99844594e-15)
assert sec_sccs[1].energy.current.value.magnitude == approx(-5.78479974e-18)
sec_scfs = sec_sccs[0].scf_iteration
assert len(sec_scfs) == 13
assert sec_scfs[8].energy.total.value.magnitude == approx(-2.99844594e-15)
assert sec_scfs[2].time_calculation.magnitude == 2.09
def test_grad_statpt_dscf(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/acrolein_grad_statpt_dscf.out', archive, None)
sec_methods = archive.run[0].method
assert sec_methods[0].basis_set[0].atom_centered[0].name == 'def2-SVP'
assert len(sec_methods) == 3
assert sec_methods[0].dft.xc_functional.hybrid[0].name == 'HYB_GGA_XC_B3LYP'
sec_systems = archive.run[0].system
assert len(sec_systems) == 3
assert sec_systems[1].atoms.positions[5][1].magnitude == approx(1.22377337e-10,)
sec_sccs = archive.run[0].calculation
assert sec_sccs[0].forces.total.value_raw[6][0].magnitude == approx(-4.2984543e-12)
sec_scfs = sec_sccs[2].scf_iteration
assert len(sec_scfs) == 3
assert sec_scfs[1].energy.total.value.magnitude == approx(-8.35592725e-16)
assert sec_scfs[0].x_turbomole_delta_eigenvalues.magnitude == approx(2.92683961e-22)
assert sec_sccs[2].energy.kinetic_electronic.value.magnitude == approx(8.27834082e-16)
sec_sampling = archive.workflow[0]
assert sec_sampling.geometry_optimization.x_turbomole_geometry_optimization_trustregion_min.magnitude == approx(5.29177211e-14)
assert sec_sampling.geometry_optimization.method == 'BFGS'
assert sec_sampling.geometry_optimization.convergence_tolerance_force_maximum.magnitude == approx(8.2387235e-11)
def test_escf(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/benzene_escf.out', archive, None)
sec_method = archive.run[0].method[0]
assert sec_method.electronic.method == 'G0W0'
assert sec_method.x_turbomole_gw_eta_factor.magnitude == approx(4.35974472e-21)
assert sec_method.x_turbomole_gw_approximation == 'G0W0'
sec_scc = archive.run[0].calculation[0]
sec_eigs_gw = sec_scc.eigenvalues[0]
assert sec_eigs_gw.value_ks[0][0][9].magnitude == approx(-3.59608546e-18)
assert sec_eigs_gw.value_exchange[0][0][1].magnitude == approx(-1.55874163e-17)
assert sec_eigs_gw.qp_linearization_prefactor[0][0][19] == 0.786
def test_freeh(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/freeh.out', archive, None)
sec_sccs = archive.run[0].calculation
assert len(sec_sccs) == 2
assert sec_sccs[0].energy.zero_point.value.magnitude == approx(4.89692971e-19)
assert sec_sccs[1].energy.correction_entropy.value.magnitude == approx(2.00144971e-19)
assert sec_sccs[1].thermodynamics[0].heat_capacity_c_v.magnitude == approx(2.27860167e-22)
assert sec_sccs[1].thermodynamics[0].pressure.magnitude == 100000.0
def test_pnoccsd(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/pnoccsd.out', archive, None)
assert np.shape(archive.run[0].system[0].atoms.positions) == (51, 3)
sec_methods = archive.run[0].method
assert len(sec_methods) == 4
assert sec_methods[0].electronic.method == 'CCSD(T)'
assert sec_methods[1].electronic.method == 'MP2'
assert sec_methods[2].electronic.method == 'CCSD'
assert sec_methods[3].electronic.method == 'CCSD(T0)'
sec_sccs = archive.run[0].calculation
assert len(sec_sccs) == 4
assert sec_sccs[0].energy.total.value.magnitude == approx(-5.63810959e-15)
assert sec_sccs[1].energy.total.value.magnitude == approx(-5.63669838e-15)
assert sec_sccs[2].energy.current.value.magnitude == approx(-2.19140251e-17)
assert sec_sccs[3].energy.total.value.magnitude == approx(-5.6380984e-15)
sec_scfs = sec_sccs[0].scf_iteration
assert len(sec_scfs) == 13
assert sec_scfs[6].energy.total.value.magnitude == approx(-5.63708622e-15)
def test_ricc2(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/MgO_embedding_ricc2.out', archive, None)
sec_systems = archive.run[0].system
assert len(sec_systems) == 4
assert sec_systems[0].atoms.positions[4][2].magnitude == approx(6.38760003e-10)
assert sec_systems[1].atoms.positions[18][0].magnitude == approx(4.25840002e-10)
assert sec_systems[2].atoms.positions[25][2].magnitude == approx(2.12920001e-10)
assert sec_systems[3].atoms.positions[-2][1].magnitude == approx(8.51680003e-10)
sec_sccs = archive.run[0].calculation
assert len(sec_sccs) == 3
assert sec_sccs[1].energy.total.value.magnitude == approx(-8.6955048e-15)
def test_ridft(parser):
archive = EntryArchive()
parser.parse('tests/data/turbomole/ridft.out', archive, None)
sec_method = archive.run[0].method[0]
assert sec_method.x_turbomole_dft_d3_version == '3.1 Rev 0'
sec_scc = archive.run[0].calculation[0]
assert sec_scc.energy.van_der_waals.value.magnitude == approx(-1.32811671e-18)
assert sec_scc.energy.total.value.magnitude == approx(-2.25881721e-14)
assert sec_scc.x_turbomole_virial_theorem == approx(1.94918952771)
sec_scf = sec_scc.scf_iteration
assert len(sec_scf) == 28
assert sec_scf[3].x_turbomole_energy_2electron_scf_iteration.magnitude == approx(1.02566632e-13)
assert sec_scf[23].energy.xc.value.magnitude == approx(-2.28814098e-15)
| 1.304688 | 1 |
mdgraph/models/gnn_lstm/config.py | braceal/pytorch-geometric-sandbox | 1 | 111748 | import argparse
import json
from typing import Union
from pathlib import Path
PathLike = Union[str, Path]
def get_parser():
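    """Build the command-line argument parser for the GNN-LSTM model."""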
parser = argparse.ArgumentParser()
parser.add_argument(
"--variational_node", action="store_true", help="Use variational node encoder"
)
parser.add_argument(
"--variational_lstm",
action="store_true",
help="Use variational LSTM Autoencoder",
)
parser.add_argument(
"--graph_attention",
action="store_true",
help="Use GAT network for node encoder.",
)
parser.add_argument(
"--node_recon_loss",
action="store_true",
help="Add node embedding reconstruction loss.",
)
parser.add_argument(
"--use_node_z",
action="store_true",
help="Compute adjacency matrix reconstruction using "
"node_encoder embeddings instead of LSTM decoder output",
)
parser.add_argument("--epochs", type=int, default=400)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument(
"--split_pct",
type=float,
default=0.8,
help="Percentage of data to use for training. The rest goes to validation.",
)
parser.add_argument(
"--data_path",
type=str,
default=str(Path(__file__).parent / "../../test/data/BBA-subset-100.h5"),
)
parser.add_argument(
"--lstm_num_layers",
type=int,
default=1,
help="Number of LSTM layers for encoder and decoder.",
)
parser.add_argument(
"--bidirectional",
action="store_true",
help="Whether LSTM should be bidirectional.",
)
parser.add_argument(
"--tsne_interval",
type=int,
default=5,
help="Run t-SNE every `tsne_interval` epochs.",
)
parser.add_argument(
"--run_dir",
type=Path,
default="./test_plots",
help="Output directory for model results.",
)
parser.add_argument(
"-f", "--jupyter", default="jupyter", help="For jupyter compatability"
)
return parser
def get_args():
parser = get_parser()
args = parser.parse_args()
return args
def args_from_json(args_json: PathLike):
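    """Load argument values from a JSON file, then run them through the CLI parser."""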
    parser = get_parser()
with open(args_json, "r") as f:
t_args = argparse.Namespace()
t_args.__dict__.update(json.load(f))
args = parser.parse_args(namespace=t_args)
return args
| 1.960938 | 2 |
utils.py | Cocos-BCX/data_analysis | 14 | 111876 | <filename>utils.py
# -*- coding:utf-8 -*-
import os
import re
import logging
import datetime as dt
from logging.handlers import RotatingFileHandler
from logging.handlers import TimedRotatingFileHandler
class SubFormatter(logging.Formatter):
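    """logging.Formatter that supports microsecond precision in timestamps."""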
    converter = dt.datetime.fromtimestamp
def formatTime(self, record, datefmt=None):
ct = self.converter(record.created)
if datefmt:
s = ct.strftime(datefmt)
else:
t = ct.strftime("%Y-%m-%d %H:%M:%S")
s = "%s,%03d" % (t, record.msecs)
return s
class Logging(object):
def __init__(self, log_dir='./logs', log_name='server', console=True):
self.logger = logging.getLogger(log_name)
self.logger.setLevel(logging.DEBUG)
#formatter = logging.Formatter("%(asctime)s [%(name)s] [%(funcName)s:%(lineno)s] [%(levelname)s]: %(message)s", "%Y-%m-%d %H:%M:%S")
formatter = SubFormatter(fmt='%(asctime)s [%(name)s] [%(funcName)s:%(lineno)s] [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S.%f')
# file handler
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_file = log_dir + '/' + log_name
#fh = logging.FileHandler(log_file)
#fh = TimedRotatingFileHandler(filename=log_file, when="D", interval=1, backupCount=7)
fh = TimedRotatingFileHandler(filename=log_file, when="H", interval=1, backupCount=3*24)
fh.suffix = "%Y-%m-%d_%H-%M.log"
fh.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}.log$")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
# console handler
# define a Handler which writes INFO messages or higher to the sys.stderr
if console:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def getLogger(self):
return self.logger
| 1.6875 | 2 |
tools/pbrt/pbrt/__init__.py | PearCoding/PearRay | 19 | 112004 | from .parser import Parser, Operation
from .writer import Writer
from .operator import Operator | 0.539063 | 1 |
y_tebya_ect/y_tebya_ect/game/urls.py | regalcat/y-tebya-ect | 0 | 112132 | from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^(?P<username>[\w.@+-]+)/$',
view=views.GameDashboardView.as_view(),
name='dashboard'
),
]
| 0.972656 | 1 |
toolsws/backends/__init__.py | diegodlh/operations-software-tools-webservice | 8 | 112260 | <reponame>diegodlh/operations-software-tools-webservice
from .backend import Backend
from .gridengine import GridEngineBackend
from .kubernetes import KubernetesBackend
__all__ = ["Backend", "GridEngineBackend", "KubernetesBackend"]
| 0.365234 | 0 |
Artificial Intelligence/Search and Optimization/Introduction to Game Playing/testcode.py | bhupendpatil/Practice | 1 | 112388 | import minimax_helpers
from gamestate import *
g = GameState()
print("Calling min_value on an empty board...")
v = minimax_helpers.min_value(g)
if v == -1:
print("min_value() returned the expected score!")
else:
print("Uh oh! min_value() did not return the expected score.")
"""
Output:
Calling min_value on an empty board...
Uh oh! min_value() did not return the expected score.
"""
| 2.015625 | 2 |
functions_advanced/exercise/02_odd_or_even.py | Galchov/python-advanced | 0 | 112516 | <reponame>Galchov/python-advanced
command = input()
numbers_sequence = [int(x) for x in input().split()]
odd_numbers = []
even_numbers = []
for number in numbers_sequence:
if number % 2 == 0:
even_numbers.append(number)
else:
odd_numbers.append(number)
if command == 'Odd':
print(sum(odd_numbers) * len(numbers_sequence))
elif command == 'Even':
print(sum(even_numbers) * len(numbers_sequence))
| 2.640625 | 3 |
api/utils.py | matej-vavrek/fragalysis-backend | 0 | 112644 | import xml.etree.ElementTree as ET
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from rdkit import Chem
from rdkit.Chem import AllChem, Draw, Atom, rdFMCS, rdDepictor
import re
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
from rdkit.Chem.Draw import rdMolDraw2D
from rest_framework.authtoken.models import Token
from frag.utils.network_utils import get_fragments, canon_input
ISO_COLOUR_MAP = {
100: (1, 0, 0),
101: (0, 1, 0),
102: (0, 0, 1),
103: (1, 0, 1),
104: (1, 1, 0),
105: (0, 1, 1),
106: (0.5, 0.5, 0.5),
107: (1, 0.5, 1),
}
def get_token(request):
"""
Get the authentication token for a given request.
Should just return an un-authenticated user token if nothing.
:param request:
:return:
"""
try:
user = User.objects.get(username=request.user)
token, created = Token.objects.get_or_create(user=user)
return token.key
except ObjectDoesNotExist:
return ""
def _transparentsvg(svg):
"""
Give an SVG a white background
:param svg:
:return:
"""
# Make the white background transparent
tree = ET.fromstring(svg)
rect = tree.find("rect")
rect.set("style", rect.get("style").replace("#FFFFFF", "none"))
# Recover some missing attributes for correct browser rendering
tree.set("version", "1.1")
tree.set("xmlns", "http://www.w3.org/2000/svg")
tree.set("xmlns:rdkit", "http://www.rdkit.org/xml")
tree.set("xmlns:xlink", "http://www.w3.org/1999/xlink")
return '<?xml version="1.0" encoding="UTF-8"?>' + ET.tostring(tree).strip()
def highlight_diff(prb_mol, ref_mol, width, height):
"""
Draw a molecule (prb_mol) with the differences from a reference model highlighted
:param prb_mol: smiles of the probe molecule
:param ref_mol: smiles of the reference molecule
:param width: output image width
:param height: output image height
:return: svg string of the image
"""
if not width:
width = 200
if not height:
height = 200
mols = [Chem.MolFromSmiles(prb_mol), Chem.MolFromSmiles(ref_mol)]
[Chem.Kekulize(m) for m in mols]
match = Chem.rdFMCS.FindMCS(mols, ringMatchesRingOnly=True, completeRingsOnly=True)
match_mol = Chem.MolFromSmarts(match.smartsString)
rdDepictor.Compute2DCoords(mols[0])
unconserved = [i for i in range(mols[0].GetNumAtoms()) if i not in mols[0].GetSubstructMatch(match_mol)]
drawer = rdMolDraw2D.MolDraw2DSVG(width, height)
drawer.DrawMolecule(mols[0], highlightAtoms=unconserved)
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
return svg
def draw_mol(
smiles,
height=200,
width=200,
img_type=None,
highlightAtoms=[],
atomcolors=[],
highlightBonds=[],
bondcolors={},
mol=None,
):
"""
Draw a molecule from a smiles
:param smiles: the SMILES to render
:param height: the height in px
:param width: the width in px
:return: an SVG as a string of the inage
"""
if mol is None:
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return "None Mol"
AllChem.Compute2DCoords(mol)
Chem.Kekulize(mol)
if not height:
height = 200
if not width:
width = 200
if img_type == "png":
img = Draw.MolToImage(
mol,
highlightBonds=highlightBonds,
highlightBondColors=bondcolors,
)
img = img.convert("RGBA")
datas = img.getdata()
newData = []
for item in datas:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
newData.append((255, 255, 255, 0))
else:
newData.append(item)
img.putdata(newData)
response = HttpResponse(content_type="image/png")
img.save(response, "PNG")
return response
else:
drawer = rdMolDraw2D.MolDraw2DSVG(height, width)
drawopt = drawer.drawOptions()
drawopt.clearBackground = False
drawer.DrawMolecule(
mol,
highlightAtoms=highlightAtoms,
highlightAtomColors=atomcolors,
highlightBonds=highlightBonds,
highlightBondColors=bondcolors,
)
drawer.FinishDrawing()
return drawer.GetDrawingText().replace("svg:", "")
def parse_vectors(vector_list):
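    """Parse a comma-separated string of indices into a list of ints."""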
return [int(x) for x in vector_list.split(",")]
def parse_bool(input_string):
if input_string.lower() in ("yes", "true", "t", "y", "1"):
return True
elif input_string.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Value not parsable")
def parse_atom_ids(input_list, mol):
"""
List of the form id,id,isotope,addHs
e.g. 1,2,104,True
:param input_list:
:param mol:
:return:
"""
spl_list = input_list.split(",")
bond_ids = []
atom_ids = []
bond_colours = {}
for i, data in enumerate(spl_list):
list_len = 4
if i % list_len in [0, 1]:
atom_ids.append(int(spl_list[i]))
if i % list_len == 2:
iso = int(spl_list[i])
if i % list_len == 3:
add_hs = parse_bool(spl_list[i])
atom_id_1 = atom_ids[0]
atom_id_2 = atom_ids[1]
if add_hs:
mol = AllChem.AddHs(mol)
# Replace the H with the atom id in atom_ids[0], atom_ids[1] with *
h_atoms = [x for x in mol.GetAtoms() if x.GetAtomicNum() == 1]
atom_remove = [
x.GetIdx() for x in h_atoms if x.GetIdx() in [atom_id_1, atom_id_2]
][0]
ed_mol = AllChem.EditableMol(mol)
# Remove the other Hs
ed_mol.ReplaceAtom(atom_remove, Atom(0))
# Get a new editable molecule
mol = ed_mol.GetMol()
mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))
# Record the new Atom Ids
atom_ids = [
[x.GetBonds()[0].GetBeginAtomIdx(), x.GetBonds()[0].GetEndAtomIdx()]
for x in mol.GetAtoms()
if x.GetAtomicNum() == 0
][0]
atom_id_1 = atom_ids[0]
atom_id_2 = atom_ids[1]
bond = mol.GetBondBetweenAtoms(atom_id_1, atom_id_2)
bond_ids.append(bond.GetIdx())
bond_colours[bond.GetIdx()] = ISO_COLOUR_MAP[iso]
atom_ids = []
return bond_ids, bond_colours, mol
def parse_xenons(input_smi):
mol = Chem.MolFromSmiles(input_smi)
e_mol = AllChem.EditableMol(mol)
xenons = [atom for atom in mol.GetAtoms() if atom.GetAtomicNum() == 54]
bond_ids = []
bond_colours = {}
for xe in xenons:
bond_id = xe.GetBonds()[0].GetIdx()
bond_ids.append(bond_id)
if len(xenons) > 1:
bond_colours[bond_id] = ISO_COLOUR_MAP[xe.GetIsotope()]
else:
bond_colours[bond_id] = ISO_COLOUR_MAP[101]
e_mol.ReplaceAtom(xe.GetIdx(), Atom(0))
return bond_ids, bond_colours, e_mol.GetMol()
def get_params(smiles, request):
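    """Render a molecule image for the given SMILES, honoring size and highlight request parameters."""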
    try:
        smiles = canon_input(smiles)
    except Exception:
        smiles = ""
height = None
mol = None
bond_id_list = []
highlightBondColors = {}
if "height" in request.GET:
height = int(request.GET["height"])
width = None
if "width" in request.GET:
width = int(request.GET["width"])
if "atom_indices" in request.GET:
mol = Chem.MolFromSmiles(smiles)
bond_id_list, highlightBondColors, mol = parse_atom_ids(
request.GET["atom_indices"], mol
)
if "Xe" in smiles:
bond_id_list, highlightBondColors, mol = parse_xenons(smiles)
img_type = request.GET.get("img_type", None)
get_mol = draw_mol(
smiles,
width=width,
height=height,
img_type=img_type,
highlightBonds=bond_id_list,
mol=mol,
bondcolors=highlightBondColors,
)
    if isinstance(get_mol, HttpResponse):
return get_mol
return HttpResponse(get_mol)
def get_highlighted_diffs(request):
prb_smiles = request.GET['prb_smiles']
ref_smiles = request.GET['ref_smiles']
height = None
width = None
if "height" in request.GET:
height = int(request.GET["height"])
if "width" in request.GET:
width = int(request.GET["width"])
return HttpResponse(highlight_diff(prb_mol=prb_smiles, ref_mol=ref_smiles, height=height, width=width))
def mol_view(request):
if "smiles" in request.GET:
smiles = request.GET["smiles"].rstrip(".svg")
return get_params(smiles, request)
else:
return HttpResponse("Please insert SMILES")
| 1.625 | 2 |
example.py | heironeous/mijiahygrothermolib | 0 | 112772 | from mijiahygrothermo import MijiaHygrothermo
for device in MijiaHygrothermo.discover():
data = device.get_latest_properties()
print("- {}".format(data['macAddress']))
print(" name: {}".format(data['name']))
print(" firmware: {}".format(data['firmwareVersion']))
print(" battery level: {}%".format(data['batteryPercentage']))
print(" temperature: {}*C".format(data['temperature']))
print(" humidity: {}%".format(data['humidity']))
print(" last data read: {}%".format(data['lastDataRead']))
print()
| 1.054688 | 1 |
cms/views.py | Parveen3300/Reans | 0 | 112900 | from django.shortcuts import render, redirect
from django.views.generic import TemplateView
# Create your views here.
from cms.models import TermsConditions
from cms.models import WebsiteCookiesPolicy
from cms.models import WebsitePolicy
from cms.models import ReplacementCancellationPolicy
from cms.models import AboutUs
from cms.models import ContactUs
class About_Us(TemplateView):
"""
About_Us
"""
template_name = 'about_us.html'
def get(self, request):
"""
        Return the About Us page with the latest stored content.
"""
about_us_data = AboutUs.objects.last()
return render(
request,
self.template_name,
{'about_us_data': about_us_data if about_us_data else []}
)
class Contact_Us(TemplateView):
"""
Contact_Us
"""
template_name = 'contact.html'
def get(self, request):
"""
        Return the Contact Us page with the latest stored content.
"""
contact_us_data = ContactUs.objects.last()
return render(
request,
self.template_name,
{'contact_us_data': contact_us_data if contact_us_data else []}
)
def post(self, request):
"""
        Handle submission of the contact form and store the details.
date 15 nov 2021
@ravisingh
"""
if request.method == "POST":
data = {
'name': request.POST['name'],
'email': request.POST['email'],
'mobile': request.POST['mobile'],
'message': request.POST['message']
}
            # only store fully filled-in submissions
            if all(data.values()):
                ContactUs.objects.create(**data)
                return redirect("/")
return redirect('/contact-us/')
class TermsAndConditions(TemplateView):
"""
TermsConditions
"""
template_name = 'terms_condition.html'
def get(self, request):
"""
        Return the Terms & Conditions page with the latest stored content.
"""
terms_conditions = TermsConditions.objects.last()
return render(
request,
self.template_name,
{'terms_conditions': terms_conditions if terms_conditions else []}
)
class WebCookiePolicy(TemplateView):
"""
WebCookiePolicy
"""
template_name = 'cookie_policy.html'
def get(self, request):
"""
        Return the Cookies Policy page with the latest stored content.
"""
cookie_policy = WebsiteCookiesPolicy.objects.last()
return render(
request,
self.template_name,
{'cookie_policy': cookie_policy if cookie_policy else []}
)
class Support(TemplateView):
"""
Support
"""
template_name = 'support.html'
def get(self, request):
"""
        Render the static support page.
"""
return render(
request,
self.template_name, {}
)
class ReplacementPolicy(TemplateView):
"""
WebCookiePolicy
"""
template_name = 'replacement_policy.html'
def get(self, request):
"""
        Render the replacement policy page.
"""
return render(request, self.template_name, {})
| 1.554688 | 2 |
colossalai/gemini/tensor/_ops/linear.py | weiplanet/ColossalAI | 0 | 113028 | <reponame>weiplanet/ColossalAI<filename>colossalai/gemini/tensor/_ops/linear.py
import torch
from colossalai.gemini.tensor import stateful_op_impl
from ..stateful_tensor import StatefulTensorV2
from packaging import version
@stateful_op_impl(torch.nn.functional.linear)
def stateful_linear(types, args, kwargs, pg):
"""Handles ``__torch_function__`` dispatch for ``torch.nn.functional.linear``.
This method computes a linear.
"""
input_tensor = args[0]
weight = args[1]
if version.parse(torch.__version__) > version.parse("1.11.0"):
if len(args) == 3:
bias = args[2]
else:
bias = None
else:
bias = kwargs.get('bias', None)
if isinstance(bias, StatefulTensorV2):
bias = bias.torch_tensor()
# Add communication logic before and after linear call.
if isinstance(weight, StatefulTensorV2):
return torch.nn.functional.linear(input_tensor, weight.torch_tensor(), bias)
else:
return torch.nn.functional.linear(input_tensor, weight, bias)
| 1.601563 | 2 |
nextcord/ext/space/client.py | japandotorg/nextcord-ext-space | 0 | 113156 | import logging
import asyncio
from nextcord import Client
from nextcord.gateway import ResumeWebSocket
from .config import Configuration
logger = logging.getLogger(__name__)
class Astronaut(Client):
def __init__(self, *args, **kwargs):
self.config: Configuration = kwargs.pop('config')
logger.debug("initiated Astronaut with %a.", self.config)
self.comet = self.config.anal_cls(self.config.recorder, self.config.event_flags, self.config.op_flags)
super().__init__(*args, **kwargs)
self.http = self.config.httpclient_cls.from_http_client(self.http, self.config.session_cls, self.comet)
def takeoff(self, event, *args, **kwargs) -> None:
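        # schedule the analytics logger for this event before normal dispatch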
self._schedule_event(self.comet.log, 'space_logging', event, *args, **kwargs)
super().takeoff(event, *args, **kwargs)
async def start(self, *args, **kwargs) -> None:
await self.config.recorder.start()
next_id = await self.config.recorder.last_events_id() + 1
logger.debug("The next ID for the 'events' table is %a", next_id)
await super().start(*args, **kwargs)
async def close(self) -> None:
await super().close()
if self.config.recorder.started:
curr_id = await self.config.recorder.last_events_id()
logger.debug("The final ID for the 'events' table is %a", curr_id)
await self.config.recorder.end()
async def _connect(self) -> None:
cls = self.config.gw_cls
mason = cls.from_client(self, shard_id=self.shard_id)
self.ws: cls = await asyncio.wait_for(mason, timeout=180.0)
while True:
try:
await self.ws.poll_event()
except ResumeWebSocket:
logger.info('Got a request to RESUME the websocket.')
self.takeoff('disconnect')
mason = cls.from_client(self, shard_id=self.shard_id, session=self.ws.session_id,
sequence=self.ws.sequence, resume=True)
self.ws = await asyncio.wait_for(mason, timeout=180.0) | 1.46875 | 1 |
redbrick/coco/coco_main.py | dereklukacs/redbrick-sdk | 1 | 113284 | """Main file for converting RedBrick format to coco format."""
import asyncio
from typing import Dict, List, Optional, Tuple
import aiohttp
from yarl import URL
import tenacity
from redbrick.utils.async_utils import gather_with_concurrency
from redbrick.utils import aioimgspy
from redbrick.utils.logging import print_warning
from redbrick.common.constants import MAX_CONCURRENCY, MAX_RETRY_ATTEMPTS
from .polygon import rb2coco_polygon
from .bbox import rb2coco_bbox
from .categories import rb_get_class_id, rb2coco_categories_format
async def _get_image_dimension_map(
datapoints: List[Dict],
) -> Dict[str, Tuple[int, int]]:
"""Get a map from taskId to (width, height) of the images."""
@tenacity.retry(
stop=tenacity.stop_after_attempt(MAX_RETRY_ATTEMPTS),
wait=tenacity.wait_exponential(multiplier=1, min=1, max=10),
retry=tenacity.retry_if_not_exception_type((KeyboardInterrupt,)),
)
async def _get_size(
session: aiohttp.ClientSession, datapoint: Dict
) -> Tuple[str, Tuple[int, int]]:
if not datapoint["itemsPresigned"] or not datapoint["itemsPresigned"][0]:
return datapoint["taskId"], (0, 0)
async with session.get(
# encode with yarl so that aiohttp doesn't encode again.
URL(datapoint["itemsPresigned"][0], encoded=True)
) as response:
temp = await aioimgspy.probe(response.content) # type: ignore
return datapoint["taskId"], (temp["width"], temp["height"])
# limit to 30, default is 100, cleanup is done by session
conn = aiohttp.TCPConnector(limit=MAX_CONCURRENCY)
async with aiohttp.ClientSession(connector=conn) as session:
coros = [_get_size(session, dpoint) for dpoint in datapoints]
all_sizes = await gather_with_concurrency(10, coros, "Getting image dimensions")
await asyncio.sleep(0.250) # give time to close ssl connections
return {temp[0]: temp[1] for temp in all_sizes}
# pylint: disable=too-many-locals
def coco_converter(
datapoints: List[Dict],
taxonomy: Dict,
image_dims_map: Optional[Dict[str, Tuple[int, int]]] = None,
) -> Dict:
"""Convert redbrick labels to standard coco format."""
coco_categories = rb2coco_categories_format(taxonomy)
if image_dims_map is None:
image_dims_map = asyncio.run(_get_image_dimension_map(datapoints))
images: List[Dict] = []
annotations: List[Dict] = []
for data in datapoints:
file_name = data["name"]
task_id = data["taskId"]
labels = data["labels"]
width, height = image_dims_map[task_id]
current_image_id = len(images)
image_entry = {
"id": current_image_id,
"task_id": task_id,
"file_name": file_name,
"raw_url": data["items"][0],
"height": height,
"width": width,
}
if "itemsPresigned" in data:
image_entry["signed_url"] = data["itemsPresigned"][0]
images.append(image_entry)
skipped_labels = 0
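        # count labels that are neither bbox2d nor polygon, or that fail conversion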
for label in labels:
annotation_index = len(annotations)
try:
if label.get("bbox2d"):
class_id = rb_get_class_id(label["category"][0], taxonomy)
coco_label = rb2coco_bbox(
label,
annotation_index,
current_image_id,
class_id,
width,
height,
)
annotations.append(coco_label)
elif label.get("polygon"):
class_id = rb_get_class_id(label["category"][0], taxonomy)
coco_label = rb2coco_polygon(
label,
annotation_index,
current_image_id,
class_id,
width,
height,
)
annotations.append(coco_label)
else:
skipped_labels += 1
except Exception: # pylint: disable=broad-except
skipped_labels += 1
if skipped_labels:
print_warning(f"Skipped {skipped_labels} labels for {data['taskId']}")
return {
"images": images,
"annotations": annotations,
"categories": coco_categories,
"info": {},
"licenses": [],
}
| 1.601563 | 2 |
core/utils/network/sms.py | vsilent/smarty-bot | 1 | 113412 | <filename>core/utils/network/sms.py
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC32a3c49700934481addd5ce1659f04d2"
auth_token = ""
client = TwilioRestClient(account_sid, auth_token)
message = client.sms.messages.create(body="Jenny please?! I love you <3",
to="+14159352345", # Replace with your phone number
from_="+14158141829") # Replace with your Twilio number
print(message.sid)
| 1.523438 | 2 |
28 Max of three.py | jamesharrop/PracticePython | 1 | 113540 | '''
Max Of Three
Exercise 28
Implement a function that takes as input three variables, and returns the largest of the three.
Do this without using the Python max() function
'''
import random
def max_of_three(a, b, c):
if a>b:
if a>c:
return a
else:
return c
else:
if b>c:
return b
else:
return c
def max_of_arbitrary_sized_list(a: list):
max = a[0]
for number in a:
if number > max:
max = number
return max
def test_it():
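    # check both implementations against the built-in max() on a random triple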
a = random.randint(-100, 100)
b = random.randint(-100, 100)
c = random.randint(-100, 100)
if max([a, b, c]) != (max_of_three(a,b,c)):
print(a, b, c)
else:
print(".", end = "")
if max([a, b, c]) != (max_of_arbitrary_sized_list([a,b,c])):
print(a, b, c)
else:
print("_", end = "")
for _ in range(0, 100):
test_it() | 2.8125 | 3 |
backend/dqm/checks/check_pii.py | google/dqm | 9 | 113668 | <reponame>google/dqm<filename>backend/dqm/checks/check_pii.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dqm.check_bricks import (
Check,
DataType,
Parameter,
Platform,
Result,
ResultField,
Theme,
)
from dqm.helpers import analytics
class CheckPii(Check):
"""Detect tracked URLs containing PII related informations, as e-mail, name or
password.
GIVEN
A site URL
WHEN
        A query to GA Reporting API v4, filtering on ("ga:pagePath" dimension
        contains "e-mail=" OR "name=" OR "password=" OR "@") returns 1 or
        more results
THEN
The URL should be flagged as unsafe.
"""
title = 'PII in tracked URI'
description = """
    Detect tracked URLs containing PII-related information, such as e-mail,
    name or password.
"""
platform = Platform.Ga
theme = Theme.Trustful
parameters = [
Parameter(name='viewId', data_type=DataType.STRING, delegate=True),
Parameter(name='startDate', data_type=DataType.DATE, delegate=True),
Parameter(name='endDate', data_type=DataType.DATE, delegate=True),
Parameter(name='blackList', title='PII to avoid', data_type=DataType.LIST,
default=['e-mail', 'name', 'password']),
]
result_fields = [
ResultField(name='url', title='URL', data_type=DataType.STRING),
ResultField(name='param', title='Parameter name', data_type=DataType.STRING)
]
def run(self, params):
params = self.validate_values(params)
black_list = params['blackList']
urls = analytics.get_url_parameters(
view_id=params['viewId'],
start_date=params['startDate'],
end_date=params['endDate'])
problems = []
for url in urls:
for p in url['params']:
if p in black_list:
problems.append({
'url': url['url'],
'param': p,
})
return Result(success=not problems, payload=problems)
| 1.4375 | 1 |
agora_analytica/analytics/linear.py | Kyrmy/prototyyppi | 0 | 113796 | import logging
from typing import Tuple
import pandas as pd
import numpy as np
from cachetools import cached
from cachetools.keys import hashkey
from . import _get_common_columns
logger = logging.getLogger(__name__)
ACCEPTED_TYPES = ["linear"]
def distance(source: pd.Series, target: pd.Series, answers: pd.DataFrame,
answer_scale=5, bias_min=0.2, bias_max=2.0) -> float:
""" Calculate distance between targets.
Uses less common answers to skew bias.
:param scale: (optional) Scale on which questions are asked, starting from 1. Defaults to 5.
:param bias_min: (optional) float Minimum allowed bias.
:param bias_max: (optional) float Maximum allowed bias
"""
# Collect columns that source and target have both answered.
columns = _get_common_columns(source, target, answers)
# Stores distances, and is used to calculate mean value.
distances = np.zeros(len(columns))
# Go through answers, and calculate answer distances from source to target
for i, col in enumerate(columns):
# Collect answers into unique set.
answers_set = tuple(set([
np.int(source[col]),
np.int(target[col])
]))
# Calculate similar and different answers
similar_count, different_count = _similar_counts(col, answers, answers_set)
similar_ratio = similar_count / len(answers_set)
different_ratio = different_count / (answer_scale - len(answers_set))
# Calculate bias
bias = np.float(min(bias_max, max(bias_min, different_ratio / similar_ratio)))
# Calculate distance between answers with bias.
distance = np.abs(np.int(source[col]) - np.int(target[col])) * bias
distances[i] = distance
distance_mean = distances.mean() or 0
return distance_mean if not np.isnan(distance_mean) else np.float(0)
@cached(cache={}, key=lambda column, answers, answer_set: hashkey(column, answer_set))
def _similar_counts(column: str, answers: pd.DataFrame, answers_set: Tuple[int]) -> Tuple[np.int, np.int]:
"""
Similar and different answers.
:return: Tuple of different and similar answers
"""
# Create boolean list of people who answered similarly to current `answers_set`
similar_filter = answers[column].isin(answers_set)
# Calculate similar and different answers
similar_count = answers[column].dropna()[similar_filter].count()
different_count = answers[column].dropna()[~similar_filter].count()
logger.debug("'%s': Similar/Different: %i / %i", column, similar_count, different_count)
return (similar_count, different_count)
| 2.953125 | 3 |
prawpapers/configurator.py | nikolajlauridsen/PrawWallpaperDownloader | 16 | 113924 | <reponame>nikolajlauridsen/PrawWallpaperDownloader
import configparser
import os
import qprompt
class Configurator:
def __init__(self):
self.config = configparser.ConfigParser()
if not os.path.isfile('config.ini'):
self.create_default_config()
else:
self.config.read('config.ini')
self.type_map = {int: ["minwidth", "minheight", "limit", "threads",
"maxage"],
float: ["ratiolock"],
str: ["sub", "section"],
bool: ["clean", "sort", "albums"]
}
def create_default_config(self):
"""Create a default config file in the current working directory"""
self.config['DEFAULT'] = {'MinWidth' : '1280',
'MinHeight' : '720',
'Sub' : 'wallpapers',
'Limit' : '25',
'Clean' : 'yes',
'Sort' : 'yes',
'MaxAge' : '0',
'Albums' : 'yes',
'Threads' : '10',
'Section' : 'hot',
'RatioLock' : '0.95'}
self.config['user'] = {}
self.save_config()
def get_config(self):
"""Return user instance of the config, items in default and not in user
will be carried into the user config object"""
return self.config['user']
@staticmethod
def clear_screen():
"""Clears the commandline window"""
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def create_menu(self):
"""
Creates a qprompt menu and adds all items from config
:return: qprompt menu
"""
menu = qprompt.Menu()
for i, item in enumerate(self.config['user']):
menu.add(str(i+1), item)
menu.add("0", "Exit")
menu.add("-1", "List settings")
menu.add("-2", "Reset config")
return menu
def update_value(self, value):
"""
Prompt the user to input a new value, then save the updated config
and return to menu.
Determines how to prompt the user by finding the value name passed as
argument in one of the lists in the type map.
:param value: value name to be updated e.g. minwidth
"""
desc_str = "Enter new {} (currently {})".format(value, self.config['user'][value])
if value in self.type_map[int]:
self.config['user'][value] = str(qprompt.ask_int(desc_str))
elif value in self.type_map[float]:
self.config['user'][value] = str(qprompt.ask_float(desc_str))
elif value in self.type_map[str]:
self.config['user'][value] = qprompt.ask_str(desc_str)
elif value in self.type_map[bool]:
desc_str += " y/n"
if qprompt.ask_yesno(desc_str):
self.config['user'][value] = "yes"
else:
self.config['user'][value] = "no"
self.save_config()
self.clear_screen()
print('Config saved...')
def list_settings(self):
# Find the length of the longest key name
pad = 0
for key in self.config['user'].keys():
if len(key) > pad:
pad = len(key)
pad += 1 # Add one to the padding, a little air is pretty
# Clear screen and print all the settings as a pretty list.
self.clear_screen()
print('Current settings: ')
for key in self.config['user']:
print('{:{align}}: {}'.format(key, self.config['user'][key], align=pad))
# Pause for the user to be able to read the settings.
input('\nPress enter to return to menu.')
def menu(self):
"""Run the configurator menu allowing user to edit config"""
menu = self.create_menu()
selection = menu.show(returns="desc")
if selection == "Reset config":
answer = qprompt.ask_yesno("Are you sure you want to reset your config? y/n")
if answer:
self.create_default_config()
print("Config reset.")
else:
print('Reset canceled')
elif selection == "List settings":
self.list_settings()
self.clear_screen()
self.menu()
elif selection == "Exit":
pass
else:
self.update_value(selection)
self.menu()
def save_config(self):
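        """Write the current configuration to config.ini."""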
with open('config.ini', 'w') as configfile:
self.config.write(configfile)
| 1.9375 | 2 |
main.py | Ren-Zhi-Chao/python-selenium | 0 | 114052 | <filename>main.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from util.ConfigParser import ConfigParser
import xlrd
from domain.Core import MyWebDrive, BROWSER_OPETION, OTHER_OPETION, ELEMENT_OPETION
from util.TimeUtil import getTime, DATE_FORMATE
config = ConfigParser()
output = config.fileOutput()
# from selenium import webdriver
# from util.TimeUtil import *
# from util.ConfigParser import ConfigParser
# DRIVE = {
# 'chrome': 'Chrome'
# }
# class MyWebdriver:
# browser = None
# def __init__(self, drive_type = DRIVE['chrome'], load_timeout = 300):
# if 'Chrome' == drive_type:
# self.browser = webdriver.Chrome()
# self.setOutTime(load_timeout)
# #self.page_source
# def setOutTime(self, timeout):
# self.browser.set_page_load_timeout(timeout)
# def OpenBrowser(self, url):
# attempt = 1
# print('%s 第 [%d] 尝试连接【%s】站点'%(getNow(), attempt, url))
# self.browser.get(url)
# print(self.browser)
# myWD = MyWebdriver(drive_type = 'Chrome')
# myWD.OpenBrowser('http://www.baidu.com')
EXCEL = {
"browser": {
"valid": ['open', 'resize', 'back', 'forward', 'refresh', 'close', 'quit'],
"func": BROWSER_OPETION
},
"other": {
"valid": ['wait'],
"func": OTHER_OPETION
},
"element": {
"valid": ['clear', 'set', 'clear&set'],
"func": ELEMENT_OPETION
}
}
def fun_isdigit(val):
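    """Return the value as an int when it parses as a number, else an empty string."""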
try:
return int(float(val))
except:
return ''
# process a single spreadsheet row
def sheet_handle(driver, row_value, output = None):
print(row_value)
xh = int(row_value[0])
type = row_value[3].lower()
seletor = row_value[4].lower()
optn = row_value[5].lower()
express = row_value[6].lower()
value = str(row_value[7]).lower()
wait_time = str(row_value[8]).strip()
wait_time = fun_isdigit(wait_time)
save_image = row_value[9]
skip = row_value[10]
if skip:
return
    if type not in EXCEL:
        print('...invalid element type selected.....')
        return
    if optn not in EXCEL[type]['valid']:
        print('...the selected operation type is not valid.....')
        return
funKey = None
if 'browser' == type or 'other' == type:
funKey = optn
else:
funKey = seletor
EXCEL[type]['func'][funKey](driver,
value = value, optn = optn, wait_time = wait_time,
express = express, seletor = seletor)
if save_image:
file_output = output + os.sep + str(int(xh)) + '.png'
        print(' => page screenshot: %s' % file_output)
driver.get_screenshot_as_file(file_output)
    print('row handler finished..')
if __name__ == "__main__":
config = ConfigParser()
file_path = config.filePath() + os.sep + config.fileName()
if not os.path.exists(file_path):
        print(' ERROR: file does not exist! %s' % file_path)
else:
workbook = xlrd.open_workbook(file_path)
sheet_datas = workbook.sheets()
driver = MyWebDrive().GetDrive(config.driveType())
for sheet in sheet_datas:
if sheet.name.startswith('script'):
if output:
output = output + getTime(DATE_FORMATE['DIR']) + os.sep + sheet.name + os.sep
if not os.path.exists(output):
os.makedirs(output)
# print(sheet.nrows, sheet.ncols)
for i in range(sheet.nrows):
if i > 0:
                    print('processing row [%d]' % i)
sheet_handle(driver, sheet.row_values(i), output = output) | 1.703125 | 2 |
util/devices_performance_util.py | Kk0t/Appium-App-utomation | 0 | 114180 | # -*- coding: utf-8 -*-
# @Time : 2021/5/31 14:54
# @Author : WuBingTai
import subprocess
import os
from math import ceil
pkg_name = "com.myzaker.ZAKER_Phone"
cpu = []
men = []
flow = [[], []]
def top_cpu(pkg_name):
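    """Collect per-process CPU usage percentages from adb dumpsys cpuinfo."""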
cmd = "adb shell dumpsys cpuinfo | grep " + pkg_name
temp = []
# cmd = "adb shell top -n %s -s cpu | grep %s$" %(str(times), pkg_name)
top_info = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.readlines()
# print(top_info)
for info in top_info:
        temp.append(info.split()[2].decode())  # decode bytes to str
    # print("CPU usage: %s" % cpu)
for i in temp:
if i != "0%":
cpu.append(i.split("%")[0])
return cpu
def get_men(pkg_name):
cmd = "adb shell dumpsys meminfo %s" % (pkg_name)
temp = []
m = []
men_s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.readlines()
for info in men_s:
temp.append(info.split())
# print("内存占用:%s" %men[19][1].decode()+"K")
m.append(temp)
for t in m:
men.append(t[19][1].decode())
return men
# After capturing the counters, subtract the value before a step from the value after it
# to get the traffic that step consumed; it can also be computed from the time difference.
def getFlow(pid="31586"):
flow_info = os.popen("adb shell cat /proc/" + pid + "/net/dev").readlines()
t = []
for info in flow_info:
temp_list = info.split()
t.append(temp_list)
    flow[0].append(ceil(int(t[6][1]) / 1024))  # received (download)
    flow[1].append(ceil(int(t[6][9]) / 1024))  # transmitted (upload)
return flow
if __name__ == '__main__':
print(top_cpu(pkg_name))
| 1.789063 | 2 |
trainNN/iterutils.py | seqcode/iTF | 0 | 114308 | """ Helper module with methods for one-hot sequence encoding and generators to
to enable whole genome iteration """
import h5py
import numpy as np
import tensorflow as tf
from collections import defaultdict
class Sequence:
dic = {
"A": 0,
"T": 1,
"G": 2,
"C": 3
}
""" Methods for manipulation of DNA Sequence """
def __init__(self):
pass
@staticmethod
def map(buf, seqlength):
numSeq = len(buf)
seqLen = len(buf[0])
# initialize the matrix to seqlen x 4
seqMatrixs = np.zeros((numSeq,seqLen,4), dtype=int)
# change the value to matrix
for i in range(0,numSeq):
dnaSeq = buf[i].upper()
seqMatrix = seqMatrixs[i]
for j in range(0,seqLen):
try:
seqMatrix[j, Sequence.dic[dnaSeq[j]]] = 1
except KeyError:
continue
return seqMatrixs
@staticmethod
def add_to_buffer(buf, line):
buf.append(line.strip())
class Chromatin:
""" Methods for manipulating discrete chromatin tag counts/ domain calls"""
def __init__(self):
pass
@staticmethod
def map(buf, seqlen):
return np.array(buf)
@staticmethod
def add_to_buffer(buf, line):
chrom = line.strip().split()
val = [float(x) for x in chrom]
buf.append(val)
def assign_handler(dtype):
""" Choosing class based on input file type"""
if dtype == "seq":
# use Sequence methods
handler = Sequence
else:
# use Chromatin methods
handler = Chromatin
return handler
def train_generator(h5file, filename, batchsize, seqlen, dtype, iterflag):
""" A generator to return a batch of training data, while iterating over the file in a loop. """
handler = assign_handler(dtype)
with open(filename, "r") as fp:
line_index = 0
buf = [] # buf is my feature buffer
while True:
for line in fp:
if line_index < batchsize:
handler.add_to_buffer(buf, line)
line_index += 1
else:
yield handler.map(buf, seqlen)
buf = [] # clean buffer
handler.add_to_buffer(buf, line)
line_index = 1 # reset line index
if iterflag == "repeat":
# reset file pointer
fp.seek(0)
else:
yield handler.map(buf, seqlen)
break
def train_generator_h5(h5file, dspath, batchsize, seqlen, dtype, iterflag):
""" A generator to return a batch of training data, while iterating over the file in a loop. """
with h5py.File(h5file, 'r', libver='latest', swmr=True) as h5:
ds = h5[dspath][:]
num_samples = ds.shape[0]
dim = len(ds.shape)
start_index = 0
end_index = 0
while True:
start_index = end_index
end_index += batchsize
if end_index >= num_samples:
if iterflag == "repeat":
# reset
c1 = ds[start_index:num_samples]
end_index = batchsize - c1.shape[0]
c2 = ds[0: end_index]
chunk = np.vstack([c1, c2]) if dim>1 \
else np.concatenate([c1, c2])
yield chunk
else:
yield ds[start_index:num_samples]
break
else:
yield ds[start_index:end_index]
def train_TFRecord_dataset(dspath, batchsize, dataflag):
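    """Build a shuffled, batched tf.data pipeline over the TFRecord files in dspath."""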
#raw_dataset = tf.data.TFRecordDataset(dspath["TFRecord"])
# prepare feature description
feature_description = defaultdict()
feature_description["seq"] = tf.io.FixedLenFeature([], tf.string)
feature_description["label"] = tf.io.FixedLenFeature([], tf.int64)
for ct in dspath["chromatin_tracks"]:
feature_description[ct] = tf.io.FixedLenFeature([], tf.string)
def _parse_function(example_proto, flag="seqonly"):
# Parse the input `tf.train.Example` proto using the feature dictionary
example_message = tf.io.parse_single_example(example_proto, feature_description)
seq = example_message["seq"]
seq = tf.io.parse_tensor(seq, out_type=tf.int64)
combined_chromatin_data = []
for ct in dspath["chromatin_tracks"]:
ct_message = example_message[ct]
ct_message = tf.io.parse_tensor(ct_message, out_type=tf.float64)
combined_chromatin_data.append(ct_message)
combined_chromatin_data = tf.concat(combined_chromatin_data, axis=0)
label = example_message["label"]
if flag=="seqonly":
return (seq, label)
else:
return {"seq":seq, "chrom_input":combined_chromatin_data}, label
def _parse_function_wrapper(example_proto):
return _parse_function(example_proto, dataflag)
files = tf.data.Dataset.from_tensors(dspath["TFRecord"])
parsed_dataset = (files.interleave(tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)
.shuffle(100)
.map(_parse_function_wrapper, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batchsize, drop_remainder=True)
.prefetch(tf.data.AUTOTUNE))
return parsed_dataset
| 2.671875 | 3 |
web/prowlbackend/prowlbackend/settings/__init__.py | stensjoberg/pton-prowl | 0 | 114436 | <filename>web/prowlbackend/prowlbackend/settings/__init__.py
# Change dev to prod for production setting
from .dev import *
# WARNING: Never run in production with development settings
| 0.402344 | 0 |
SWIM-Executables/Windows/pyinstaller-2.0 for windows/support/rthooks/pyi_rth_babel.py | alexsigaras/SWIM | 47 | 114564 | import os
import sys
d = "localedata"
d = os.path.join(sys._MEIPASS, d)
import babel.localedata
babel.localedata._dirname = d
| 0.53125 | 1 |
heat_dashboard/test/tests/content/test_resource_types.py | efenfauzi/heat-dashboard | 13 | 114692 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import reverse
from heat_dashboard import api
from heat_dashboard.test import helpers as test
from heat_dashboard.test.helpers import IsHttpRequest
class ResourceTypesTests(test.TestCase):
@test.create_mocks({api.heat: ('resource_types_list',)})
def test_index(self):
self.mock_resource_types_list.return_value = \
self.resource_types.list()
res = self.client.get(
reverse('horizon:project:resource_types:index'))
self.assertTemplateUsed(
res, 'horizon/common/_data_table_view.html')
self.assertContains(res, 'AWS::CloudFormation::Stack')
self.mock_resource_types_list.assert_called_once_with(
IsHttpRequest(), filters={})
@test.create_mocks({api.heat: ('resource_type_get',)})
def test_detail_view(self):
rt = self.api_resource_types.first()
self.mock_resource_type_get.return_value = rt
url = reverse('horizon:project:resource_types:details',
args=[rt['resource_type']])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertNoMessages()
self.mock_resource_type_get.assert_called_once_with(
IsHttpRequest(), rt['resource_type'])
| 1.476563 | 1 |
visualizations/stratifiedSurvival.py | zm00094/vs_eventListTools | 0 | 114820 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 08:07:47 2018
@author: jel2
"""
def stratifiedSurvival(t,eventTime,eventIndicator=None,followupTime=None,group=None):
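    """Plot Kaplan-Meier survival curves stratified by group, with at-risk counts."""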
import matplotlib.pyplot as plt
import lifelines as lf
from lifelines.plotting import add_at_risk_counts
import pandas as pd
import copy
tm=t[eventTime].copy()
    if group is None:
        grp = pd.Series('Population', index=t.index)
    else:
        grp = t[group]
    if eventIndicator is None:
        # no explicit indicator: a recorded event time counts as an event,
        # and censored rows take the follow-up time as their observation time
        ev = ~t[eventTime].isnull()
        tm[tm.isnull()] = t.loc[tm.isnull(), followupTime]
    else:
        ev = t[eventIndicator]
######### Kaplan Meier curves stratified by sex
kl=list()
kmf = lf.KaplanMeierFitter()
fig,ax=plt.subplots()
for g in set(grp):
kmf.fit(tm[grp==g],ev[grp==g],label=g)
kmf.plot(ax=ax)
kl.append(copy.deepcopy(kmf))
add_at_risk_counts(*kl, ax=ax)
plt.legend(loc='lower left')
plt.ylim([0,1])
plt.xlabel('Time (years)')
plt.ylabel('Survival')
plt.title('Kaplan-Meier survival curve')
# add_at_risk_counts(kmf1,kmf2, ax=ax)
#ax.spines['bottom'].set_position(('axes', -0.15 * 6.0 / fig.get_figheight()))
| 2.0625 | 2 |
COMTRIS/modules/bs4_crawler/error_edit.py | songyw0517/COMTRIS | 0 | 114948 | import os
from pymongo import MongoClient
myclient = MongoClient(os.environ["COMTRIS_MONGODB_URI"])
db = myclient['COMTRIS']
col = db['pc_quote']
col.update({}, {'$inc' : {'id' : 2000000}}, multi=True)  # increase every id by 2,000,000
# col.update({}, {'$inc' : {'id' , '2000000'}})
# This error occurs when the key : value pair is not formed correctly:
# bson.errors.InvalidDocument: cannot encode object: {2000000, 'id'}, of type: <class 'set'>
# updateMany is used to change several key values of a single document.
# update is used to change one key value of a single document.
# To target multiple documents, set update's multi flag to True.
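# A modern-pymongo sketch of the same bulk increment (update_many replaces the
# deprecated update(..., multi=True) form; collection as above):
# col.update_many({}, {'$inc': {'id': 2000000}})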
| 1.179688 | 1 |
stdlib2-src/dist-packages/gtweak/tweaks/tweak_group_startup.py | ch1huizong/Scode | 0 | 115076 | <reponame>ch1huizong/Scode
# This file is part of gnome-tweak-tool.
#
# Copyright (c) 2011 <NAME>
#
# gnome-tweak-tool is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gnome-tweak-tool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gnome-tweak-tool. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os.path
import subprocess
import logging
from gi.repository import Gtk, GLib, Gio
from gtweak.tweakmodel import Tweak
from gtweak.widgets import ListBoxTweakGroup, UI_BOX_SPACING
from gtweak.utils import AutostartManager, AutostartFile
def _list_header_func(row, before, user_data):
if before and not row.get_header():
row.set_header (Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
class _AppChooser(Gtk.Dialog):
def __init__(self, main_window, running_exes):
Gtk.Dialog.__init__(self, title=_("Applications"))
self._running = {}
self._all = {}
lb = Gtk.ListBox()
lb.props.margin = 5
lb.set_sort_func(self._sort_apps, None)
lb.set_header_func(_list_header_func, None)
apps = Gio.app_info_get_all()
for a in apps:
if a.should_show():
running = a.get_executable() in running_exes
w = self._build_widget(
a,
_("running") if running else "")
if w:
self._all[w] = a
self._running[w] = running
lb.add(w)
sw = Gtk.ScrolledWindow()
sw.props.hscrollbar_policy = Gtk.PolicyType.NEVER
sw.add(lb)
self.add_button(_("_Close"), Gtk.ResponseType.CLOSE)
self.add_button(_("Add Application"), Gtk.ResponseType.OK)
self.get_content_area().pack_start(sw, True, True, 0)
self.set_modal(True)
self.set_transient_for(main_window)
self.set_size_request(400,300)
self.listbox = lb
def _sort_apps(self, a, b, user_data):
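        # running applications sort first, then alphabetically by name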
arun = self._running.get(a)
brun = self._running.get(b)
if arun and not brun:
return -1
elif not arun and brun:
return 1
else:
aname = self._all.get(a).get_name()
bname = self._all.get(b).get_name()
if aname < bname:
return -1
elif aname > bname:
return 1
else:
return 0
def _build_widget(self, a, extra):
row = Gtk.ListBoxRow()
g = Gtk.Grid()
if not a.get_name():
return None
icn = a.get_icon()
if icn:
img = Gtk.Image.new_from_gicon(a.get_icon(),Gtk.IconSize.DIALOG)
g.attach(img, 0, 0, 1, 1)
img.props.hexpand = False
else:
img = None #attach_next_to treats this correctly
lbl = Gtk.Label(label=a.get_name(), xalign=0)
g.attach_next_to(lbl,img,Gtk.PositionType.RIGHT,1,1)
lbl.props.hexpand = True
lbl.props.halign = Gtk.Align.START
lbl.props.vexpand = False
lbl.props.valign = Gtk.Align.CENTER
if extra:
g.attach_next_to(
Gtk.Label(label=extra),
lbl,Gtk.PositionType.RIGHT,1,1)
row.add(g)
#row.get_style_context().add_class('tweak-white')
return row
def get_selected_app(self):
row = self.listbox.get_selected_row()
if row:
return self._all.get(row)
return None
class _StartupTweak(Gtk.ListBoxRow, Tweak):
def __init__(self, df, **options):
Gtk.ListBoxRow.__init__(self)
Tweak.__init__(self,
df.get_name(),
df.get_description(),
**options)
grid = Gtk.Grid(column_spacing=10)
icn = df.get_icon()
if icn:
img = Gtk.Image.new_from_gicon(icn,Gtk.IconSize.DIALOG)
grid.attach(img, 0, 0, 1, 1)
else:
img = None #attach_next_to treats this correctly
lbl = Gtk.Label(label=df.get_name(), xalign=0.0)
grid.attach_next_to(lbl,img,Gtk.PositionType.RIGHT,1,1)
lbl.props.hexpand = True
lbl.props.halign = Gtk.Align.START
btn = Gtk.Button(label=_("Remove"))
grid.attach_next_to(btn,lbl,Gtk.PositionType.RIGHT,1,1)
btn.props.vexpand = False
btn.props.valign = Gtk.Align.CENTER
self.add(grid)
self.props.margin_start = 1
self.props.margin_end = 1
self.get_style_context().add_class('tweak-startup')
self.btn = btn
class AddStartupTweak(Gtk.ListBoxRow, Tweak):
def __init__(self, **options):
Gtk.ListBoxRow.__init__(self)
Tweak.__init__(self, _("New startup application"),
_("Add a new application to be run at startup"),
**options)
img = Gtk.Image()
img.set_from_icon_name("list-add-symbolic", Gtk.IconSize.BUTTON)
self.btn = Gtk.Button(label="", image=img, always_show_image=True)
self.btn.get_style_context().remove_class("button")
self.add(self.btn)
self.get_style_context().add_class('tweak-startup')
class AutostartListBoxTweakGroup(ListBoxTweakGroup):
def __init__(self):
tweaks = []
self.asm = AutostartManager()
files = self.asm.get_user_autostart_files()
for f in files:
try:
df = Gio.DesktopAppInfo.new_from_filename(f)
except TypeError:
logging.warning("Error loading desktopfile: %s" % f)
continue
sdf = _StartupTweak(df)
sdf.btn.connect("clicked", self._on_remove_clicked, sdf, df)
tweaks.append( sdf )
add = AddStartupTweak()
add.btn.connect("clicked", self._on_add_clicked)
tweaks.append(add)
ListBoxTweakGroup.__init__(self,
_("Startup Applications"),
*tweaks,
css_class='tweak-group-startup')
self.set_header_func(_list_header_func, None)
def _on_remove_clicked(self, btn, widget, df):
self.remove(widget)
AutostartFile(df).update_start_at_login(False)
def _on_add_clicked(self, btn):
a = _AppChooser(
self.main_window,
set(self._get_running_executables()))
a.show_all()
resp = a.run()
if resp == Gtk.ResponseType.OK:
df = a.get_selected_app()
if df:
AutostartFile(df).update_start_at_login(True)
sdf = _StartupTweak(df)
sdf.btn.connect("clicked", self._on_remove_clicked, sdf, df)
self.add_tweak_row(sdf, 0).show_all()
a.destroy()
def _get_running_executables(self):
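        """Return the basenames of commands in processes owned by the current user."""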
exes = []
cmd = subprocess.Popen([
'ps','-e','-w','-w','-U',
str(os.getuid()),'-o','cmd'],
stdout=subprocess.PIPE)
out = cmd.communicate()[0]
for l in out.split('\n'):
exe = l.split(' ')[0]
if exe and exe[0] != '[': #kernel process
exes.append( os.path.basename(exe) )
return exes
TWEAK_GROUPS = [
AutostartListBoxTweakGroup(),
]
| 1.492188 | 1 |