code | repo_name | path | language | license | size
---|---|---|---|---|---|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.css21 import CSS21Parser
from tinycss.parsing import remove_whitespace, split_on_comma, ParseError
class MediaQuery(object):
__slots__ = 'media_type', 'expressions', 'negated'
def __init__(self, media_type='all', expressions=(), negated=False):
self.media_type = media_type
self.expressions = expressions
self.negated = negated
def __repr__(self):
return '<MediaQuery type=%s negated=%s expressions=%s>' % (
self.media_type, self.negated, self.expressions)
def __eq__(self, other):
return self.media_type == getattr(other, 'media_type', None) and \
self.negated == getattr(other, 'negated', None) and \
self.expressions == getattr(other, 'expressions', None)
class MalformedExpression(Exception):
def __init__(self, tok, msg):
Exception.__init__(self, msg)
self.tok = tok
class CSSMedia3Parser(CSS21Parser):
''' Parse media queries as defined by the CSS 3 media module '''
def parse_media(self, tokens, errors):
if not tokens:
return [MediaQuery('all')]
queries = []
for part in split_on_comma(remove_whitespace(tokens)):
negated = False
media_type = None
expressions = []
try:
for i, tok in enumerate(part):
if i == 0 and tok.type == 'IDENT':
val = tok.value.lower()
if val == 'only':
continue # ignore leading ONLY
if val == 'not':
negated = True
continue
if media_type is None and tok.type == 'IDENT':
media_type = tok.value
continue
elif media_type is None:
media_type = 'all'
if tok.type == 'IDENT' and tok.value.lower() == 'and':
continue
if not tok.is_container:
raise MalformedExpression(tok, 'expected a media expression not a %s' % tok.type)
if tok.type != '(':
raise MalformedExpression(tok, 'media expressions must be in parentheses not %s' % tok.type)
content = remove_whitespace(tok.content)
if len(content) == 0:
raise MalformedExpression(tok, 'media expressions cannot be empty')
if content[0].type != 'IDENT':
raise MalformedExpression(content[0], 'expected a media feature not a %s' % content[0].type)
media_feature, expr = content[0].value, None
if len(content) > 1:
if len(content) < 3:
raise MalformedExpression(content[1], 'malformed media feature definition')
if content[1].type != ':':
raise MalformedExpression(content[1], 'expected a :')
expr = content[2:]
if len(expr) == 1:
expr = expr[0]
elif len(expr) == 3 and (expr[0].type, expr[1].type, expr[1].value, expr[2].type) == (
'INTEGER', 'DELIM', '/', 'INTEGER'):
# This should really be moved into token_data, but
# since RATIO is not part of CSS 2.1 and does not
# occur anywhere else, we special case it here.
r = expr[0]
r.value = (expr[0].value, expr[2].value)
r.type = 'RATIO'
r._as_css = expr[0]._as_css + expr[1]._as_css + expr[2]._as_css
expr = r
else:
raise MalformedExpression(expr[0], 'malformed media feature definition')
expressions.append((media_feature, expr))
except MalformedExpression as err:
errors.append(ParseError(err.tok, err.message))
media_type, negated, expressions = 'all', True, ()
queries.append(MediaQuery(media_type or 'all', expressions=tuple(expressions), negated=negated))
return queries
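# Hedged usage sketch (not part of the original module): drives the parser
# through the stock tinycss entry point ``parse_stylesheet``; the ``.media``
# attribute on @media rules is assumed to behave as in upstream tinycss.
if __name__ == '__main__':
    sheet = CSSMedia3Parser().parse_stylesheet(
        u'@media only screen and (min-width: 600px) { p { color: red } }')
    for rule in sheet.rules:
        # Expect something like: [<MediaQuery type=screen negated=False ...>]
        print(getattr(rule, 'media', None))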
| nozuono/calibre-webserver | src/tinycss/media3.py | Python | gpl-3.0 | 4,645 |
import math
from ..df import DocumentFrequencyVectorCreator
from . import InverseDocumentFrequencyVector
class InverseDocumentFrequencyVectorCreator(DocumentFrequencyVectorCreator):
"""Creates inverse-document-frequency vectors
Inherits from :class:`recommender.vector.abstractvector.VectorCreator`
:parameter sqlite3_connection: connection to a database built with :class:`recommender.vector.vectortablecreator.VectorTableCreator`
:type sqlite3_connection: sqlite3.Connection
:raises: TypeError
"""
def __init__(self, db_connection_str):
super(InverseDocumentFrequencyVectorCreator, self).__init__(db_connection_str)
self._create_inverse_document_frequency_view()
pass
def _create_vector(self, document_id=None):
vector = InverseDocumentFrequencyVector()
with self._get_db_connection() as conn:
cursor = conn.cursor()
self._create_log_function(conn)
values = self._get_vector_values_from_db(cursor)
for value in [] if values is None else values:
vector.add_to_vector(value)
return vector
def _get_vector_values_from_db(self, c):
c.execute(
'''
SELECT
[term_id]
, [name]
, [value]
FROM
[InverseDocumentFrequency]
;
''')
vector_values = []
for result in c.fetchall():
vector_values.append((result[0], result[1], result[2]))
pass
return None if not vector_values else vector_values
def _create_log_function(self, conn):
conn.create_function('log10', 1, InverseDocumentFrequencyVectorCreator.log_10)
pass
@staticmethod
def log_10(x):
"""simply a method calculating log_10 used by the view in :func:`_create_inverse_document_frequency_view`
"""
base = 10
return math.log(x, base)
def _create_inverse_document_frequency_view(self):
"""Creates a view in the database required for building idf-vectors
"""
with self._get_db_connection() as conn:
self._create_log_function(conn)
c = conn.cursor()
c.execute(
'''
CREATE VIEW IF NOT EXISTS [InverseDocumentFrequency] AS
SELECT
[term_id]
, [name]
, log10
(
CAST ((SELECT [document_count] from [N]) AS REAL) / [df].[value]
)
AS [value]
FROM
[DocumentFrequency] AS [df]
ORDER BY
[term_id]
;
''')
pass
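# Hedged worked example (not part of the original module): the view above
# computes the classic idf weight idf(t) = log10(N / df(t)). The helper below
# reproduces that formula on plain numbers, purely for illustration; e.g.
# _example_idf(1000, 10) == 2.0 when a term occurs in 10 of 1000 documents.
def _example_idf(document_count, document_frequency):
    """Illustration only: idf = log10(N / df), mirroring the SQL view."""
    return math.log(float(document_count) / document_frequency, 10)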
| dustywind/bachelor-thesis | impl/recommender/vector/idf/inversedocumentfrequencyvectorcreator.py | Python | mit | 2,882 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ZiplineError(Exception):
msg = None
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.message = str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
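# Hedged usage sketch (not part of the original module): subclasses only
# declare a ``msg`` template; keyword arguments supplied at raise time are
# interpolated by ``__str__`` via ``str.format``. For example, with the
# TransactionWithNoVolume class defined further below:
#
#     try:
#         raise TransactionWithNoVolume(txn="<txn AAPL>")
#     except ZiplineError as e:
#         assert str(e) == "Transaction <txn AAPL> has a volume of zero."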
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the override_slippage magic
with a slippage object that isn't a VolumeShareSlippage or
FixedSlippage
"""
msg = """
You attempted to override slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class OverrideSlippagePostInit(ZiplineError):
# Raised if a user's script calls the override_slippage magic
# after the initialize method has returned.
msg = """
You attempted to override slippage outside of `initialize`. \
You may only call override_slippage in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
# Raised if a user's script registers a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the override_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to override commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class OverrideCommissionPostInit(ZiplineError):
"""
Raised if a user's script calls the override_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call override_commission in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {sid} violates trading constraint {constraint}.
""".strip()
| licco/zipline | zipline/errors.py | Python | apache-2.0 | 4,242 |
# $Id: saslprofile.py,v 1.8 2004/09/28 01:19:20 jpwarren Exp $
# $Revision: 1.8 $
#
# BEEPy - A Python BEEP Library
# Copyright (c) 2002-2004 Justin Warren <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# SASLProfile is the base SASL profile class
# It should be inherited from to implement particular
# SASL mechanisms
__profileClass__ = "SASLProfile"
uri = "http://iana.org/beep/SASL"
import profile
import re
import base64
class SASLProfile(profile.Profile):
""" This is an abstract class to provide the core SASL Profile API
"""
def __init__(self, session):
"""__init__() is used to set up special SASL data such
as certificates, user dbases, etc.
"""
profile.Profile.__init__(self, session)
def decodeBlob(self, data):
"""decodeBlob() extracts the data from the <blob> section of
the payload data and decodes it from base64.
It's really XML, but I don't think using a full parser
is warranted here.
"""
blobPattern = r'<blob>(.*)</blob>'
blobRE = re.compile(blobPattern, re.IGNORECASE | re.DOTALL)
match = re.search(blobRE, data)
if match:
try:
decoded_data = base64.decodestring(match.group(1))
return decoded_data
except Exception, e:
raise SASLProfileException("bad SASL data: %s" % e)
else:
raise SASLProfileException("No blob to decode in datablock")
def encodeBlob(self, data):
"""encodeBlob() takes the data passed in and returns the appropriate
<blob></blob> structure with base64 encoded data.
"""
blob = "<blob>"
blob += base64.encodestring(data)
blob += "</blob>"
return blob
def parseStatus(self, data):
"""parseStatus() extracts the status code from the <blob> block
"""
blobStatusPattern = '<blob\sstatus=[\'"](.*)[\'"]\s*/>'
blobStatusRE = re.compile(blobStatusPattern, re.IGNORECASE)
match = re.search(blobStatusRE, data)
if match:
return match.group(1)
else:
return None
def parseError(self, data):
"""parseError() extracts the error code from the <error> block
"""
blobErrorPattern = '<error\scode=[\'"](.*)[\'"]\s*>(.*)</error>'
blobErrorRE = re.compile(blobErrorPattern, re.IGNORECASE)
match = re.search(blobErrorRE, data)
if match:
code = match.group(1)
errormsg = match.group(2)
return code,errormsg
else:
return None
class SASLProfileException(profile.ProfileException):
def __init__(self, args):
self.args = args
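# Hedged usage sketch (not part of the original module): encodeBlob() and
# decodeBlob() are inverses around a base64 payload wrapped in <blob> tags.
# Assuming ``p`` is a concrete SASLProfile subclass bound to an open session:
#
#     blob = p.encodeBlob('username\0password')  # '<blob>dXNlcm5hbWUA...</blob>'
#     assert p.decodeBlob(blob) == 'username\0password'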
| Wurldtech/beepy | beepy/profiles/saslprofile.py | Python | lgpl-2.1 | 3,491 |
# -*- coding: utf-8 -*-
from datetime import datetime
import bcrypt
import hashlib
import os
import uuid
from flask import current_app as app
from flask.ext.sqlalchemy import SQLAlchemy
from itsdangerous import (BadSignature, SignatureExpired,
TimedJSONWebSignatureSerializer as Serializer)
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql.expression import func
from slugify import slugify
from shiva.utils import MetadataManager
from shiva import dbtypes
db = SQLAlchemy()
__all__ = ('db', 'Artist', 'Album', 'Track', 'LyricsCache', 'User')
def random_row(model):
"""Retrieves a random row for the given model."""
try:
# PostgreSQL, SQLite
instance = model.query.order_by(func.random()).limit(1).first()
except OperationalError:
# MySQL
instance = model.query.order_by(func.rand()).limit(1).first()
return instance
# Table relationships
track_artist = db.Table('trackartist',
db.Column('track_pk', dbtypes.GUID, db.ForeignKey('tracks.pk')),
db.Column('artist_pk', dbtypes.GUID, db.ForeignKey('artists.pk')),
)
track_album = db.Table('trackalbum',
db.Column('track_pk', dbtypes.GUID, db.ForeignKey('tracks.pk')),
db.Column('album_pk', dbtypes.GUID, db.ForeignKey('albums.pk')),
)
class Artist(db.Model):
__tablename__ = 'artists'
pk = db.Column(dbtypes.GUID, default=uuid.uuid4, primary_key=True)
# TODO: Update the files' Metadata when changing this info.
name = db.Column(db.String(128), unique=True, nullable=False)
slug = db.Column(db.String(128))
image = db.Column(db.String(256))
events = db.Column(db.String(256))
date_added = db.Column(db.Date(), nullable=False)
def __init__(self, *args, **kwargs):
if 'date_added' not in kwargs:
kwargs['date_added'] = datetime.today()
super(Artist, self).__init__(*args, **kwargs)
@property
def albums(self):
# FIXME: Optimize. Check comments for Album.artists method.
albums = []
for track in self.tracks:
for album in track.albums:
if album not in albums:
albums.append(album)
return albums
@classmethod
def random(cls):
return random_row(cls)
def __setattr__(self, attr, value):
if attr == 'name':
super(Artist, self).__setattr__('slug', slugify(value))
super(Artist, self).__setattr__(attr, value)
def __repr__(self):
return '<Artist (%s)>' % self.name
class Album(db.Model):
__tablename__ = 'albums'
pk = db.Column(dbtypes.GUID, default=uuid.uuid4, primary_key=True)
name = db.Column(db.String(128), nullable=False)
slug = db.Column(db.String(128))
year = db.Column(db.Integer)
cover = db.Column(db.String(256))
date_added = db.Column(db.Date(), nullable=False)
def __init__(self, *args, **kwargs):
if 'date_added' not in kwargs:
kwargs['date_added'] = datetime.today()
super(Album, self).__init__(*args, **kwargs)
@property
def artists(self):
"""
Calculates the artists for this album by traversing the list of tracks.
This is a terrible way of doing this, but we assume that the worst case
will still be good enough to defer the optimization of this method for
the future.
"""
artists = []
# FIXME: Optimize
for track in self.tracks:
for artist in track.artists:
if artist not in artists:
artists.append(artist)
return artists
@classmethod
def random(cls):
return random_row(cls)
def __setattr__(self, attr, value):
if attr == 'name':
super(Album, self).__setattr__('slug', slugify(value))
super(Album, self).__setattr__(attr, value)
def __repr__(self):
return '<Album (%s)>' % self.name
class Track(db.Model):
__tablename__ = 'tracks'
pk = db.Column(dbtypes.GUID, default=uuid.uuid4, primary_key=True)
path = db.Column(db.Unicode(256), unique=True, nullable=False)
title = db.Column(db.String(128))
slug = db.Column(db.String(128))
bitrate = db.Column(db.Integer)
file_size = db.Column(db.Integer)
length = db.Column(db.Integer)
ordinal = db.Column(db.Integer)
date_added = db.Column(db.Date(), nullable=False)
hash = db.Column(db.String(32))
lyrics = db.relationship('LyricsCache', backref='tracks', uselist=False)
albums = db.relationship('Album', secondary=track_album, lazy='dynamic',
backref=db.backref('tracks', lazy='dynamic'))
artists = db.relationship('Artist', secondary=track_artist, lazy='dynamic',
backref=db.backref('tracks', lazy='dynamic'))
def __init__(self, path, *args, **kwargs):
if not isinstance(path, (basestring, file)):
raise ValueError('Invalid parameter for Track. Path or File '
'expected, got %s' % type(path))
_path = path
if isinstance(path, file):
_path = path.name
no_metadata = kwargs.get('no_metadata', False)
if 'no_metadata' in kwargs:
del(kwargs['no_metadata'])
hash_file = kwargs.get('hash_file', False)
if 'hash_file' in kwargs:
del(kwargs['hash_file'])
self._meta = None
self.set_path(_path, no_metadata=no_metadata)
if hash_file:
self.hash = self.calculate_hash()
if 'date_added' not in kwargs:
kwargs['date_added'] = datetime.today()
super(Track, self).__init__(*args, **kwargs)
@classmethod
def random(cls):
return random_row(cls)
def __setattr__(self, attr, value):
if attr == 'title':
slug = slugify(value) if value else None
super(Track, self).__setattr__('slug', slug)
super(Track, self).__setattr__(attr, value)
def get_path(self):
if self.path:
return self.path.encode('utf-8')
return None
def set_path(self, path, no_metadata=False):
if path != self.get_path():
self.path = path
if no_metadata:
return None
if os.path.exists(self.get_path()):
meta = self.get_metadata_reader()
self.file_size = meta.filesize
self.bitrate = meta.bitrate
self.length = meta.length
self.ordinal = meta.track_number
self.title = meta.title
def calculate_hash(self):
md5 = hashlib.md5()
block_size = 128 * md5.block_size
with open(self.get_path(), 'rb') as f:
for chunk in iter(lambda: f.read(block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def get_metadata_reader(self):
"""Return a MetadataManager object."""
if not getattr(self, '_meta', None):
self._meta = MetadataManager(self.get_path())
return self._meta
def __repr__(self):
return "<Track ('%s')>" % self.title
class TrackPlaylistRelationship(db.Model):
__tablename__ = 'trackplaylist'
pk = db.Column(dbtypes.GUID, default=uuid.uuid4, primary_key=True)
track_pk = db.Column(dbtypes.GUID, db.ForeignKey('tracks.pk'),
nullable=False)
playlist_pk = db.Column(dbtypes.GUID, db.ForeignKey('playlists.pk'),
nullable=False)
previous_track_pk = db.Column(dbtypes.GUID,
db.ForeignKey('trackplaylist.pk'))
track = db.relationship('Track')
playlist = db.relationship('Playlist')
previous_track = db.relationship('TrackPlaylistRelationship',
uselist=False)
def __repr__(self):
return "<TrackPlaylistRelationship ('%s')>" % (self.pk)
class Playlist(db.Model):
__tablename__ = 'playlists'
pk = db.Column(dbtypes.GUID, default=uuid.uuid4, primary_key=True)
name = db.Column(db.String(128), nullable=False)
read_only = db.Column(db.Boolean, nullable=False, default=True)
user_pk = db.Column(dbtypes.GUID, db.ForeignKey('users.pk'),
nullable=False)
creation_date = db.Column(db.DateTime, nullable=False)
user = db.relationship('User')
tracks = db.relationship('Track', backref='playlists', lazy='dynamic',
secondary='trackplaylist',
primaryjoin=(pk == TrackPlaylistRelationship.playlist_pk))
def __init__(self, *args, **kwargs):
kwargs['creation_date'] = datetime.now()
super(Playlist, self).__init__(*args, **kwargs)
def remove_at(self, index=None):
"""
Removes an item from the playlist. The playlist is a linked list, so
this method takes care of removing the element and updating any links
to it.
"""
try:
index = int(index)
except:
raise ValueError
if index < 0:
raise ValueError
query = TrackPlaylistRelationship.query.filter_by(playlist=self)
count = query.count()
if index >= count:
raise IndexError
# Playlist-track relationship
r_track = self.get_track_at(index)
next_track = TrackPlaylistRelationship.query.filter(
TrackPlaylistRelationship.playlist == self,
TrackPlaylistRelationship.previous_track == r_track).first()
if next_track:
# Update linked list
next_track.previous_track = r_track.previous_track
db.session.add(next_track)
# import ipdb; ipdb.set_trace()
db.session.delete(r_track)
db.session.commit()
def insert(self, index, track):
"""
Inserts a track into the playlist. The playlist tracks are structured as
a linked list; to insert an item, this method finds the item at the
target position and updates the links on both sides.
If the value None is given as index, the track will be appended at the
end of the list.
"""
if index is not None:
try:
index = int(index)
except:
raise ValueError
if index < 0:
raise ValueError
if track is None:
raise ValueError
rel = TrackPlaylistRelationship(playlist=self, track=track)
query = TrackPlaylistRelationship.query.filter_by(playlist=self)
count = query.count()
if index is None:
index = count
if count == 0 and index > 0:
raise ValueError
if count > 0:
if index > count:
raise ValueError
# r_track is not an actual track, but a relationship between the
# playlist and a track.
if index == count: # Append at the end
r_track = self.get_track_at(index - 1)
rel.previous_track = r_track
else:
r_track = self.get_track_at(index)
if not r_track:
raise ValueError
rel.previous_track = r_track.previous_track
r_track.previous_track = rel
db.session.add(r_track)
db.session.add(rel)
db.session.commit()
def get_track_at(self, index):
"""
This method finds the track at position `index` in the current
playlist. Will return None if the track is not present.
It fetches the playlist's parent (the track with `previous_track_pk`
None) and queries for each susequent item until the requested item is
found. This implementation is the slowest, but for now is ok because
is also the simplest.
This is a very good candidate for optimization.
"""
counter = 0
# Get the parent
track = TrackPlaylistRelationship.query.filter_by(
playlist=self, previous_track=None).first()
while True:
if counter == index:
return track
elif counter > index:
return None
track = TrackPlaylistRelationship.query.filter(
TrackPlaylistRelationship.playlist == self,
TrackPlaylistRelationship.previous_track == track).first()
counter += 1
@property
def length(self):
query = TrackPlaylistRelationship.query.filter_by(playlist=self)
return query.count()
def __repr__(self):
return "<Playlist ('%s')" % self.name
class LyricsCache(db.Model):
__tablename__ = 'lyricscache'
pk = db.Column(dbtypes.GUID, default=uuid.uuid4, primary_key=True)
text = db.Column(db.Text)
source = db.Column(db.String(256))
track_pk = db.Column(dbtypes.GUID, db.ForeignKey('tracks.pk'),
nullable=False)
def __repr__(self):
return "<LyricsCache ('%s')>" % self.track.title
class User(db.Model):
__tablename__ = 'users'
pk = db.Column(dbtypes.GUID, default=uuid.uuid4, primary_key=True)
display_name = db.Column(db.String(256))
email = db.Column(db.String(256), unique=True, nullable=False)
password = db.Column(db.String(256), nullable=True)
salt = db.Column(db.String(256), nullable=True)
# Metadata
# Should these attributes be in their own table?
is_public = db.Column(db.Boolean, nullable=False, default=False)
is_active = db.Column(db.Boolean, nullable=False, default=False)
is_admin = db.Column(db.Boolean, nullable=False, default=False)
creation_date = db.Column(db.DateTime, nullable=False)
def __init__(self, *args, **kwargs):
kwargs['creation_date'] = datetime.now()
super(User, self).__init__(*args, **kwargs)
def __setattr__(self, *args, **kwargs):
if args[0] == 'password':
password = args[1]
salt = None
if password not in (None, ''):
password, salt = self.hash_password(password)
self.salt = salt
args = ('password', password)
super(User, self).__setattr__(*args, **kwargs)
def hash_password(self, password, salt=None):
salt = salt or self.salt or bcrypt.gensalt()
_pass = bcrypt.hashpw(password.encode('utf-8'), salt.encode('utf-8'))
return (_pass, salt)
def verify_password(self, password):
_password, salt = self.hash_password(password)
return _password == self.password
def generate_auth_token(self, expiration=None):
if not expiration:
expiration = app.config.get('AUTH_EXPIRATION_TIME', 3600)
if not isinstance(expiration, int):
raise ValueError
s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'pk': str(self.pk)})
@staticmethod
def verify_auth_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (SignatureExpired, BadSignature):
return None
user = User.query.get(data['pk'])
return user
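    # Hedged usage sketch (not part of the original class): token round trip,
    # assuming a Flask application context with SECRET_KEY configured.
    #
    #     token = user.generate_auth_token(expiration=3600)
    #     assert User.verify_auth_token(token).pk == user.pk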
def __repr__(self):
return "<User ('%s')>" % self.email
| maurodelazeri/shiva-server | shiva/models.py | Python | mit | 15,321 |
# -*- coding:utf-8 -*-
## @package JsMVA.JsMVAMagic
# @author Attila Bagoly <[email protected]>
# IPython magic class for JsMVA
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
@magics_class
class JsMVAMagic(Magics):
## Standard constructor
# @param self pointer to object
# @param shell ipython API
def __init__(self, shell):
super(JsMVAMagic, self).__init__(shell)
## jsmva magic
# @param self pointer to object
# @param line jsmva arguments: on/off
@line_magic
@magic_arguments()
@argument('arg', nargs="?", default="on", help='Enable/Disable JavaScript visualisation for TMVA')
def jsmva(self, line):
from JPyInterface import functions
args = parse_argstring(self.jsmva, line)
if args.arg == 'on':
functions.register()
elif args.arg == 'off':
functions.unregister()
elif args.arg == "noOutput":
functions.register(True)
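    # Hedged usage sketch (not part of the original class): in a notebook the
    # magic would typically be driven like this, assuming the module is
    # importable as JsMVA.JsMVAMagic:
    #
    #     %load_ext JsMVA.JsMVAMagic
    #     %jsmva on        # enable JavaScript visualisation for TMVA
    #     %jsmva noOutput  # register the wrappers with output suppressed
    #     %jsmva off       # restore the default TMVA output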
## Function for registering the magic class
def load_ipython_extension(ipython):
ipython.register_magics(JsMVAMagic)
| Y--/root | bindings/pyroot/JsMVA/JsMVAMagic.py | Python | lgpl-2.1 | 1,182 |
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
logger = logging.getLogger(__name__)
ROOT = os.path.dirname(__file__)
DEFAULT_FILE = os.path.join(ROOT, 'VERSION')
def read_version(versionfile=DEFAULT_FILE):
try:
with open(versionfile) as f:
return f.read().strip()
except IOError: # pragma: no cover
logger.error('Running an unknown version of senpy. Be careful!.')
return 'devel'
__version__ = read_version()
| gsi-upm/senpy | senpy/version.py | Python | apache-2.0 | 1,082 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.featurizers import Featurizer, BinaryFeaturizer, \
ProbabilisticFeaturizer
import numpy as np
def test_fail_to_load_non_existent_featurizer():
assert Featurizer.load("non_existent_class") is None
def test_binary_featurizer_handles_on_non_existing_features():
f = BinaryFeaturizer()
encoded = f.encode({"a": 1.0, "b": 1.0, "c": 0.0, "e": 1.0},
{"a": 0, "b": 3, "c": 2, "d": 1})
assert (encoded == np.array([1, 0, 0, 1])).all()
def test_binary_featurizer_uses_correct_dtype_int():
f = BinaryFeaturizer()
encoded = f.encode({"a": 1.0, "b": 1.0, "c": 0.0}, {"a": 0, "b": 3, "c": 2,
"d": 1})
assert encoded.dtype == np.int32
def test_binary_featurizer_uses_correct_dtype_float():
f = BinaryFeaturizer()
encoded = f.encode({"a": 1.0, "b": 0.2, "c": 0.0}, {"a": 0, "b": 3, "c": 2,
"d": 1})
assert encoded.dtype == np.float64
def test_probabilistic_featurizer_handles_on_non_existing_features():
f = ProbabilisticFeaturizer()
encoded = f.encode({"a": 1.0, "b": 0.2, "c": 0.0, "e": 1.0},
{"a": 0, "b": 3, "c": 2, "d": 1})
assert (encoded == np.array([1, 0, 0, 0.2])).all()
| deepak02/rasa_core | tests/test_featurizer.py | Python | apache-2.0 | 1,453 |
import unittest2 as unittest
from mock import Mock, patch
from time import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))) + '/src')
sys.path.insert(0, dirname(dirname(abspath(__file__))) + '/src/analyzer')
import algorithms
import settings
class TestAlgorithms(unittest.TestCase):
"""
Test all algorithms with a common, simple/known anomalous data set
"""
def _addSkip(self, test, reason):
print reason
def data(self, ts):
"""
Mostly ones (1), with a final value of 1000
"""
timeseries = map(list, zip(map(float, range(int(ts) - 86400, int(ts) + 1)), [1] * 86401))
timeseries[-1][1] = 1000
timeseries[-2][1] = 1
timeseries[-3][1] = 1
return ts, timeseries
def test_tail_avg(self):
_, timeseries = self.data(time())
self.assertEqual(algorithms.tail_avg(timeseries), 334)
def test_grubbs(self):
_, timeseries = self.data(time())
self.assertTrue(algorithms.grubbs(timeseries))
@patch.object(algorithms, 'time')
def test_first_hour_average(self, timeMock):
timeMock.return_value, timeseries = self.data(time())
self.assertTrue(algorithms.first_hour_average(timeseries))
def test_stddev_from_average(self):
_, timeseries = self.data(time())
self.assertTrue(algorithms.stddev_from_average(timeseries))
def test_stddev_from_moving_average(self):
_, timeseries = self.data(time())
self.assertTrue(algorithms.stddev_from_moving_average(timeseries))
def test_mean_subtraction_cumulation(self):
_, timeseries = self.data(time())
self.assertTrue(algorithms.mean_subtraction_cumulation(timeseries))
@patch.object(algorithms, 'time')
def test_least_squares(self, timeMock):
timeMock.return_value, timeseries = self.data(time())
self.assertTrue(algorithms.least_squares(timeseries))
def test_histogram_bins(self):
_, timeseries = self.data(time())
self.assertTrue(algorithms.histogram_bins(timeseries))
@patch.object(algorithms, 'time')
def test_run_selected_algorithm(self, timeMock):
timeMock.return_value, timeseries = self.data(time())
result, ensemble, datapoint = algorithms.run_selected_algorithm(timeseries, "test.metric")
self.assertTrue(result)
self.assertTrue(len(filter(None, ensemble)) >= settings.CONSENSUS)
self.assertEqual(datapoint, 1000)
@unittest.skip('Fails inexplicably in certain environments.')
@patch.object(algorithms, 'CONSENSUS')
@patch.object(algorithms, 'ALGORITHMS')
@patch.object(algorithms, 'time')
def test_run_selected_algorithm_runs_novel_algorithm(self, timeMock,
algorithmsListMock, consensusMock):
"""
Assert that a user can add their own custom algorithm.
This mocks out settings.ALGORITHMS and settings.CONSENSUS to use only a
single custom-defined function (alwaysTrue)
"""
algorithmsListMock.__iter__.return_value = ['alwaysTrue']
consensusMock = 1
timeMock.return_value, timeseries = self.data(time())
alwaysTrue = Mock(return_value=True)
with patch.dict(algorithms.__dict__, {'alwaysTrue': alwaysTrue}):
result, ensemble, tail_avg = algorithms.run_selected_algorithm(timeseries)
alwaysTrue.assert_called_with(timeseries)
self.assertTrue(result)
self.assertEqual(ensemble, [True])
self.assertEqual(tail_avg, 334)
if __name__ == '__main__':
unittest.main()
| sdgdsffdsfff/skyline | tests/algorithms_test.py | Python | mit | 3,680 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 jmesteve All Rights Reserved
# https://github.com/jmesteve
# <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/openerpseda | openerp/addons_extra/purchase_draft/__init__.py | Python | agpl-3.0 | 1,129 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from canvas.redis_models import redis, RedisSet
class Migration(DataMigration):
def forwards(self, orm):
User = orm['auth.User']
for x in range(40):
day = datetime.date.today() - datetime.timedelta(x)
dayset = RedisSet('metrics:signup:%s:uniques' % day.strftime("%Y.%m.%d"))
for user_id in User.objects.filter(date_joined__range=(day, day+datetime.timedelta(1))).values_list('id', flat=True):
dayset.sadd(user_id)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'allow_textonlyop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'disable_remix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.externalcontent': {
'Meta': {'object_name': 'ExternalContent'},
'_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
| canvasnetworks/canvas | website/canvas/migrations/0138_populate_daily_signup_uniques.py | Python | bsd-3-clause | 19,964 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Enrique Henestroza Anguiano
#
"""
DTBParser utility constants, dictionaries, etc.
"""
import re
import unicodedata
import copy
import sys
import os
# Enumerate CONLL fields (with some added ones)
CONLLNUM = 15
ID,TOK,LEM,CPS,FPS,FEAT,GOV,LAB,MAP,GOLD,LCH,RCH,LABS,OBJ,REFL =range(CONLLNUM)
# Enumerate additional structural feature classes for a token.
TOKENFEATNUM = 22
TOKENTOTFEATNUM = CONLLNUM+TOKENFEATNUM
LVAL,LTOK,LLEM,LFPS,LCPS,LLAB,RVAL,RTOK,RLEM,RFPS,RCPS,RLAB,HTOK,HLEM,HFPS,\
HCPS,HLAB,OTOK,OLEM,OFPS,OCPS,OLAB = range(CONLLNUM,TOKENTOTFEATNUM)
# Enumerate additional Parse Correction feature classes for a cand-dep pair.
CANDFEATNUM = 15
ISPR,GDIS,GPTH,LDIS,LDIR,NDEP,PUNC,PFPS,PCPS,PLAB,MFPS,MCPS,MLAB,SLPF,SBCT =\
range(CANDFEATNUM)
# Basic feature items.
FEATITEMNUM = 10
S_2,S_1,S_0,N_0,N_1,N_2,N_3,D_0,C_0,C_D = range(FEATITEMNUM)
# Open part-of-speech categories.
OPENCPOS = set(["A", "ADV", "N", "V"])
# Enumerate weight functions for easy access
RELFREQ, CHISQ, TTEST, BINOM, PMI, LRATIO = range(6)
WGTIDS = {"relfreq":0, "chisq":1, "ttest":2, "binom":3, "pmi":4, "lratio":5}
WGTMIN = {"relfreq":0.0, "chisq":-1.0, "ttest":-1.0, "binom":-1.0, \
"pmi":-1.0, "lratio":0.0}
# Encoding - make common to all modules
ENCODING = "utf-8"
# Stop-list for LEM clustering for verbes auxiliaires.
STOP = set([u"avoir", u"être"])
# Labels
LABELID = ('aff','a_obj','arg','ato','ats','aux_caus','aux_pass','aux_tps',\
'comp','coord','de_obj','dep','dep_coord','det','mod','mod_rel',\
'obj','p_obj','ponct','root','suj')
LABELENUM = tuple(enumerate(LABELID))
IDLABEL = dict([(y,x) for x,y in enumerate(LABELID)])
NUMLABEL = len(LABELID)
# Transitions (only arc-eager uses REDC)
SHFT = 0
ARCL = 1
ARCR = 2
REDC = 3
# Parser Models (only arc-eager-mc uses MRMCN)
MTRNS = 0
MLLAB = 1
MRLAB = 2
MRMCN = 3
# Corrector Models
MCMCN = 0
MCLAB = 1
# ** USER MODIFIED ** Location of preposition list and feature templates.
DATADIR = os.getenv('FREDIST')+'/data'
TEMPLATEDIR = DATADIR
PREPLIST = DATADIR+'/preps_autopos_autolem.txt'
# Intermediate POS between fine and coarse.
INTERPOS = {'V':'V', 'VINF':'VINF', 'VIMP':'V', 'VPP':'VPP', 'VPR':'VPR',\
'NC':'N', 'NPP':'N', 'CS':'CS', 'CC':'CC', 'CLS':'CL', 'CLO':'CL',\
'CLR':'CLR', 'CL':'CL', 'ADJ':'A', 'ADJWH':'A', 'ADV':'ADV',\
'ADVWH':'ADV', 'PRO':'PRO', 'PROREL':'PROREL', 'PROWH':'PRO',\
'DET':'D', 'DETWH':'D', 'P':'P', 'P+D':'P', 'ET':'ET', 'I':'I',\
'PONCT':'PONCT', 'PREF':'PREF', 'VS':'V', 'P+PRO':'P'}
#
# FUNCTIONS
#
#
# Check for punctuation as-per eval07.pl
#
def is_punct(tok):
for ch in tok:
if unicodedata.category(ch)[0] != 'P':
return False
return True
#
# Grouping of some lemmas, for distributional methods.
#
def pretreat_lem(lem, cpos):
tlem = lem
# Group lemmas containing any numbers
if re.search('\d', tlem, re.UNICODE):
tlem = u'<NUM>'
else:
# Group open-pos lemmas if not alpha-numeric (except meta-characters)
relem = re.sub(r'[\_\-\']', r'', tlem)
if cpos in OPENCPOS and re.search('\W', relem, re.UNICODE):
tlem = u'<NAN>'
return tlem
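# Hedged illustration (not part of the original module): a few inputs and the
# groupings they receive under the default OPENCPOS set above.
#
#     pretreat_lem(u"2014", "N")        -> u'<NUM>'       (contains a digit)
#     pretreat_lem(u"@plop!", "N")      -> u'<NAN>'       (open POS, non-alphanumeric)
#     pretreat_lem(u"porte-clefs", "N") -> u'porte-clefs' (hyphens are allowed)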
#
# Read a CONLL sentence from a filestream.
#
def read_conll(instream, mode="parse", refl=True):
tsent = [()] # Sentence tokens start at id=1.
osent = []
for line in instream:
if line.rstrip() == "":
# Add gold OBJ and REFL information.
if mode in ["extract", "correct"]:
for did in range(1, len(tsent)):
dep = tsent[did]
gid = dep[GOV]
gov = tsent[gid]
lab = dep[LAB]
# Add right- and left- children.
if gid != 0: # and dep[LAB] != "ponct":
if did < gid:
gov[LCH].append(did)
else:
gov[RCH].append(did)
# Add objects for pp-attachment and coordination.
if gid != 0 and\
((gov[FPS] in ["P", "P+D"] and lab == "obj") or \
(gov[FPS] in ["CC"] and lab == "dep_coord")):
# Favor obj closest on the right
if not gov[OBJ] or \
(gid < did and (gov[OBJ] < gid or \
did < gov[OBJ])) or \
(did < gid and gov[OBJ] < gid and \
did > gov[OBJ]):
gov[OBJ] = did
# Add reflexive marker to lemmas.
if dep[FPS] == "CLR" and gid != 0:
# Favor reflexive closest on the left
if not gov[REFL] or \
(gid > did and (gov[REFL] > gid or \
did > gov[OBJ])) or \
(did > gid and gov[OBJ] > gid and \
did < gov[OBJ]):
gov[REFL] = did
if refl and gov[CPS] == "V":
# Check for 'faire' dep in between.
found_faire = False
if did < gid:
for fid in range(did+1, gid):
if tsent[fid][LEM] == u"faire":
found_faire = True
break
if not found_faire:
gov[LEM] = u"se_"+gov[LEM]
# if tsent[gid][FPS] == "V":
# tsent[gid][LEM] = u"se_"+tsent[gid][LEM]
# # For reparsing, change lemma (for scores) but
# # leave map as-is (for other features).
# if mode == "extract":
# fields_map = []
# for lem,wgt in tsent[gid][MAP]:
# fields_map.append((u"se_"+lem,wgt))
# tsent[gid][MAP] = fields_map
yield osent, tsent
tsent = [()]
osent = []
else:
fields = line.rstrip().split('\t')
osent.append(copy.deepcopy(fields))
# Modify fields required for treating the sentence.
fields[ID] = int(fields[ID])
# if mode in ["correct"]:
# fields[FPS] = INTERPOS[fields[FPS]]
if mode in ["extract"]:
fields[LEM] = pretreat_lem(fields[LEM], fields[CPS])
fields[GOLD] = None
if fields[MAP] == "_" or fields[LEM] in STOP:
fields[MAP] = {fields[LEM]:1.0}
else:
# Combine possible grouped lemmas
fields_map = {}
for x in fields[MAP].split('|'):
lem, wgt = x.rsplit('=', 1)
# lem = pretreat_lem(lem, fields[CPS]) #Assume pretreated!
wgt = float(wgt)
fields_map[lem] = fields_map.get(lem, 0.0) + wgt
fields[MAP] = fields_map
fields[MAP] = fields[MAP].items()
fields_feat = {}
if fields[FEAT] != "_":
for feat in fields[FEAT].split("|"):
f,v = feat.split("=")
fields_feat[f] = v
fields[FEAT] = fields_feat
fields[GOV] = -1 if fields[GOV] == "_" else int(fields[GOV])
tsent.append(fields + [[], [], {}, None, None])
#
# Convert sentence from original+treated lists to CONLL string.
#
def sentence_to_conll(osent, tsent):
for tok in osent:
if tsent:
tok[GOV] = str(tsent[int(tok[ID])][GOV])
tok[LAB] = str(tsent[int(tok[ID])][LAB])
yield "\t".join(tok)
yield ""
#
# Obtain a neighborhood surrounding a dependent's predicted governor.
# Optionally restrict the CPOS of candidate governors, and ignore certain
# CPOS for the purpose of projectivity constraints. Predicted governor
# ALWAYS returned (in 0th index).
#
def neighborhood(sent, did, neigh="dist-3", ipos=['PONCT']):
d = sent[did]
gid = d[GOV]
g = sent[gid]
# if neigh == "binary":
# cands = [gid]
# # If g is a N, it must be the object of a V
# if g[CPOS] == "N" and g[LAB] == "obj" and \
# sent[g[GOV]][CPOS] == "V" and \
# sent[g[GOV]][ID] < gid and gid < did:
# if not cpos or "V" in cpos:
# cands.append(g[GOV])
# # If g is a V, it must have an intervening N object
# elif g[CPOS] == "V" and g[OBJ] and \
# sent[g[OBJ]][CPOS] == "N" and \
# gid < g[OBJ] and g[OBJ] < did:
# if not cpos or "N" in cpos:
# cands.append(g[OBJ])
# return cands
if neigh.startswith("dist"):
type_dist = neigh.split("-")
dist = None
if len(type_dist) == 2:
dist = int(type_dist[1])
if dist and dist < 2:
return [(gid, "1-0", sent[gid][FPS])]
# Work way out
cands = [] # list of (cid, cdist, cpath)
seenc = set()
hid = gid # First "up" node is the predicted governor
hdist = 0
hpath = []
while hid and (not dist or hdist < dist):
hpath.append(sent[hid][FPS])
cands.append((hid, str(hdist+1)+"-0", "-".join(hpath)))
seenc.add(hid)
hdist += 1
# Children of h nearest and to the left/right of d
h_lid = None
h_rid = None
ch = sent[hid][LCH] if hid > did else sent[hid][RCH]
for cid in ch:
if sent[cid][FPS] not in ipos:
if cid < did:
h_lid = cid
elif cid > did:
h_rid = cid
break
h_lid = None if h_lid in seenc else h_lid
h_rid = None if h_rid in seenc else h_rid
# Work way down right-most children of h_lid
cid = h_lid
hcpath = []
cdist = 0
while cid and (not dist or hdist + cdist < dist):
hcpath.append(sent[cid][FPS])
cands.append((cid, str(hdist)+"-"+str(cdist+1),\
"-".join(hpath+hcpath)))
seenc.add(cid)
c_rid = None
for rid in sent[cid][RCH]:
if sent[rid][FPS] not in ipos:
c_rid = rid
cid = c_rid
cdist += 1
# Work way down left-most children of h_rid
cid = h_rid
hcpath = []
cdist = 0
while cid and (not dist or hdist + cdist < dist):
hcpath.append(sent[cid][FPS])
cands.append((cid, str(hdist)+"-"+str(cdist+1),\
"-".join(hpath+hcpath)))
seenc.add(cid)
c_lid = None
for lid in sent[cid][LCH]:
if sent[lid][FPS] not in ipos:
c_lid = lid
break
cid = c_lid
cdist += 1
# Continue up according to certain conditions
h_hid = sent[hid][GOV]
if h_hid == 0 or (hid < did and h_rid or hid > did and h_lid):
hid = None
else:
hid = h_hid
return cands
| enhean/fredist | src/fredist/dtbutils.py | Python | gpl-3.0 | 11,833 |
__author__ = 'sibirrer'
import numpy as np
import pickle
import os.path
from scipy import integrate
import astrofunc.util as util
class BarkanaIntegrals(object):
def I1(self, nu1, nu2, s_, gamma):
"""
integral of Barkana et al. (18)
        :param nu1:
        :param nu2:
:param s_:
:param gamma:
:return:
"""
return self.I1_numeric(nu1, nu2, s_, gamma)
# if not hasattr(self,'I1_interp'):
# self.open_I1()
# return self.I1_interp(nu2, s_, gamma)
def write_I1(self):
self.interp_I1() # creating self.I1_interp
f = open('Interpolations/I1_interp.txt', 'wb')
pickle.dump(self.I1_interp,f)
f.close()
        print 'file I1_interp.txt newly written'
def open_I1(self):
if not os.path.isfile('Interpolations/I1_interp.txt'):
self.write_I1()
f = open('Interpolations/I1_interp.txt','rb')
self.I1_interp = pickle.load(f)
f.close()
print 'I1 opened'
def interp_I1(self):
pass
def _I1_intg(self, nu, s_, gamma):
return nu**(-gamma)* self._f(nu-s_)
def I1_numeric(self, nu1, nu2, s_, gamma):
nu1 = util.mk_array(nu1)
nu2 = util.mk_array(nu2)
s_ = util.mk_array(s_)
I1 = np.empty_like(nu2)
for i in range(len(nu2)):
nu_min = nu1[i]
nu_max = nu2[i]
result, error = integrate.quad(self._I1_intg,nu_min,nu_max,args=(s_[i], gamma))
I1[i] = result
return I1
def I2(self, nu1, nu2, s_, gamma):
"""
integral of Barkana et al. (18)
        :param nu1:
        :param nu2:
:param s_:
:param gamma:
:return:
"""
return self.I2_numeric(nu1, nu2, s_, gamma)
# if not hasattr(self,'I2_interp'):
# self.open_I2()
# return self.I2_interp(nu2, s_, gamma)
def write_I2(self):
self.interp_I2() # creating self.I2_interp
f = open('Interpolations/I2_interp.txt', 'wb')
pickle.dump(self.I2_interp,f)
f.close()
        print 'file I2_interp.txt newly written'
def open_I2(self):
if not os.path.isfile('Interpolations/I2_interp.txt'):
self.write_I2()
f = open('Interpolations/I2_interp.txt','rb')
self.I2_interp = pickle.load(f)
f.close()
        print 'I2 opened'
def interp_I2(self):
pass
def _I2_intg(self, nu, s_, gamma):
return nu**(-gamma)* self._f(s_-nu)
def I2_numeric(self, nu1, nu2, s_, gamma):
nu1 = util.mk_array(nu1)
nu2 = util.mk_array(nu2)
s_ = util.mk_array(s_)
I2 = np.empty_like(nu2)
for i in range(len(nu2)):
nu_min = nu1[i]
nu_max = nu2[i]
result, error = integrate.quad(self._I2_intg,nu_min,nu_max,args=(s_[i], gamma))
I2[i] = result
return I2
def I3(self, nu2, s_, gamma):
"""
integral of Barkana et al. (23)
:param nu2:
:param s_:
:param gamma:
:return:
"""
return self.I3_numeric(nu2, s_, gamma)
# if not hasattr(self,'I3_interp'):
# self.open_I3()
# return self.I3_interp(nu2, s_, gamma)
def write_I3(self):
self.interp_I3() # creating self.I3_interp
f = open('Interpolations/I3_interp.txt', 'wb')
pickle.dump(self.I3_interp,f)
f.close()
        print 'file I3_interp.txt newly written'
def open_I3(self):
if not os.path.isfile('Interpolations/I3_interp.txt'):
self.write_I3()
f = open('Interpolations/I3_interp.txt','rb')
self.I3_interp = pickle.load(f)
f.close()
print 'I3 opened'
def interp_I3(self):
pass
def _I3_intg(self, nu, s_, gamma):
return nu**(-gamma) * self._f_deriv(nu-s_)
def I3_numeric(self, nu2, s_, gamma):
nu_min = 0
nu2 = util.mk_array(nu2)
s_ = util.mk_array(s_)
I3 = np.empty_like(nu2)
for i in range(len(nu2)):
nu_max = nu2[i]
result, error = integrate.quad(self._I3_intg,nu_min,nu_max,args=(s_[i], gamma))
I3[i] = result
return I3
def I4(self, nu2, s_, gamma):
"""
integral of Barkana et al. (23)
:param nu2:
:param s_:
:param gamma:
:return:
"""
return self.I4_numeric(nu2, s_, gamma)
# if not hasattr(self,'I4_interp'):
# self.open_I4()
# return self.I4_interp(nu2, s_, gamma)
def write_I4(self):
self.interp_I4() # creating self.I4_interp
f = open('Interpolations/I4_interp.txt', 'wb')
pickle.dump(self.I4_interp,f)
f.close()
        print 'file I4_interp.txt newly written'
def open_I4(self):
if not os.path.isfile('Interpolations/I4_interp.txt'):
self.write_I4()
f = open('Interpolations/I4_interp.txt','rb')
self.I4_interp = pickle.load(f)
f.close()
print 'I4 opened'
def interp_I4(self):
pass
def _I4_intg(self, nu, s_, gamma):
return nu**(-gamma) * self._f_deriv(s_-nu)
def I4_numeric(self, nu2, s_, gamma):
nu_min = 0
nu2 = util.mk_array(nu2)
s_ = util.mk_array(s_)
I4 = np.empty_like(nu2)
for i in range(len(nu2)):
nu_max = nu2[i]
result, error = integrate.quad(self._I4_intg,nu_min,nu_max,args=(s_[i], gamma))
I4[i] = result
return I4
def _f(self, mu):
"""
f(mu) function (eq 15 in Barkana et al.)
:param mu:
:return:
"""
return np.sqrt(1/np.sqrt(1+mu**2) - mu/(mu**2+1))
def _f_deriv(self, mu):
"""
        f'(mu) function (derivative of eq 15 in Barkana et al.)
:param mu:
:return:
"""
a = np.sqrt(mu**2+1)
term1 = -mu*np.sqrt(a-mu) / a**3
term2 = -(a -mu) / (2*(mu**2+1)*np.sqrt(a-mu))
return term1 + term2 | sibirrer/astrofunc | astrofunc/LensingProfiles/barkana_integrals.py | Python | mit | 6,043 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# superuser.py file is part of slpkg.
# Copyright 2014-2021 Dimitris Zlatanidis <[email protected]>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://gitlab.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import getpass
def s_user():
"""Check for root user
"""
if getpass.getuser() != "root":
print("\nslpkg: Error: Must have root privileges\n")
raise SystemExit(1)
| dslackw/slpkg | slpkg/superuser.py | Python | gpl-3.0 | 1,096 |
## Copyright 2003-2006 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from lino.adamo.table import DEFAULT_PRIMARY_KEY
from lino.adamo.row import StoredDataRow, LinkingRow,\
MemoRow, TreeRow, MemoTreeRow,\
BabelRow
from lino.adamo.schema import Schema
#from lino.adamo.dbforms import DbMainForm
from lino.adamo.datatypes import *
from lino.adamo.exceptions import *
from lino.adamo.store import Populator
from lino.adamo.dbreports import DataReport
#from lino.adamo.row import DataRow
#from lino.adamo.schema import DbApplication
from lino.forms.dbforms import ReportForm, DbMainForm, DbApplication
__all__ = filter(lambda x: x[0] != "_", dir())
| MaxTyutyunnikov/lino | obsolete/src/lino/adamo/ddl.py | Python | gpl-3.0 | 1,364 |
import theano
import theano.tensor as T
import numpy as np
import sys
sys.path.insert(0, '../data_loader/')
import load
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
# load data
x_train, t_train, x_test, t_test = load.cifar10(dtype=theano.config.floatX, grayscale=False)
labels_test = np.argmax(t_test, axis=1)
# reshape data
x_train = x_train.reshape((x_train.shape[0], 3, 32, 32))
x_test = x_test.reshape((x_test.shape[0], 3, 32, 32))
# define symbolic Theano variables
x = T.tensor4()
t = T.matrix()
# define model: neural network
def floatX(x):
return np.asarray(x, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.1))
def momentum(cost, params, learning_rate, momentum):
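    # Classical momentum SGD: keep one velocity per parameter, updated as
    # v <- momentum * v - learning_rate * grad, then apply p <- p + v.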
grads = theano.grad(cost, params)
updates = []
for p, g in zip(params, grads):
mparam_i = theano.shared(np.zeros(p.get_value().shape, dtype=theano.config.floatX))
v = momentum * mparam_i - learning_rate * g
updates.append((mparam_i, v))
updates.append((p, p + v))
return updates
def model(x, w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o):
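    # Architecture: 3x3 conv (4 filters) + ReLU -> 3x3 max-pool -> 3x3 conv
    # (8 filters) + ReLU -> 2x2 max-pool -> flatten -> dense(100) + ReLU ->
    # softmax over the 10 CIFAR-10 classes.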
c1 = T.maximum(0, conv2d(x, w_c1) + b_c1.dimshuffle('x', 0, 'x', 'x'))
p1 = max_pool_2d(c1, (3, 3))
c2 = T.maximum(0, conv2d(p1, w_c2) + b_c2.dimshuffle('x', 0, 'x', 'x'))
p2 = max_pool_2d(c2, (2, 2))
p2_flat = p2.flatten(2)
h3 = T.maximum(0, T.dot(p2_flat, w_h3) + b_h3)
p_y_given_x = T.nnet.softmax(T.dot(h3, w_o) + b_o)
return p_y_given_x
w_c1 = init_weights((4, 3, 3, 3))
b_c1 = init_weights((4,))
w_c2 = init_weights((8, 4, 3, 3))
b_c2 = init_weights((8,))
w_h3 = init_weights((8 * 4 * 4, 100))
b_h3 = init_weights((100,))
w_o = init_weights((100, 10))
b_o = init_weights((10,))
params = [w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o]
p_y_given_x = model(x, *params)
y = T.argmax(p_y_given_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(p_y_given_x, t))
updates = momentum(cost, params, learning_rate=0.01, momentum=0.9)
# compile theano functions
train = theano.function([x, t], cost, updates=updates)
predict = theano.function([x], y)
# train model
batch_size = 50
for i in range(50):
print "iteration {}".format(i + 1)
for start in range(0, len(x_train), batch_size):
x_batch = x_train[start:start + batch_size]
t_batch = t_train[start:start + batch_size]
cost = train(x_batch, t_batch)
predictions_test = predict(x_test)
accuracy = np.mean(predictions_test == labels_test)
print "accuracy: {}\n".format(accuracy)
| JBed/Simple_Theano | 4_simple_conv_net/better_conv_net.py | Python | apache-2.0 | 2,609 |
#!/usr/bin/python
#-*- encoding: utf8 -*-
import random
import json
import threading
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import PointStamped
from mhri_msgs.msg import RaisingEvents, GazeCommand
from mhri_msgs.srv import ReadData, ReadDataRequest
GAZE_CONTROLLER_PERIOD = 0.2
GLANCE_TIMEOUT_MEAN = 5.0
IDLE_TIMEOUT_MEAN = 4.0
class GazeState:
IDLE = 0
FOCUSING = 1
TRACKING = 2
GLANCE = 3
UNKNOWN = -1
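# State semantics (see handle_gaze_controller below): IDLE looks at random
# points, GLANCE reacts to loud sounds, TRACKING follows detected faces or
# persons, FOCUSING gazes at a target named on the 'gaze_focusing' topic.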
class GazeNode:
def __init__(self):
rospy.init_node('gaze', anonymous=False)
self.lock = threading.RLock()
with self.lock:
self.current_state = GazeState.IDLE
self.last_state = self.current_state
# Initialize Variables
self.glance_timeout = 0
self.glance_timecount = 0
self.glance_played = False
self.idle_timeout = 0
self.idle_timecount = 0
self.idle_played = False
rospy.loginfo('\033[92m[%s]\033[0m waiting for bringup social_mind...'%rospy.get_name())
rospy.wait_for_service('environmental_memory/read_data')
rospy.wait_for_service('social_events_memory/read_data')
self.rd_memory = {}
self.rd_memory['environmental_memory'] = rospy.ServiceProxy('environmental_memory/read_data', ReadData)
self.rd_memory['social_events_memory'] = rospy.ServiceProxy('social_events_memory/read_data', ReadData)
rospy.Subscriber('raising_events', RaisingEvents, self.handle_raising_events)
rospy.Subscriber('gaze_focusing', String, self.handle_gaze_focusing)
self.pub_gaze_cmd = rospy.Publisher('gaze_command', GazeCommand, queue_size=10)
self.pub_viz_gaze_cmd = rospy.Publisher('visualization_gaze_cmd', PointStamped, queue_size=10)
rospy.Timer(rospy.Duration(GAZE_CONTROLLER_PERIOD), self.handle_gaze_controller)
rospy.loginfo('\033[92m[%s]\033[0m initialized...'%rospy.get_name())
rospy.spin()
def handle_raising_events(self, msg):
if len(msg.events) == 0:
return
if 'loud_sound_detected' in msg.events:
with self.lock:
if self.current_state != GazeState.GLANCE:
self.last_state = self.current_state
self.current_state = GazeState.GLANCE
elif 'person_appeared' in msg.events or 'face_detected' in msg.events:
with self.lock:
if self.current_state != GazeState.TRACKING:
self.last_state = self.current_state
self.current_state = GazeState.TRACKING
def handle_gaze_focusing(self, msg):
        # Check whether the environmental memory has information about the given
        # name; if so, set it as the focusing target, otherwise keep the current mode.
if msg.data != '':
with self.lock:
if self.current_state != GazeState.FOCUSING:
self.last_state = self.current_state
self.current_state = GazeState.FOCUSING
self.focusing_target = msg.data
elif self.current_state == GazeState.FOCUSING and msg.data == '':
with self.lock:
self.current_state = self.last_state
self.focusing_target = ''
def handle_gaze_controller(self, event):
        # Controller running every 0.2 s (adjustable via GAZE_CONTROLLER_PERIOD);
        # its behavior depends on the current gaze state.
if self.current_state == GazeState.IDLE:
            # Pick a random target position every 4 to 6 seconds.
if not self.idle_played:
self.idle_timecount = 0
self.idle_timeout = random.randrange(
IDLE_TIMEOUT_MEAN/GAZE_CONTROLLER_PERIOD, (IDLE_TIMEOUT_MEAN+2.0)/GAZE_CONTROLLER_PERIOD)
self.idle_played = True
else:
self.idle_timecount += 1
if self.idle_timecount > self.idle_timeout:
cmd = GazeCommand()
# cmd.target_point.header.stamp = rospy.Time.now()
cmd.target_point.header.frame_id = 'base_footprint'
cmd.target_point.point.x = 2.0
cmd.target_point.point.y = random.randrange(-10, 10) / 10.0
cmd.target_point.point.z = 0.6 + (random.randrange(-2, 5) / 10.0)
cmd.max_speed = random.randrange(5, 15) / 100.0
self.pub_gaze_cmd.publish(cmd)
self.pub_viz_gaze_cmd.publish(cmd.target_point)
self.idle_timecount = 0
self.idle_timeout = random.randrange(
IDLE_TIMEOUT_MEAN/GAZE_CONTROLLER_PERIOD, (IDLE_TIMEOUT_MEAN+2.0)/GAZE_CONTROLLER_PERIOD)
elif self.current_state == GazeState.GLANCE:
if not self.glance_played:
req = ReadDataRequest()
req.perception_name = 'loud_sound_detection'
req.query = '{}'
req.data.append('xyz')
req.data.append('frame_id')
response = self.rd_memory['social_events_memory'](req)
if not response.result:
self.current_state = self.last_state
return
result_data = json.loads(response.data)
cmd = GazeCommand()
cmd.target_point.header.frame_id = 'base_footprint'#result_data['frame_id']
cmd.target_point.point.x = 1.0
cmd.target_point.point.z = 0.6 + (random.randrange(0, 30) / 100.0)
if result_data['xyz'][1] < -0.2: #Right Side
cmd.target_point.point.y = -1.0 * random.randrange(10, 20) / 10.0
else:
cmd.target_point.point.y = random.randrange(10, 20) / 10.0
cmd.max_speed = 1.0
self.pub_gaze_cmd.publish(cmd)
self.pub_viz_gaze_cmd.publish(cmd.target_point)
rospy.loginfo('\033[92m[%s]\033[0m changed the state - [GLANCE]...'%rospy.get_name())
self.glance_timecount = 0
self.glance_timeout = random.randrange(
GLANCE_TIMEOUT_MEAN/GAZE_CONTROLLER_PERIOD, (GLANCE_TIMEOUT_MEAN+1.0)/GAZE_CONTROLLER_PERIOD)
self.glance_played = True
else:
self.glance_timecount += 1
if self.glance_timecount > self.glance_timeout:
self.glance_played = False
self.glance_timecount = 0
self.lock.acquire()
self.current_state = self.last_state
self.lock.release()
rospy.loginfo('\033[92m[%s]\033[0m return from GLANCE to last state...'%rospy.get_name())
elif self.current_state == GazeState.FOCUSING:
target_type = ''
target_name = ''
try:
target_type, target_name = self.focusing_target.split(':')
except ValueError:
with self.lock:
self.current_state = self.last_state
return
req = ReadDataRequest()
req.perception_name = target_type
req.query = '{"name": "%s"}'%target_name
req.data.append('xyz')
req.data.append('frame_id')
response = self.rd_memory['environmental_memory'](req)
if response.result:
rospy.logdebug("read from environmental_memory for %s: %s"%(target_name, response.data))
result_data = json.loads(response.data)
cmd = GazeCommand()
cmd.target_point.header.frame_id = result_data['frame_id']
cmd.target_point.point.x = result_data['xyz'][0]
cmd.target_point.point.y = result_data['xyz'][1]
cmd.target_point.point.z = result_data['xyz'][2]
cmd.max_speed = 0.2
self.pub_gaze_cmd.publish(cmd)
self.pub_viz_gaze_cmd.publish(cmd.target_point)
else:
rospy.logwarn('Can not find the information of %s in memory...'%target_name)
with self.lock:
self.current_state = self.last_state
elif self.current_state == GazeState.TRACKING:
            # Fetch information about detected persons from the environmental memory.
            # Planning is needed for the single-person case and for two or more people.
req = ReadDataRequest()
req.perception_name = 'face_detection'
req.query = '{}'
req.data.append('count')
response = self.rd_memory['social_events_memory'](req)
result_data = json.loads(response.data)
try:
if result_data['count'] == 0:
with self.lock:
self.current_state = self.last_state
return
else:
req = ReadDataRequest()
req.perception_name = 'persons'
req.query = '{}'
req.data = ['~']
response = self.rd_memory['environmental_memory'](req)
ret_data = json.loads(response.data)
try:
cmd = GazeCommand()
cmd.target_point.header.frame_id = ret_data[0]['frame_id']
cmd.target_point.point.x = ret_data[0]['xyz'][0]
cmd.target_point.point.y = ret_data[0]['xyz'][1]
cmd.target_point.point.z = ret_data[0]['xyz'][2]
cmd.max_speed = 0.2
self.pub_gaze_cmd.publish(cmd)
self.pub_viz_gaze_cmd.publish(cmd.target_point)
except KeyError:
pass
except KeyError:
pass
if __name__ == '__main__':
m = GazeNode()
| mhri/mhri | behaviors/src/gaze_node.py | Python | apache-2.0 | 9,941 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-10 10:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0067_auto_20170306_2042'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='channel_type',
field=models.CharField(choices=[('AT', "Africa's Talking"), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('DA', 'Dart Media'), ('DM', 'Dummy'), ('EX', 'External'), ('FB', 'Facebook'), ('FCM', 'Firebase Cloud Messaging'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('JN', 'Junebug'), ('JNU', 'Junebug USSD'), ('KN', 'Kannel'), ('LN', 'Line'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('RR', 'Red Rabbit'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TW', 'TwiML Rest API'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VI', 'Viber'), ('VP', 'Viber Public Channels'), ('VM', 'Vumi'), ('VMU', 'Vumi USSD'), ('YO', 'Yo!'), ('ZV', 'Zenvia')], default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type'),
),
]
| onaio/rapidpro | temba/channels/migrations/0068_junebug_ussd_channel_type.py | Python | agpl-3.0 | 1,381 |
# Copyright (c) 2011 OpenStack, LLC
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For HostManager
"""
import copy
import ddt
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from six import moves
from manila import context
from manila import db
from manila import exception
from manila.scheduler.filters import base_host
from manila.scheduler import host_manager
from manila import test
from manila.tests.scheduler import fakes
from manila import utils
CONF = cfg.CONF
class FakeFilterClass1(base_host.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(base_host.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
@ddt.ddt
class HostManagerTestCase(test.TestCase):
"""Test case for HostManager class."""
def setUp(self):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x)
for x in moves.range(1, 5)]
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
def test_choose_host_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2'])
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
# Test 'share' returns 1 correct function
filter_classes = self.host_manager._choose_host_filters(None)
self.assertEqual(1, len(filter_classes))
self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
def _verify_result(self, info, result):
for x in info['got_fprops']:
self.assertEqual(info['expected_fprops'], x)
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(info['got_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
info = {
'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties,
}
with mock.patch.object(self.host_manager, '_choose_host_filters',
mock.Mock(return_value=[FakeFilterClass1])):
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.mock_object(FakeFilterClass1, '_filter_one', fake_filter_one)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
self.host_manager._choose_host_filters.assert_called_once_with(
mock.ANY)
def test_update_service_capabilities_for_shares(self):
service_states = self.host_manager.service_states
self.assertDictMatch(service_states, {})
host1_share_capabs = dict(free_capacity_gb=4321, timestamp=1)
host2_share_capabs = dict(free_capacity_gb=5432, timestamp=1)
host3_share_capabs = dict(free_capacity_gb=6543, timestamp=1)
service_name = 'share'
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=31337)):
self.host_manager.update_service_capabilities(
service_name, 'host1', host1_share_capabs)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=31338)):
self.host_manager.update_service_capabilities(
service_name, 'host2', host2_share_capabs)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=31339)):
self.host_manager.update_service_capabilities(
service_name, 'host3', host3_share_capabs)
timeutils.utcnow.assert_called_once_with()
# Make sure dictionary isn't re-assigned
self.assertEqual(service_states, self.host_manager.service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(1, host1_share_capabs['timestamp'])
host1_share_capabs['timestamp'] = 31337
host2_share_capabs['timestamp'] = 31338
host3_share_capabs['timestamp'] = 31339
expected = {
'host1': host1_share_capabs,
'host2': host2_share_capabs,
'host3': host3_share_capabs,
}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states_share(self):
fake_context = context.RequestContext('user', 'project')
topic = CONF.share_topic
tmp_pools = copy.deepcopy(fakes.SHARE_SERVICES_WITH_POOLS)
tmp_enable_pools = tmp_pools[:-2]
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=tmp_enable_pools))
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
with mock.patch.dict(self.host_manager.service_states,
fakes.SHARE_SERVICE_STATES_WITH_POOLS):
# Get service
self.host_manager.get_all_host_states_share(fake_context)
# Disabled one service
tmp_enable_pools.pop()
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=tmp_enable_pools))
# Get service again
self.host_manager.get_all_host_states_share(fake_context)
host_state_map = self.host_manager.host_state_map
self.assertEqual(3, len(host_state_map))
# Check that service is up
for i in moves.range(3):
share_node = fakes.SHARE_SERVICES_WITH_POOLS[i]
host = share_node['host']
self.assertEqual(share_node, host_state_map[host].service)
db.service_get_all_by_topic.assert_called_once_with(
fake_context, topic)
def test_get_pools_no_pools(self):
fake_context = context.RequestContext('user', 'project')
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SERVICE_STATES_NO_POOLS):
res = self.host_manager.get_pools(context=fake_context)
expected = [
{
'name': 'host1#AAA',
'host': 'host1',
'backend': None,
'pool': 'AAA',
'capabilities': {
'timestamp': None,
'share_backend_name': 'AAA',
'free_capacity_gb': 200,
'driver_version': None,
'total_capacity_gb': 512,
'reserved_percentage': 0,
'provisioned_capacity_gb': 312,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': False,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@back1#BBB',
'host': 'host2',
'backend': 'back1',
'pool': 'BBB',
'capabilities': {
'timestamp': None,
'share_backend_name': 'BBB',
'free_capacity_gb': 100,
'driver_version': None,
'total_capacity_gb': 256,
'reserved_percentage': 0,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@back2#CCC',
'host': 'host2',
'backend': 'back2',
'pool': 'CCC',
'capabilities': {
'timestamp': None,
'share_backend_name': 'CCC',
'free_capacity_gb': 700,
'driver_version': None,
'total_capacity_gb': 10000,
'reserved_percentage': 0,
'provisioned_capacity_gb': 50000,
'max_over_subscription_ratio': 20.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]
self.assertIsInstance(res, list)
self.assertEqual(len(expected), len(res))
for pool in expected:
self.assertIn(pool, res)
def test_get_pools(self):
fake_context = context.RequestContext('user', 'project')
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SHARE_SERVICE_STATES_WITH_POOLS):
res = self.host_manager.get_pools(fake_context)
expected = [
{
'name': 'host1@AAA#pool1',
'host': 'host1',
'backend': 'AAA',
'pool': 'pool1',
'capabilities': {
'pool_name': 'pool1',
'timestamp': None,
'share_backend_name': 'AAA',
'free_capacity_gb': 41,
'driver_version': None,
'total_capacity_gb': 51,
'reserved_percentage': 0,
'provisioned_capacity_gb': 10,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@BBB#pool2',
'host': 'host2',
'backend': 'BBB',
'pool': 'pool2',
'capabilities': {
'pool_name': 'pool2',
'timestamp': None,
'share_backend_name': 'BBB',
'free_capacity_gb': 42,
'driver_version': None,
'total_capacity_gb': 52,
'reserved_percentage': 0,
'provisioned_capacity_gb': 60,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host3@CCC#pool3',
'host': 'host3',
'backend': 'CCC',
'pool': 'pool3',
'capabilities': {
'pool_name': 'pool3',
'timestamp': None,
'share_backend_name': 'CCC',
'free_capacity_gb': 43,
'driver_version': None,
'total_capacity_gb': 53,
'reserved_percentage': 0,
'provisioned_capacity_gb': 100,
'max_over_subscription_ratio': 20.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': 'pool',
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host4@DDD#pool4a',
'host': 'host4',
'backend': 'DDD',
'pool': 'pool4a',
'capabilities': {
'pool_name': 'pool4a',
'timestamp': None,
'share_backend_name': 'DDD',
'free_capacity_gb': 441,
'driver_version': None,
'total_capacity_gb': 541,
'reserved_percentage': 0,
'provisioned_capacity_gb': 800,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': 'host',
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host4@DDD#pool4b',
'host': 'host4',
'backend': 'DDD',
'pool': 'pool4b',
'capabilities': {
'pool_name': 'pool4b',
'timestamp': None,
'share_backend_name': 'DDD',
'free_capacity_gb': 442,
'driver_version': None,
'total_capacity_gb': 542,
'reserved_percentage': 0,
'provisioned_capacity_gb': 2000,
'max_over_subscription_ratio': 10.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'consistency_group_support': 'host',
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]
self.assertIsInstance(res, list)
self.assertIsInstance(self.host_manager.host_state_map, dict)
self.assertEqual(len(expected), len(res))
for pool in expected:
self.assertIn(pool, res)
def test_get_pools_host_down(self):
fake_context = context.RequestContext('user', 'project')
mock_service_is_up = self.mock_object(utils, 'service_is_up')
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SERVICE_STATES_NO_POOLS):
# Initialize host data with all services present
mock_service_is_up.side_effect = [True, True, True]
# Call once to update the host state map
self.host_manager.get_pools(fake_context)
self.assertEqual(len(fakes.SHARE_SERVICES_NO_POOLS),
len(self.host_manager.host_state_map))
# Then mock one host as down
mock_service_is_up.side_effect = [True, True, False]
res = self.host_manager.get_pools(fake_context)
expected = [
{
'name': 'host1#AAA',
'host': 'host1',
'backend': None,
'pool': 'AAA',
'capabilities': {
'timestamp': None,
'driver_handles_share_servers': False,
'snapshot_support': False,
'share_backend_name': 'AAA',
'free_capacity_gb': 200,
'driver_version': None,
'total_capacity_gb': 512,
'reserved_percentage': 0,
'vendor_name': None,
'storage_protocol': None,
'provisioned_capacity_gb': 312,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
}, {
'name': 'host2@back1#BBB',
'host': 'host2',
'backend': 'back1',
'pool': 'BBB',
'capabilities': {
'timestamp': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'share_backend_name': 'BBB',
'free_capacity_gb': 100,
'driver_version': None,
'total_capacity_gb': 256,
'reserved_percentage': 0,
'vendor_name': None,
'storage_protocol': None,
'provisioned_capacity_gb': 400,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]
self.assertIsInstance(res, list)
self.assertIsInstance(self.host_manager.host_state_map, dict)
self.assertEqual(len(expected), len(res))
self.assertEqual(len(expected),
len(self.host_manager.host_state_map))
for pool in expected:
self.assertIn(pool, res)
def test_get_pools_with_filters(self):
fake_context = context.RequestContext('user', 'project')
self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
self.mock_object(
db, 'service_get_all_by_topic',
mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS))
host_manager.LOG.warning = mock.Mock()
with mock.patch.dict(self.host_manager.service_states,
fakes.SHARE_SERVICE_STATES_WITH_POOLS):
res = self.host_manager.get_pools(
context=fake_context,
filters={'host': 'host2', 'pool': 'pool*'})
expected = [
{
'name': 'host2@BBB#pool2',
'host': 'host2',
'backend': 'BBB',
'pool': 'pool2',
'capabilities': {
'pool_name': 'pool2',
'timestamp': None,
'driver_handles_share_servers': False,
'snapshot_support': True,
'share_backend_name': 'BBB',
'free_capacity_gb': 42,
'driver_version': None,
'total_capacity_gb': 52,
'reserved_percentage': 0,
'provisioned_capacity_gb': 60,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'vendor_name': None,
'storage_protocol': None,
'consistency_group_support': False,
'dedupe': False,
'compression': False,
'replication_type': None,
'replication_domain': None,
},
},
]
self.assertEqual(len(expected), len(res))
self.assertEqual(sorted(expected), sorted(res))
@ddt.data(
None,
{},
{'key1': 'value1'},
{'key1': 'value1', 'key2': 'value*'},
{'key1': '.*', 'key2': '.*'},
)
def test_passes_filters_true(self, filter):
data = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
self.assertTrue(self.host_manager._passes_filters(data, filter))
@ddt.data(
{'key1': 'value$'},
{'key4': 'value'},
{'key1': 'value1.+', 'key2': 'value*'},
)
def test_passes_filters_false(self, filter):
data = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
self.assertFalse(self.host_manager._passes_filters(data, filter))
class HostStateTestCase(test.TestCase):
"""Test case for HostState class."""
def test_update_from_share_capability_nopool(self):
fake_context = context.RequestContext('user', 'project', is_admin=True)
share_capability = {'total_capacity_gb': 0,
'free_capacity_gb': 100,
'reserved_percentage': 0,
'timestamp': None}
fake_host = host_manager.HostState('host1', share_capability)
self.assertIsNone(fake_host.free_capacity_gb)
fake_host.update_from_share_capability(share_capability,
context=fake_context)
# Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(0, fake_host.pools['_pool0'].total_capacity_gb)
self.assertEqual(100, fake_host.pools['_pool0'].free_capacity_gb)
# Test update for existing host state
share_capability.update(dict(total_capacity_gb=1000))
fake_host.update_from_share_capability(share_capability,
context=fake_context)
self.assertEqual(1000, fake_host.pools['_pool0'].total_capacity_gb)
# Test update for existing host state with different backend name
share_capability.update(dict(share_backend_name='magic'))
fake_host.update_from_share_capability(share_capability,
context=fake_context)
self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb)
self.assertEqual(100, fake_host.pools['magic'].free_capacity_gb)
# 'pool0' becomes nonactive pool, and is deleted
self.assertRaises(KeyError, lambda: fake_host.pools['pool0'])
def test_update_from_share_capability_with_pools(self):
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.HostState('host1#pool1')
self.assertIsNone(fake_host.free_capacity_gb)
capability = {
'share_backend_name': 'Backend1',
'vendor_name': 'OpenStack',
'driver_version': '1.1',
'storage_protocol': 'NFS_CIFS',
'pools': [
{'pool_name': 'pool1',
'total_capacity_gb': 500,
'free_capacity_gb': 230,
'allocated_capacity_gb': 270,
'qos': 'False',
'reserved_percentage': 0,
'dying_disks': 100,
'super_hero_1': 'spider-man',
'super_hero_2': 'flash',
'super_hero_3': 'neoncat',
},
{'pool_name': 'pool2',
'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'qos': 'False',
'reserved_percentage': 0,
'dying_disks': 200,
'super_hero_1': 'superman',
'super_hero_2': 'Hulk',
}
],
'timestamp': None,
}
fake_host.update_from_share_capability(capability,
context=fake_context)
self.assertEqual('Backend1', fake_host.share_backend_name)
self.assertEqual('NFS_CIFS', fake_host.storage_protocol)
self.assertEqual('OpenStack', fake_host.vendor_name)
self.assertEqual('1.1', fake_host.driver_version)
# Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(2, len(fake_host.pools))
self.assertEqual(500, fake_host.pools['pool1'].total_capacity_gb)
self.assertEqual(230, fake_host.pools['pool1'].free_capacity_gb)
self.assertEqual(1024, fake_host.pools['pool2'].total_capacity_gb)
self.assertEqual(1024, fake_host.pools['pool2'].free_capacity_gb)
capability = {
'share_backend_name': 'Backend1',
'vendor_name': 'OpenStack',
'driver_version': '1.0',
'storage_protocol': 'NFS_CIFS',
'pools': [
{'pool_name': 'pool3',
'total_capacity_gb': 10000,
'free_capacity_gb': 10000,
'allocated_capacity_gb': 0,
'qos': 'False',
'reserved_percentage': 0,
},
],
'timestamp': None,
}
# test update HostState Record
fake_host.update_from_share_capability(capability,
context=fake_context)
self.assertEqual('1.0', fake_host.driver_version)
# Non-active pool stats has been removed
self.assertEqual(1, len(fake_host.pools))
self.assertRaises(KeyError, lambda: fake_host.pools['pool1'])
self.assertRaises(KeyError, lambda: fake_host.pools['pool2'])
self.assertEqual(10000, fake_host.pools['pool3'].total_capacity_gb)
self.assertEqual(10000, fake_host.pools['pool3'].free_capacity_gb)
def test_update_from_share_unknown_capability(self):
share_capability = {
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1,
'reserved_percentage': 0,
'timestamp': None
}
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.HostState('host1#_pool0')
self.assertIsNone(fake_host.free_capacity_gb)
fake_host.update_from_share_capability(share_capability,
context=fake_context)
# Backend level stats remain uninitialized
self.assertEqual(fake_host.total_capacity_gb, 0)
self.assertIsNone(fake_host.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
'unknown')
self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
'unknown')
def test_consume_from_share_capability(self):
fake_context = context.RequestContext('user', 'project', is_admin=True)
share_size = 10
free_capacity = 100
fake_share = {'id': 'foo', 'size': share_size}
share_capability = {
'total_capacity_gb': free_capacity * 2,
'free_capacity_gb': free_capacity,
'reserved_percentage': 0,
'timestamp': None
}
fake_host = host_manager.PoolState('host1', share_capability, '_pool0')
fake_host.update_from_share_capability(share_capability,
context=fake_context)
fake_host.consume_from_share(fake_share)
self.assertEqual(fake_host.free_capacity_gb,
free_capacity - share_size)
def test_consume_from_share_unknown_capability(self):
share_capability = {
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None
}
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.PoolState('host1', share_capability, '_pool0')
share_size = 1000
fake_share = {'id': 'foo', 'size': share_size}
fake_host.update_from_share_capability(share_capability,
context=fake_context)
fake_host.consume_from_share(fake_share)
self.assertEqual(fake_host.total_capacity_gb, 'unknown')
self.assertEqual(fake_host.free_capacity_gb, 'unknown')
def test_consume_from_share_invalid_capacity(self):
fake_host = host_manager.PoolState('host1', {}, '_pool0')
fake_host.free_capacity_gb = 'invalid_foo_string'
self.assertRaises(exception.InvalidCapacity,
fake_host.consume_from_share, 'fake')
def test_repr(self):
capability = {
'share_backend_name': 'Backend1',
'vendor_name': 'OpenStack',
'driver_version': '1.0',
'storage_protocol': 'NFS_CIFS',
'total_capacity_gb': 20000,
'free_capacity_gb': 15000,
'allocated_capacity_gb': 5000,
'timestamp': None,
'reserved_percentage': 0,
}
fake_context = context.RequestContext('user', 'project', is_admin=True)
fake_host = host_manager.HostState('host1')
fake_host.update_from_share_capability(capability,
context=fake_context)
result = fake_host.__repr__()
expected = "host: 'host1', free_capacity_gb: None, " \
"pools: {'Backend1': host: 'host1#Backend1', " \
"free_capacity_gb: 15000, pools: None}"
self.assertEqual(expected, result)
@ddt.ddt
class PoolStateTestCase(test.TestCase):
"""Test case for HostState class."""
@ddt.data(
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'reserved_percentage': 0, 'timestamp': None,
'cap1': 'val1', 'cap2': 'val2'},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 4,
'updated_at': timeutils.utcnow()
},
{
'id': 2, 'host': 'host1',
'status': 'available',
'share_id': 12, 'size': None,
'updated_at': timeutils.utcnow()
},
]
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'reserved_percentage': 0, 'timestamp': None,
'cap1': 'val1', 'cap2': 'val2'},
'instances': []
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'allocated_capacity_gb': 256, 'reserved_percentage': 0,
'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 4,
'updated_at': timeutils.utcnow()
},
]
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'allocated_capacity_gb': 256, 'reserved_percentage': 0,
'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
'instances': []
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'provisioned_capacity_gb': 256, 'reserved_percentage': 0,
'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 1,
'updated_at': timeutils.utcnow()
},
]
},
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256,
'reserved_percentage': 0, 'timestamp': None, 'cap1': 'val1',
'cap2': 'val2'},
'instances':
[
{
'id': 1, 'host': 'host1',
'status': 'available',
'share_id': 11, 'size': 1,
'updated_at': timeutils.utcnow()
},
]
},
)
@ddt.unpack
def test_update_from_share_capability(self, share_capability, instances):
fake_context = context.RequestContext('user', 'project', is_admin=True)
self.mock_object(
db, 'share_instances_get_all_by_host',
mock.Mock(return_value=instances))
fake_pool = host_manager.PoolState('host1', None, 'pool0')
self.assertIsNone(fake_pool.free_capacity_gb)
fake_pool.update_from_share_capability(share_capability,
context=fake_context)
self.assertEqual('host1#pool0', fake_pool.host)
self.assertEqual('pool0', fake_pool.pool_name)
self.assertEqual(1024, fake_pool.total_capacity_gb)
self.assertEqual(512, fake_pool.free_capacity_gb)
self.assertDictMatch(share_capability, fake_pool.capabilities)
if 'provisioned_capacity_gb' not in share_capability:
db.share_instances_get_all_by_host.assert_called_once_with(
fake_context, fake_pool.host, with_share_data=True)
if len(instances) > 0:
self.assertEqual(4, fake_pool.provisioned_capacity_gb)
else:
self.assertEqual(0, fake_pool.provisioned_capacity_gb)
if 'allocated_capacity_gb' in share_capability:
self.assertEqual(share_capability['allocated_capacity_gb'],
fake_pool.allocated_capacity_gb)
elif 'allocated_capacity_gb' not in share_capability:
self.assertEqual(0, fake_pool.allocated_capacity_gb)
elif 'provisioned_capacity_gb' in share_capability and (
'allocated_capacity_gb' not in share_capability):
self.assertFalse(db.share_instances_get_all_by_host.called)
self.assertEqual(0, fake_pool.allocated_capacity_gb)
self.assertEqual(share_capability['provisioned_capacity_gb'],
fake_pool.provisioned_capacity_gb)
elif 'provisioned_capacity_gb' in share_capability and (
'allocated_capacity_gb' in share_capability):
self.assertFalse(db.share_instances_get_all_by_host.called)
self.assertEqual(share_capability['allocated_capacity_gb'],
fake_pool.allocated_capacity_gb)
self.assertEqual(share_capability['provisioned_capacity_gb'],
fake_pool.provisioned_capacity_gb)
| NetApp/manila | manila/tests/scheduler/test_host_manager.py | Python | apache-2.0 | 39,720 |
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="x", parent_name="bar", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop("anim", True),
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
role=kwargs.pop("role", "data"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/bar/_x.py | Python | mit | 480 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2015 Université Catholique de Louvain.
#
# This file is part of INGInious.
#
# INGInious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INGInious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with INGInious. If not, see <http://www.gnu.org/licenses/>.
""" Modified boxes """
from abc import ABCMeta, abstractmethod
import base64
import json
from common.tasks_code_boxes import TextBox, InputBox, MultilineBox, FileBox
from frontend.base import get_template_renderer
from frontend.parsable_text import ParsableText
class DisplayableBox(object):
""" A basic interface for displayable boxes """
__metaclass__ = ABCMeta
def __str__(self):
""" Get the html to show this box """
return self.show()
def __unicode__(self):
""" Get the html to show this box """
return self.show()
def adapt_input_for_backend(self, input_data):
""" Adapt the input from web.py for the backend """
return input_data
@abstractmethod
def show(self):
""" Get the html to show this box """
pass
class DisplayableTextBox(TextBox, DisplayableBox):
""" A displayable text box """
def __init__(self, problem, boxid, boxData):
TextBox.__init__(self, problem, boxid, boxData)
DisplayableBox.__init__(self)
self._content = ParsableText(self._content, "HTML" if "contentIsHTML" in boxData and boxData["contentIsHTML"] else "rst").parse()
def show(self):
""" Show TextBox """
return str(get_template_renderer('templates/tasks/').box_text(self._content))
class DisplayableFileBox(FileBox, DisplayableBox):
""" A displayable file box """
def adapt_input_for_backend(self, input_data):
try:
input_data[self.get_complete_id()] = {"filename": input_data[self.get_complete_id()].filename,
"value": base64.b64encode(input_data[self.get_complete_id()].value)}
except:
input_data[self.get_complete_id()] = {}
return input_data
def show(self):
""" Show FileBox """
return str(get_template_renderer('templates/tasks/').box_file(self.get_complete_id(), self._max_size, self._allowed_exts, json))
class DisplayableInputBox(InputBox, DisplayableBox):
""" A displayable input box """
def show(self):
""" Show InputBox """
return str(get_template_renderer('templates/tasks/').box_input(self.get_complete_id(), self._input_type, self._max_chars))
class DisplayableMultilineBox(MultilineBox, DisplayableBox):
""" A displayable multiline box """
def show(self):
""" Show MultilineBox """
return str(get_template_renderer('templates/tasks/').box_multiline(self.get_complete_id(), self._lines, self._max_chars, self._language))
| layus/INGInious | frontend/custom/tasks_code_boxes.py | Python | agpl-3.0 | 3,326 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fastphase(Package):
"""Software for haplotype reconstruction, and estimating missing genotypes
from population data."""
homepage = "http://stephenslab.uchicago.edu/software.html"
url = "http://scheet.org/code/Linuxfp.tar.gz"
version('2016-03-30', 'b48731eed9b8d0a5a321f970c5c20d8c')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('fastPHASE', prefix.bin)
| skosukhin/spack | var/spack/repos/builtin/packages/fastphase/package.py | Python | lgpl-2.1 | 1,682 |
# -*- coding: UTF-8 -*-
"""Common Python2/3 compatibility functions.
"""
from six import binary_type, string_types, text_type, u
from .constants import PYTHON3
__all__ = __features__ = ["b", "binary_type", "byteindex", "execfile",
"ensure_binary", "ensure_str", "iterbytes",
"string_types", "text_type", "u"]
# see: http://python3porting.com/problems.html
byteindex = lambda d, i=None: d[i] if PYTHON3 else ord(d[i])
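# e.g. byteindex(b"abc", 0) == 97 under both Python 2 and Python 3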
def b(s):
"""
Similar to six.b function, because the behavior of 'b' in Python2/3 is not
exactly the same. This makes 'b' behave in Python 3 like in Python 2.
"""
if PYTHON3:
try:
return s.encode("latin-1")
except:
pass
try:
return s.encode("utf-8")
except:
pass
return s
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""
Identical to six.ensure_binary. Copied here to avoid messing up with six
version errors.
"""
if isinstance(s, text_type):
return s.encode(encoding, errors)
elif isinstance(s, binary_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""
Similar to six.ensure_str. Adapted here to avoid messing up with six version
errors.
"""
if not PYTHON3 and isinstance(s, text_type):
return s.encode(encoding, errors)
elif PYTHON3 and isinstance(s, binary_type):
try:
return s.decode(encoding, errors)
except:
return s.decode("latin-1")
elif not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
return s
def execfile(source, globals=None, locals=None):
with open(source) as f:
content = f.read()
exec(content, globals, locals)
if PYTHON3:
__all__ += ["execfile"]
def iterbytes(text):
"""
Bytes iterator. If a string is provided, it will automatically be converted
to bytes.
"""
if isinstance(text, string_types):
text = b(text)
for c in text:
yield c if PYTHON3 else ord(c)
| dhondta/tinyscript | tinyscript/helpers/compat.py | Python | agpl-3.0 | 2,210 |
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
#
# pypackager documentation build configuration file, created by
# sphinx-quickstart on Wed May 29 12:34:22 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = '{{ package_name }}'
copyright = '{{ year }}, {{ author.name }}'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '{{ package_name }}doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', '{{ package_name }}.tex', '{{ package_name }} Documentation',
'{{ author.name }}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', '{{ package_name }}', u'{{ package_name }} Documentation',
['{{ author.name }}'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', '{{ package_name }}', u'{{ package_name }} Documentation',
    '{{ author.name }}', '{{ package_name }}', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| fcurella/python-packager | pypackager/template/docs/conf.py | Python | mit | 8,109 |
# del name
x = 1
print(x)
del x
try:
print(x)
except NameError:
print("NameError")
try:
del x
except: # NameError:
# FIXME uPy returns KeyError for this
print("NameError")
class C:
def f():
pass
| rubencabrera/micropython | tests/basics/del_name.py | Python | mit | 229 |
# Copied from django with some modifications
import copy
# Defined here because this standalone copy of the Django code does not import it;
# raised by MultiValueDict.__getitem__ below.
class MultiValueDictKeyError(KeyError):
    pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key, [])
super(MultiValueDict, self).__setitem__(key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
        return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key, []).append(value)
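# Illustrative usage (added for clarity; not part of the original Django copy):
#   >>> d = MultiValueDict({'a': [1, 2]})
#   >>> d.update(MultiValueDict({'a': [3]}))
#   >>> d.getlist('a')
#   [1, 2, 3]
# update() extends the per-key lists rather than replacing them.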
| RaceList/tours | util.py | Python | mit | 5,548 |
import os
import codecs
from setuptools import setup
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
setup(
name="django-queued-storage",
use_scm_version=True,
setup_requires=['setuptools_scm'],
url='https://github.com/jazzband/django-queued-storage',
license='BSD',
description="Queued remote storage for Django.",
long_description=read('README.rst'),
author='Jannis Leidel',
author_email='[email protected]',
packages=['queued_storage'],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
],
install_requires=[
'six>=1.10.0',
'django-celery>=3.1,<3.2',
'django-appconf >= 0.4',
],
zip_safe=False,
)
| melfelr/django-queued-storage | setup.py | Python | bsd-3-clause | 1,198 |
"""Functions for handling encoded word headers.
Copyright 2012 Steve Jones
This file is part of mdtools.
mdtools is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
mdtools is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
mdtools. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from base64 import b64decode
from quopri import decodestring as qpdecode
encoded_word_re = re.compile("=\\?([^?]+)\\?([qQbB])\\?([^?]+)\\?=")
def replace_encoded_word(match):
charset = match.group(1)
encoding = match.group(2).upper()
body = match.group(3)
if encoding == "B":
if len(body) % 4 != 0:
body += "=" * (4 - (len(body) % 4))
return b64decode(body).decode(charset)
elif encoding == "Q":
return qpdecode(body).decode(charset)
else:
raise Exception("Not implemented")
newline_re = re.compile("\r?\n\s(\s*)")
def decode_encoded_word(encoded):
encoded = newline_re.sub("\\1", encoded)
encoded = encoded_word_re.sub(replace_encoded_word, encoded)
if isinstance(encoded, str):
encoded = encoded.decode("ascii", "replace")
return encoded
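# Illustrative example (added; not part of the original module). With the
# Q ("quoted-printable") encoding on Python 2:
#   >>> decode_encoded_word("=?utf-8?q?hello?=")
#   u'hello'
# B-encoded words are base64-decoded, with '=' padding restored if it was stripped.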
| SteveJones/mdtools | encoded_word.py | Python | gpl-3.0 | 1,556 |
from decimal import Decimal
from django.apps import apps
from django.core import checks
from django.db import models
from django.test import TestCase, skipIfDBFeature
from django.test.utils import isolate_apps
from .models import Bar, FkToChar, Foo, PrimaryKeyCharModel
class ForeignKeyTests(TestCase):
def test_callable_default(self):
"""A lazy callable may be used for ForeignKey.default."""
a = Foo.objects.create(id=1, a='abc', d=Decimal('12.34'))
b = Bar.objects.create(b='bcd')
self.assertEqual(b.a, a)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_empty_string_fk(self):
"""
Empty strings foreign key values don't get converted to None (#19299).
"""
char_model_empty = PrimaryKeyCharModel.objects.create(string='')
fk_model_empty = FkToChar.objects.create(out=char_model_empty)
fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk)
self.assertEqual(fk_model_empty.out, char_model_empty)
@isolate_apps('model_fields')
def test_warning_when_unique_true_on_fk(self):
class Foo(models.Model):
pass
class FKUniqueTrue(models.Model):
fk_field = models.ForeignKey(Foo, models.CASCADE, unique=True)
model = FKUniqueTrue()
expected_warnings = [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=FKUniqueTrue.fk_field.field,
id='fields.W342',
)
]
warnings = model.check()
self.assertEqual(warnings, expected_warnings)
def test_related_name_converted_to_text(self):
rel_name = Bar._meta.get_field('a').remote_field.related_name
self.assertIsInstance(rel_name, str)
def test_abstract_model_pending_operations(self):
"""
Foreign key fields declared on abstract models should not add lazy
relations to resolve relationship declared as string (#24215).
"""
pending_ops_before = list(apps._pending_operations.items())
class AbstractForeignKeyModel(models.Model):
fk = models.ForeignKey('missing.FK', models.CASCADE)
class Meta:
abstract = True
self.assertIs(AbstractForeignKeyModel._meta.apps, apps)
self.assertEqual(
pending_ops_before,
list(apps._pending_operations.items()),
'Pending lookup added for a foreign key on an abstract model'
)
@isolate_apps('model_fields', 'model_fields.tests')
def test_abstract_model_app_relative_foreign_key(self):
class AbstractReferent(models.Model):
reference = models.ForeignKey('Referred', on_delete=models.CASCADE)
class Meta:
app_label = 'model_fields'
abstract = True
def assert_app_model_resolved(label):
class Referred(models.Model):
class Meta:
app_label = label
class ConcreteReferent(AbstractReferent):
class Meta:
app_label = label
self.assertEqual(ConcreteReferent._meta.get_field('reference').related_model, Referred)
assert_app_model_resolved('model_fields')
assert_app_model_resolved('tests')
@isolate_apps('model_fields')
def test_to_python(self):
class Foo(models.Model):
pass
class Bar(models.Model):
fk = models.ForeignKey(Foo, models.CASCADE)
self.assertEqual(Bar._meta.get_field('fk').to_python('1'), 1)
| reinout/django | tests/model_fields/test_foreignkey.py | Python | bsd-3-clause | 3,762 |
import os
import logging
from collections import defaultdict
from fs.errors import ResourceNotFound
from lml.plugin import PluginManager
from moban import constants, exceptions
from moban.core import utils
from moban.externals import reporter, file_system
from moban.deprecated import deprecated_moban_path_notation
from moban.core.context import Context
from moban.core.strategy import Strategy
from moban.core.hashstore import HASH_STORE
from moban.core.definitions import TemplateTarget
from moban.externals.buffered_writer import BufferedWriter
LOG = logging.getLogger(__name__)
class MobanFactory(PluginManager):
def __init__(self):
super(MobanFactory, self).__init__(constants.TEMPLATE_ENGINE_EXTENSION)
self.extensions = defaultdict(set)
self.options_registry = {}
def register_extensions(self, extensions):
for user_template_type in extensions.keys():
template_type = self.get_primary_key(user_template_type)
LOG.debug(
"Registering extensions: {0}={1}".format(
user_template_type, extensions[user_template_type]
)
)
if template_type in self.extensions:
self.extensions[template_type] = self.extensions[
user_template_type
].union(extensions[user_template_type])
else:
self.extensions[template_type] = extensions[user_template_type]
def register_options(self, template_types):
# need the value of 'template_types'
# see test_get_user_defined_engine for help
self.options_registry.update(template_types)
def get_engine(self, template_type, template_dirs, context_dirs):
template_dirs = list(expand_template_directories(template_dirs))
template_dirs = utils.verify_the_existence_of_directories(
template_dirs
)
if template_type in self.options_registry:
custom_engine_spec = self.options_registry[template_type]
engine_cls = self.load_me_now(
custom_engine_spec[constants.TEMPLATE_TYPES_BASE_TYPE]
)
options = custom_engine_spec[constants.TEMPLATE_TYPES_OPTIONS]
else:
engine_cls = self.load_me_now(template_type)
engine_extensions = self.extensions.get(template_type)
if engine_extensions:
options = dict(extensions=list(engine_extensions))
else:
options = dict()
template_fs = file_system.get_multi_fs(template_dirs)
engine = engine_cls(template_fs, options)
return MobanEngine(template_fs, context_dirs, engine)
def get_primary_key(self, template_type):
for key, item in self.options_registry.items():
if template_type in item[constants.TEMPLATE_TYPES_FILE_EXTENSIONS]:
return key
return super(MobanFactory, self).get_primary_key(template_type)
def all_types(self):
return list(self.registry.keys()) + list(self.options_registry.keys())
def raise_exception(self, key):
raise exceptions.NoThirdPartyEngine(key)
class MobanEngine(object):
def __init__(self, template_fs, context_dirs, engine):
context_dirs = expand_template_directory(context_dirs)
self.context = Context(context_dirs)
self.template_fs = template_fs
self.engine = engine
self.templated_count = 0
self.file_count = 0
self.buffered_writer = BufferedWriter()
self.engine_action = getattr(
engine,
"ACTION_IN_PRESENT_CONTINUOUS_TENSE",
constants.LABEL_MOBAN_ACTION_IN_PRESENT_CONTINUOUS_TENSE,
)
self.engine_actioned = getattr(
engine,
"ACTION_IN_PAST_TENSE",
constants.LABEL_MOBAN_ACTION_IN_PAST_TENSE,
)
self.fall_out_targets = []
def report(self):
if self.templated_count == 0:
reporter.report_no_action()
elif self.templated_count == self.file_count:
reporter.report_full_run(self.engine_actioned, self.file_count)
else:
reporter.report_partial_run(
self.engine_actioned, self.templated_count, self.file_count
)
def number_of_templated_files(self):
return self.templated_count
def render_to_file(self, template_file, data_file, output_file):
data = self.context.get_data(data_file)
template = self.engine.get_template(template_file)
try:
template_abs_path = self.template_fs.geturl(
template_file, purpose="fs"
)
except ResourceNotFound:
template_abs_path = template_file
flag = self.apply_template(
template_abs_path, template, data, output_file
)
if flag:
reporter.report_templating(
self.engine_action, template_file, output_file
)
self.templated_count += 1
self.file_count += 1
self.buffered_writer.close()
def render_string_to_file(
self, template_in_string, data_file, output_file
):
template = self.engine.get_template_from_string(template_in_string)
template_abs_path = f"{template_in_string[:10]}..."
data = self.context.get_data(data_file)
flag = self.apply_template(
template_abs_path, template, data, output_file
)
if flag:
reporter.report_templating(
self.engine_action, template_abs_path, output_file
)
self.templated_count += 1
self.file_count += 1
self.buffered_writer.close()
def apply_template(self, template_abs_path, template, data, output_file):
# render the content
rendered_content = self.engine.apply_template(
template, data, output_file
)
# convert to utf8 if not already
if not isinstance(rendered_content, bytes):
rendered_content = rendered_content.encode("utf-8")
# attempt to output to the file and printing to stdout instead
# if not found
try:
# check if any of the files have changed
flag = HASH_STORE.is_file_changed(
output_file, rendered_content, template_abs_path
)
# if they have re-render things
if flag:
# write the content to the output file
self.buffered_writer.write_file_out(
output_file, rendered_content
)
# attempt to copy the file permissions of the template
# file to the output file
# if it isn't an archive proceed or stdout
if (
not file_system.is_zip_alike_url(output_file)
and output_file != "-"
):
try:
file_system.file_permissions_copy(
template_abs_path, output_file
)
except exceptions.NoPermissionsNeeded:
# HttpFs does not have getsyspath
# zip, tar have no permission
# win32 does not work
pass
return flag
except exceptions.FileNotFound:
# the template is a string from command line
LOG.info(f"{template_abs_path} is not a file")
self.buffered_writer.write_file_out(output_file, rendered_content)
return True
def render_to_files(self, array_of_template_targets):
sta = Strategy(array_of_template_targets)
sta.process()
choice = sta.what_to_do()
if choice == Strategy.DATA_FIRST:
self._render_with_finding_data_first(sta.data_file_index)
else:
self._render_with_finding_template_first(sta.template_file_index)
self.buffered_writer.close()
def _render_with_finding_template_first(self, template_file_index):
for (template_file, data_output_pairs) in template_file_index.items():
try:
template = self.engine.get_template(template_file)
template_abs_path = self.template_fs.geturl(
template_file, purpose="fs"
)
for (data_file, output) in data_output_pairs:
data = self.context.get_data(data_file)
flag = self.apply_template(
template_abs_path, template, data, output
)
if flag:
reporter.report_templating(
self.engine_action, template_file, output
)
self.templated_count += 1
self.file_count += 1
except exceptions.PassOn as e:
LOG.info(e)
for (data_file, output) in data_output_pairs:
self.fall_out_targets.append(
TemplateTarget(
template_file,
data_file,
output,
template_type=constants.TEMPLATE_COPY,
)
)
reporter.report_info_message(
f"{self.engine_action} is switched to copy:"
+ f" {template_file} to {output}"
)
continue
def _render_with_finding_data_first(self, data_file_index):
for (data_file, template_output_pairs) in data_file_index.items():
data = self.context.get_data(data_file)
for (template_file, output) in template_output_pairs:
try:
template = self.engine.get_template(template_file)
if isinstance(template, bool):
if template:
reporter.report_templating(
self.engine_action, template_file, None
)
self.templated_count += 1
else:
template_abs_path = self.template_fs.geturl(
template_file, purpose="fs"
)
flag = self.apply_template(
template_abs_path, template, data, output
)
if flag:
reporter.report_templating(
self.engine_action, template_file, output
)
self.templated_count += 1
self.file_count += 1
except exceptions.PassOn:
self.fall_out_targets.append(
TemplateTarget(
template_file,
data_file,
output,
template_type=constants.TEMPLATE_COPY,
)
)
reporter.report_info_message(
f"{self.engine_action} is switched to copy:"
+ f" {template_file} to {output}"
)
continue
def expand_template_directories(dirs):
LOG.debug(f"Expanding {dirs}...")
if not isinstance(dirs, list):
dirs = [dirs]
for directory in dirs:
yield expand_template_directory(directory)
def expand_template_directory(directory):
LOG.debug(f"Expanding {directory}...")
translated_directory = None
if ":" in directory and directory[1] != ":" and "://" not in directory:
translated_directory = deprecated_moban_path_notation(directory)
elif "://" in directory:
translated_directory = directory
else:
# local template path
translated_directory = os.path.normcase(os.path.abspath(directory))
translated_directory = file_system.fs_url(translated_directory)
return translated_directory
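# Illustrative behaviour (comment added for clarity; not in the original source):
# a URL-style directory such as 'https://github.com/your/templates' is returned
# unchanged, while a plain relative path such as './templates' is resolved to an
# absolute local path and wrapped by file_system.fs_url(). The example paths are
# hypothetical.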
| chfw/moban | moban/core/moban_factory.py | Python | mit | 12,277 |
#!/usr/bin/env python
import ConfigParser, os, smtplib, time
try:
from email.mime.text import MIMEText
except ImportError:
from email.MIMEText import MIMEText
config = ConfigParser.ConfigParser({'expectedRunningTime' : '-1', 'mailServer' : '', 'runningTimeAllowedDelay' : '0'})
config.read([ os.path.join(os.path.expanduser('~'), '.pygrrc'), os.path.join(os.path.expanduser('~'), 'pygr.cfg'), '.pygrrc', 'pygr.cfg' ])
expectedRunningTime = config.get('megatests', 'expectedRunningTime')
logdir = config.get('megatests', 'logDir')
mailsender = config.get('megatests', 'mailFrom')
mailserver = config.get('megatests', 'mailServer')
maillist_fail = config.get('megatests', 'mailTo_failed')
maillist_pass = config.get('megatests', 'mailTo_ok')
runningTimeAllowedDelay = config.get('megatests', 'runningTimeAllowedDelay')
timeStr = time.ctime()
dateStr = ' '.join([ix for ix in timeStr.split(' ') if ':' not in ix])
# Gather the runner script's output
os.chdir(logdir)
sendStr = 'MEGATEST report, generated ' + timeStr + '\n\n'
sendStr += 'Test started: ' + open('tmp1_megatest.log', 'r').readlines()[0]
sendStr += 'PYTHONPATH = ' + open('tmp3_megatest.log', 'r').read() + '\n'
sendStr += 'Output of standard tests:\n' + ''.join(open('tmp2_megatest.log', 'r').readlines()[-5:]) + '\n\n'
sendStr += 'Output of megatests:\n' + ''.join(open('tmp4_megatest.log', 'r').readlines()[-5:]) + '\n\n'
sendStr += 'Test finished: ' + open('tmp5_megatest.log', 'r').readlines()[0] + '\n'
# Try to determine whether the test has failed or not
nError = 0
abnormalStop = 0
# Compare running time with expectations, mark test as failed if it took
# significantly longer than it should (some latitude is given to account
# for fluctuations due to machine/network/... load).
# Unlike later on, increment abnormalStop first and decrement it in case
# of failure - it's cleaner than the other way around.
abnormalStop += 1
expectedRunningTime = float(expectedRunningTime)
if expectedRunningTime >= 0.:
startTime = int(open('tmp1_megatest.log', 'r').readlines()[1].split(':')[1].strip())
endTime = int(open('tmp5_megatest.log', 'r').readlines()[1].split(':')[1].strip())
if runningTimeAllowedDelay[-1] == '%':
maxRunningTime = expectedRunningTime * (1 + float(runningTimeAllowedDelay[:-1]) / 100.)
else:
maxRunningTime = expectedRunningTime + float(runningTimeAllowedDelay)
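    # Worked example (comment added for clarity): with expectedRunningTime = 60,
    # a delay of '10%' caps the run at 66 minutes, while a plain '15' caps it at 75.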
runMinutes = (endTime - startTime) / 60.
if runMinutes > maxRunningTime:
sendStr += '\n#####################################################################\n'
sendStr += ('ERROR: megatests took %s minutes to complete, expected %s minutes' % (runMinutes, expectedRunningTime))
sendStr += '\n#####################################################################\n'
abnormalStop -= 1
for lines in sendStr.splitlines():
# Standard-test output
if lines[:4] == 'INFO' and 'passed' in lines and 'failed' in lines and 'skipped' in lines:
nError += int(lines[18:].split(',')[1].strip().split(' ')[0])
abnormalStop += 1
# Megatest output
if lines[:6] == 'FINAL:':
nError += int(lines[7:30].split(' ')[0])
abnormalStop += 1
if nError == 0 and abnormalStop == 3:
maillist = maillist_pass
else:
maillist = maillist_fail
# Create and send the message
msg = MIMEText(sendStr)
msg['From'] = mailsender
msg['To'] = maillist
msg['Subject'] = 'Megatest on ' + dateStr + ' with ' + str(nError) + ' Errors'
s = smtplib.SMTP(mailserver)
s.connect()
s.sendmail(mailsender, maillist.replace(',', ' ').split(), msg.as_string())
s.close()
| ctb/pygr | tests/tools/send_megatest_email.py | Python | bsd-3-clause | 3,607 |
###
# Use functions that contain SELECT, INSERT, UPDATE, DELETE
# on a table for which the USER has GRANTs (possible).
###
import os, sys
try:
from MonetDBtesting import process
except ImportError:
import process
clt = process.client('sql', user = 'my_user', passwd = 'p1',
stdin = open(os.path.join(os.getenv('RELSRCDIR'), os.pardir, 'test_privs2.sql')),
stdout = process.PIPE, stderr = process.PIPE)
out, err = clt.communicate()
sys.stdout.write(out)
sys.stderr.write(err)
| zyzyis/monetdb | sql/test/Users/Tests/test_privs2_p1.SQL.py | Python | mpl-2.0 | 527 |
# coding=utf-8
"""Episode classes."""
from __future__ import unicode_literals
import logging
import os.path
import re
import time
import traceback
import warnings
from builtins import str
from datetime import date, datetime
import knowit
from medusa import (
app,
db,
helpers,
network_timezones,
notifiers,
post_processor,
subtitles,
)
from medusa.common import (
ARCHIVED,
DOWNLOADED,
FAILED,
NAMING_DUPLICATE,
NAMING_EXTEND,
NAMING_LIMITED_EXTEND,
NAMING_LIMITED_EXTEND_E_LOWER_PREFIXED,
NAMING_LIMITED_EXTEND_E_UPPER_PREFIXED,
NAMING_SEPARATED_REPEAT,
Quality,
SKIPPED,
SNATCHED,
SNATCHED_BEST,
SNATCHED_PROPER,
UNAIRED,
UNSET,
WANTED,
statusStrings,
)
from medusa.helper.common import (
dateFormat,
dateTimeFormat,
episode_num,
remove_extension,
replace_extension,
sanitize_filename,
try_int,
)
from medusa.helper.exceptions import (
EpisodeDeletedException,
EpisodeNotFoundException,
MultipleEpisodesInDatabaseException,
NoNFOException,
ex,
)
from medusa.indexers.api import indexerApi
from medusa.indexers.config import indexerConfig
from medusa.indexers.exceptions import (
IndexerEpisodeNotFound,
IndexerError,
IndexerSeasonNotFound,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.name_parser.parser import (
InvalidNameException,
InvalidShowException,
NameParser,
)
from medusa.sbdatetime import sbdatetime
from medusa.scene_numbering import (
get_scene_absolute_numbering,
get_scene_numbering,
)
from medusa.search.queue import FailedQueueItem
from medusa.tv.base import Identifier, TV
from six import itervalues, viewitems
try:
import xml.etree.cElementTree as ETree
except ImportError:
import xml.etree.ElementTree as ETree
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class EpisodeNumber(Identifier):
"""Episode Number: season/episode, absolute or air by date."""
date_fmt = '%Y-%m-%d'
regex = re.compile(r'\b(?:(?P<air_date>\d{4}-\d{2}-\d{2})|'
r'(?:s(?P<season>\d{1,4}))(?:e(?P<episode>\d{1,4}))|'
r'(?:e(?P<abs_episode>\d{1,4})))\b', re.IGNORECASE)
@classmethod
def from_slug(cls, slug):
"""Create episode number from slug. E.g.: s01e02."""
match = cls.regex.match(slug)
if match:
try:
result = {k: int(v) if k != 'air_date' else datetime.strptime(v, cls.date_fmt)
for k, v in viewitems(match.groupdict()) if v is not None}
if result:
if 'air_date' in result:
return AirByDateNumber(**result)
if 'season' in result and 'episode' in result:
return RelativeNumber(**result)
if 'abs_episode' in result:
return AbsoluteNumber(**result)
except ValueError:
pass
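    # Illustrative results (comment added for clarity; not in the original file):
    #   EpisodeNumber.from_slug('s01e02')     -> RelativeNumber(season=1, episode=2)
    #   EpisodeNumber.from_slug('e12')        -> AbsoluteNumber(abs_episode=12)
    #   EpisodeNumber.from_slug('2017-05-20') -> AirByDateNumber(air_date=datetime(2017, 5, 20))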
class RelativeNumber(Identifier):
"""Regular episode number: season and episode."""
def __init__(self, season, episode):
"""Constructor.
:param season:
:type season: int
:param episode:
:type episode: int
"""
self.season = season
self.episode = episode
def __bool__(self):
"""Magic method."""
return self.season is not None and self.episode is not None
def __repr__(self):
"""Magic method."""
return '<RelativeNumber [s{0:02d}e{1:02d}]>'.format(self.season, self.episode)
def __str__(self):
"""Magic method."""
return 's{0:02d}e{1:02d}'.format(self.season, self.episode)
def __hash__(self):
"""Magic method."""
return hash((self.season, self.episode))
def __eq__(self, other):
"""Magic method."""
return isinstance(other, RelativeNumber) and (
self.season == other.season and self.episode == other.episode)
class AbsoluteNumber(EpisodeNumber):
"""Episode number class that handles absolute episode numbers."""
def __init__(self, abs_episode):
"""Constructor.
:param abs_episode:
:type abs_episode: int
"""
self.episode = abs_episode
def __bool__(self):
"""Magic method."""
return self.episode is not None
def __repr__(self):
"""Magic method."""
return '<AbsoluteNumber [e{0:02d}]>'.format(self.episode)
def __str__(self):
"""Magic method."""
return 'e{0:02d}'.format(self.episode)
def __hash__(self):
"""Magic method."""
return hash(self.episode)
def __eq__(self, other):
"""Magic method."""
return isinstance(other, AbsoluteNumber) and self.episode == other.episode
class AirByDateNumber(EpisodeNumber):
"""Episode number class that handles air-by-date episode numbers."""
def __init__(self, air_date):
"""Constructor.
:param air_date:
:type air_date: datetime
"""
self.air_date = air_date
def __bool__(self):
"""Magic method."""
return self.air_date is not None
def __repr__(self):
"""Magic method."""
return '<AirByDateNumber [{0!r}]>'.format(self.air_date)
def __str__(self):
"""Magic method."""
return self.air_date.strftime(self.date_fmt)
def __hash__(self):
"""Magic method."""
return hash(self.air_date)
def __eq__(self, other):
"""Magic method."""
return isinstance(other, AirByDateNumber) and self.air_date == other.air_date
class Episode(TV):
"""Represent a TV Show episode."""
__refactored = {
'show': 'series',
}
def __init__(self, series, season, episode, filepath=''):
"""Instantiate a Episode with database information."""
super(Episode, self).__init__(
int(series.indexer) if series else 0,
int(series.indexerid) if series else 0,
{'series', 'related_episodes', 'wanted_quality'})
self.series = series
self.name = ''
self.season = season
self.episode = episode
self.slug = 's{season:02d}e{episode:02d}'.format(season=self.season, episode=self.episode)
self.absolute_number = 0
self.description = ''
self.subtitles = []
self.subtitles_searchcount = 0
self.subtitles_lastsearch = str(datetime.min)
self.airdate = date.fromordinal(1)
self.hasnfo = False
self.hastbn = False
self._status = UNSET
self.quality = Quality.NA
self.file_size = 0
self.release_name = ''
self.is_proper = False
self.version = 0
self.release_group = ''
self._location = filepath
self._scene_season = None
self._scene_episode = 0
self._scene_absolute_number = 0
self.manually_searched = False
self.related_episodes = []
self.wanted_quality = []
self.watched = False
if series:
self._specify_episode(self.season, self.episode)
self.check_for_meta_files()
def __getattr__(self, item):
"""Get attribute values for deprecated attributes."""
try:
return super(Episode, self).__getattribute__(item)
except AttributeError as error:
try:
refactor = self.__refactored[item]
except KeyError:
raise error
else:
warnings.warn(
'{item} is deprecated, use {refactor} instead \n{trace}'.format(
item=item, refactor=refactor, trace=traceback.print_stack(),
),
DeprecationWarning
)
return super(Episode, self).__getattribute__(refactor)
def __eq__(self, other):
"""Override default equalize implementation."""
return all([self.series.identifier == other.series.identifier,
self.season == other.season,
self.episode == other.episode])
@classmethod
def find_by_series_and_episode(cls, series, episode_number):
"""Find Episode based on series and episode number.
:param series:
:type series: medusa.tv.series.Series
:param episode_number:
:type episode_number: EpisodeNumber
:return:
:rtype: medusa.tv.Episode
"""
if isinstance(episode_number, RelativeNumber):
episode = series.get_episode(season=episode_number.season, episode=episode_number.episode)
elif isinstance(episode_number, AbsoluteNumber):
episode = series.get_episode(absolute_number=episode_number.episode)
elif isinstance(episode_number, AirByDateNumber):
episode = series.get_episode(air_date=episode_number.air_date)
else:
# if this happens then it's a bug!
raise ValueError
if episode:
return episode
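    # Illustrative call (comment added for clarity): combined with EpisodeNumber.from_slug,
    #   Episode.find_by_series_and_episode(series, EpisodeNumber.from_slug('s01e02'))
    # resolves to series.get_episode(season=1, episode=2).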
@staticmethod
def from_filepath(filepath):
"""Return an Episode for the given filepath.
IMPORTANT: The filepath is not kept in the Episode.location
Episode.location should only be set after it's post-processed and it's in the correct location.
        As of now, Episode is also not cached in Series.episodes since this method is only used during postponed post-processing.
        The goal is to gradually move towards using this method to create TVEpisodes. New parameters might be introduced.
:param filepath:
:type filepath: str
:return:
:rtype: Episode
"""
try:
parse_result = NameParser().parse(filepath, cache_result=True)
results = []
if parse_result.series.is_anime and parse_result.ab_episode_numbers:
episodes = (parse_result.series.get_episode(absolute_number=episode_number, should_cache=False)
for episode_number in parse_result.ab_episode_numbers)
results = [ep for ep in episodes if ep is not None]
if not parse_result.series.is_anime and parse_result.episode_numbers:
episodes = (parse_result.series.get_episode(season=parse_result.season_number,
episode=episode_number, should_cache=False)
for episode_number in parse_result.episode_numbers)
results = [ep for ep in episodes if ep is not None]
for episode in results:
episode.related_episodes = list(results[1:])
return episode # only root episode has related_episodes
except (InvalidNameException, InvalidShowException):
log.warning('Cannot create Episode from path {path}',
{'path': filepath})
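    # Illustrative usage (comment added; the file name is hypothetical):
    #   episode = Episode.from_filepath('/downloads/Some.Show.S01E02.720p.mkv')
    # returns the parsed root episode (multi-episode files are exposed via
    # episode.related_episodes) or None if the name cannot be parsed.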
@property
def identifier(self):
"""Return the episode identifier.
:return:
:rtype: string
"""
if self.series.air_by_date and self.airdate != date.fromordinal(1):
return self.airdate.strftime(dateFormat)
if self.series.is_anime and self.absolute_number is not None:
return 'e{0:02d}'.format(self.absolute_number)
return 's{0:02d}e{1:02d}'.format(self.season, self.episode)
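    # Illustrative values (comment added for clarity): a regular episode yields
    # 's03e07', an anime episode with an absolute number yields 'e42', and an
    # air-by-date episode yields its air date formatted with dateFormat.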
@property
def location(self):
"""Return the location.
:return:
:rtype: location
"""
return self._location
@location.setter
def location(self, value):
old_location = os.path.normpath(self._location)
new_location = os.path.normpath(value)
if value and self.is_location_valid(new_location):
self.file_size = os.path.getsize(new_location)
else:
self._location = ''
self.file_size = 0
return
if new_location == old_location:
return
log.debug('{id}: Setter sets location to {location}',
{'id': self.series.series_id, 'location': new_location})
self._location = new_location
@property
def indexer_name(self):
"""Return the indexer name identifier. Example: tvdb."""
return indexerConfig[self.indexer].get('identifier')
@property
def air_date(self):
"""Return air date from the episode."""
if self.airdate == date.min:
return None
date_parsed = sbdatetime.convert_to_setting(
network_timezones.parse_date_time(
date.toordinal(self.airdate),
self.series.airs,
self.series.network)
)
return date_parsed.isoformat()
@property
def status(self):
"""Return the episode status."""
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def status_name(self):
"""Return the status name."""
return statusStrings[self.status]
@property
def scene_season(self):
"""Return the scene season."""
if self._scene_season is None:
return self.season
return self._scene_season
@scene_season.setter
def scene_season(self, value):
"""Set the scene season."""
self._scene_season = try_int(value, None)
@property
def scene_episode(self):
"""Return the scene episode."""
if not self._scene_episode:
return self.episode
return self._scene_episode
@scene_episode.setter
def scene_episode(self, value):
"""Set the scene episode."""
self._scene_episode = try_int(value, 0)
@property
def scene_absolute_number(self):
"""Return the scene absolute number."""
if not self._scene_absolute_number:
return self.absolute_number
return self._scene_absolute_number
@scene_absolute_number.setter
def scene_absolute_number(self, value):
"""Set the scene absolute number."""
self._scene_absolute_number = try_int(value, 0)
@property
def quality_name(self):
"""Return the status name."""
return Quality.qualityStrings[self.quality]
def is_location_valid(self, location=None):
"""Whether the location is a valid file.
:param location:
:type location: str
:return:
:rtype: bool
"""
return os.path.isfile(location or self._location)
def metadata(self):
"""Return the video metadata."""
try:
return knowit.know(self.location)
except knowit.KnowitException as error:
log.warning(
'An error occurred while parsing: {path}\n'
'KnowIt reported:\n{report}', {
'path': self.location,
'report': error,
})
return {}
def refresh_subtitles(self):
"""Look for subtitles files and refresh the subtitles property."""
current_subtitles = subtitles.get_current_subtitles(self)
ep_num = (episode_num(self.season, self.episode) or
episode_num(self.season, self.episode, numbering='absolute'))
if self.subtitles == current_subtitles:
log.debug(
'{id}: No changed subtitles for {series} {ep}. Current subtitles: {subs}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': ep_num,
'subs': current_subtitles
}
)
else:
log.debug(
'{id}: Subtitle changes detected for {series} {ep}.'
' Current subtitles: {subs}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': ep_num,
'subs': current_subtitles
}
)
self.subtitles = current_subtitles if current_subtitles else []
log.debug('{id}: Saving subtitles changes to database',
{'id': self.series.series_id})
self.save_to_db()
def download_subtitles(self, lang=None):
"""Download subtitles.
:param lang:
:type lang: string
"""
if not self.is_location_valid():
log.debug(
'{id}: {series} {ep} does not exist, unable to download subtitles', {
'id': self.series.series_id,
'series': self.series.name,
'ep': (episode_num(self.season, self.episode) or
episode_num(self.season, self.episode, numbering='absolute')),
}
)
return
new_subtitles = subtitles.download_subtitles(self, lang=lang)
if new_subtitles:
self.subtitles = subtitles.merge_subtitles(self.subtitles, new_subtitles)
            self.subtitles_searchcount += 1
self.subtitles_lastsearch = datetime.now().strftime(dateTimeFormat)
log.debug('{id}: Saving last subtitles search to database',
{'id': self.series.series_id})
self.save_to_db()
if new_subtitles:
subtitle_list = ', '.join([subtitles.name_from_code(code) for code in new_subtitles])
log.info(
'{id}: Downloaded {subs} subtitles for {series} {ep}', {
'id': self.series.series_id,
'subs': subtitle_list,
'series': self.series.name,
'ep': (episode_num(self.season, self.episode) or
episode_num(self.season, self.episode, numbering='absolute')),
}
)
notifiers.notify_subtitle_download(self, subtitle_list)
else:
log.info(
'{id}: No subtitles found for {series} {ep}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': (episode_num(self.season, self.episode) or
episode_num(self.season, self.episode, numbering='absolute')),
}
)
return new_subtitles
def check_for_meta_files(self):
"""Check Whether metadata files has changed. And write the current set self.hasnfo and set.hastbn.
:return: Whether a database update should be done on the episode.
:rtype: bool
"""
oldhasnfo = self.hasnfo
oldhastbn = self.hastbn
all_nfos = []
all_tbns = []
# check for nfo and tbn
if not self.is_location_valid():
return False
for metadata_provider in itervalues(app.metadata_provider_dict):
if metadata_provider.episode_metadata:
new_result = metadata_provider.has_episode_metadata(self)
else:
new_result = False
all_nfos.append(new_result)
if metadata_provider.episode_thumbnails:
new_result = metadata_provider.has_episode_thumb(self)
else:
new_result = False
all_tbns.append(new_result)
self.hasnfo = any(all_nfos)
self.hastbn = any(all_tbns)
changed = oldhasnfo != self.hasnfo or oldhastbn != self.hastbn
if changed:
self.save_to_db()
return changed
def _specify_episode(self, season, episode):
sql_results = self.load_from_db(season, episode)
if not sql_results:
# only load from NFO if we didn't load from DB
if self.is_location_valid():
try:
self.__load_from_nfo(self.location)
except NoNFOException:
log.error(
'{id}: There was an error loading the NFO for {series} {ep}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
}
)
# if we tried loading it from NFO and didn't find the NFO, try the Indexers
if not self.hasnfo:
try:
result = self.load_from_indexer(season, episode)
except EpisodeDeletedException:
result = False
            # if we failed SQL *and* NFO *and* the Indexers, then fail
if not result:
raise EpisodeNotFoundException('{id}: Unable to find {series} {ep}'.format
(id=self.series.series_id, series=self.series.name,
ep=episode_num(season, episode)))
def load_from_db(self, season, episode):
"""Load episode information from database.
:param season:
:type season: int
:param episode:
:type episode: int
:return:
:rtype: bool
"""
if not self.dirty:
return True
main_db_con = db.DBConnection()
sql_results = main_db_con.select(
'SELECT '
' * '
'FROM '
' tv_episodes '
'WHERE '
' indexer = ? '
' AND showid = ? '
' AND season = ? '
' AND episode = ?', [self.series.indexer, self.series.series_id, season, episode])
if len(sql_results) > 1:
            raise MultipleEpisodesInDatabaseException('Your DB has two records for the same episode somehow.')
elif not sql_results:
log.debug(
'{id}: {series} {ep} not found in the database', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(self.season, self.episode),
}
)
return False
else:
self.name = sql_results[0]['name'] or ''
self.season = season
self.episode = episode
self.absolute_number = sql_results[0]['absolute_number']
self.description = sql_results[0]['description'] or ''
if sql_results[0]['subtitles']:
self.subtitles = sql_results[0]['subtitles'].split(',')
self.subtitles_searchcount = sql_results[0]['subtitles_searchcount']
self.subtitles_lastsearch = sql_results[0]['subtitles_lastsearch']
self.airdate = date.fromordinal(int(sql_results[0]['airdate']))
self.status = int(sql_results[0]['status'] or UNSET)
self.quality = int(sql_results[0]['quality'] or Quality.NA)
self.file_size = int(sql_results[0]['file_size'] or 0)
if sql_results[0]['release_name'] is not None:
self.release_name = sql_results[0]['release_name']
if sql_results[0]['is_proper']:
self.is_proper = int(sql_results[0]['is_proper'])
if sql_results[0]['version']:
self.version = int(sql_results[0]['version'])
if sql_results[0]['release_group'] is not None:
self.release_group = sql_results[0]['release_group']
if sql_results[0]['location']:
self._location = sql_results[0]['location']
self.manually_searched = bool(sql_results[0]['manually_searched'])
self.watched = bool(sql_results[0]['watched'])
self.indexerid = int(sql_results[0]['indexerid'])
self.indexer = int(sql_results[0]['indexer'])
self.scene_season = sql_results[0]['scene_season']
self.scene_episode = sql_results[0]['scene_episode']
self.scene_absolute_number = sql_results[0]['scene_absolute_number']
if self.scene_absolute_number == 0:
self.scene_absolute_number = get_scene_absolute_numbering(
self.series,
self.absolute_number
)
if self.series.is_scene:
self._load_scene_numbering()
self.reset_dirty()
return True
def _load_scene_numbering(self):
scene_mapping = get_scene_numbering(
self.series, self.season, self.episode
)
if all([scene_mapping[0] is not None, scene_mapping[1]]):
self.scene_season = scene_mapping[0]
self.scene_episode = scene_mapping[1]
def set_indexer_data(self, season=None, indexer_api=None):
"""Set episode information from indexer.
:param season:
:param indexer_api:
:rtype: bool
"""
if season is None:
season = self.season
if indexer_api is None or indexer_api.indexer != self.series.indexer_api.indexer:
api = self.series.indexer_api
else:
api = indexer_api
try:
api._get_episodes(self.series.series_id, aired_season=season)
except IndexerError as error:
log.warning(
'{id}: {indexer} threw up an error: {error_msg}', {
'id': self.series.series_id,
'indexer': indexerApi(self.indexer).name,
'error_msg': ex(error),
}
)
return False
return True
def load_from_indexer(self, season=None, episode=None, tvapi=None, cached_season=None):
"""Load episode information from indexer.
:param season:
:type season: int
:param episode:
:type episode: int
:param tvapi:
:param cached_season:
:return:
:rtype: bool
"""
if season is None:
season = self.season
if episode is None:
episode = self.episode
try:
if cached_season:
my_ep = cached_season[episode]
else:
series = self.series.indexer_api[self.series.series_id]
my_ep = series[season][episode]
except (IndexerError, IOError) as error:
log.warning(
'{id}: {indexer} threw up an error: {error_msg}', {
'id': self.series.series_id,
'indexer': indexerApi(self.indexer).name,
'error_msg': ex(error),
}
)
# if the episode is already valid just log it, if not throw it up
if self.name:
log.debug(
'{id}: {indexer} timed out but we have enough info from other sources, allowing the error', {
'id': self.series.series_id,
'indexer': indexerApi(self.indexer).name,
}
)
return
else:
log.warning(
'{id}: {indexer} timed out, unable to create the episode', {
'id': self.series.series_id,
'indexer': indexerApi(self.indexer).name,
}
)
return False
except (IndexerEpisodeNotFound, IndexerSeasonNotFound):
log.debug(
'{id}: Unable to find the episode on {indexer}. Deleting it from db', {
'id': self.series.series_id,
'indexer': indexerApi(self.indexer).name,
}
)
# if I'm no longer on the Indexers but I once was then delete myself from the DB
if self.indexerid != -1:
self.delete_episode()
return
if getattr(my_ep, 'episodename', None) is None:
log.info(
'{id}: {series} {ep} has no name on {indexer}. Setting to an empty string', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'indexer': indexerApi(self.indexer).name,
}
)
setattr(my_ep, 'episodename', '')
if getattr(my_ep, 'absolute_number', None) is None:
log.debug(
'{id}: {series} {ep} has no absolute number on {indexer}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'indexer': indexerApi(self.indexer).name,
}
)
else:
self.absolute_number = int(my_ep['absolute_number'])
log.debug(
'{id}: {series} {ep} has absolute number: {absolute} ', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'absolute': self.absolute_number,
}
)
self.name = getattr(my_ep, 'episodename', '')
self.season = season
self.episode = episode
self.scene_absolute_number = get_scene_absolute_numbering(
self.series,
self.absolute_number
)
        # TODO: Unclear why scene_season and scene_episode are looked up in the db
        # here when the show info is being fetched from the indexer.
self._load_scene_numbering()
self.description = getattr(my_ep, 'overview', '')
firstaired = getattr(my_ep, 'firstaired', None)
if not firstaired or firstaired == '0000-00-00':
firstaired = str(date.fromordinal(1))
raw_airdate = [int(x) for x in firstaired.split('-')]
try:
self.airdate = date(raw_airdate[0], raw_airdate[1], raw_airdate[2])
except (ValueError, IndexError):
log.warning(
'{id}: Malformed air date of {aired} retrieved from {indexer} for {series} {ep}', {
'id': self.series.series_id,
'aired': firstaired,
'indexer': indexerApi(self.indexer).name,
'series': self.series.name,
'ep': episode_num(season, episode),
}
)
# if I'm incomplete on the indexer but I once was complete then just delete myself from the DB for now
if self.indexerid != -1:
self.delete_episode()
return False
# early conversion to int so that episode doesn't get marked dirty
self.indexerid = getattr(my_ep, 'id', None)
if self.indexerid is None:
log.error(
'{id}: Failed to retrieve ID from {indexer}', {
'id': self.series.series_id,
'aired': firstaired,
'indexer': indexerApi(self.indexer).name,
}
)
if self.indexerid != -1:
self.delete_episode()
return False
if self.location:
log.debug(
'{id}: {series} {ep} status is {status!r}. Location: {location}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'status': statusStrings[self.status],
'location': self.location,
}
)
if not os.path.isfile(self.location):
if (self.airdate >= date.today() or self.airdate == date.fromordinal(1)) and \
self.status in (UNSET, UNAIRED, WANTED):
# Need to check if is UNAIRED otherwise code will step into second 'IF'
# and make episode as default_ep_status
# If is a leaked episode and user manually snatched, it will respect status
# If is a fake (manually snatched), when user set as FAILED, status will be WANTED
# and code below will make it UNAIRED again
self.status = UNAIRED
log.debug(
'{id}: {series} {ep} airs in the future or has no air date, marking it {status}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'status': statusStrings[self.status],
}
)
elif self.status in (UNSET, UNAIRED):
# Only do UNAIRED/UNSET, it could already be snatched/ignored/skipped,
# or downloaded/archived to disconnected media
self.status = self.series.default_ep_status if self.season > 0 else SKIPPED # auto-skip specials
log.debug(
'{id}: {series} {ep} has already aired, marking it {status}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'status': statusStrings[self.status],
}
)
else:
log.debug(
'{id}: {series} {ep} status untouched: {status}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'status': statusStrings[self.status],
}
)
# Update the episode's status/quality if a file exists and the status is not SNATCHED|DOWNLOADED|ARCHIVED
elif helpers.is_media_file(self.location):
if self.status not in [SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, DOWNLOADED, ARCHIVED]:
self.update_status_quality(self.location)
else:
log.debug(
'{id}: {series} {ep} status untouched: {status}', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'status': statusStrings[self.status],
}
)
# shouldn't get here probably
else:
log.warning(
'{id}: {series} {ep} status changed from {old_status} to UNSET', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(season, episode),
'old_status': statusStrings[self.status],
}
)
self.status = UNSET
self.save_to_db()
def __load_from_nfo(self, location):
if not self.series.is_location_valid():
log.warning('{id}: The series location {location} is missing, unable to load metadata',
{'id': self.series.series_id, 'location': location})
return
log.debug('{id}: Loading episode details from the NFO file associated with {location}',
{'id': self.series.series_id, 'location': location})
self.location = location
if self.location != '':
if self.status == UNSET and helpers.is_media_file(self.location):
self.update_status_quality(self.location)
nfo_file = replace_extension(self.location, 'nfo')
log.debug('{id}: Using NFO name {nfo}',
{'id': self.series.series_id, 'nfo': nfo_file})
if os.path.isfile(nfo_file):
try:
series_xml = ETree.ElementTree(file=nfo_file)
except (SyntaxError, ValueError) as error:
log.error('{id}: Error loading the NFO, backing up the NFO and skipping for now: {error_msg}',
{'id': self.series.series_id, 'error_msg': ex(error)})
try:
os.rename(nfo_file, nfo_file + '.old')
except Exception as error:
log.warning('{id}: Error renaming the NFO. Delete it or fix it: {error_msg}',
{'id': self.series.series_id, 'error_msg': ex(error)})
raise NoNFOException('Error in NFO format')
for ep_details in list(series_xml.iter('episodedetails')):
if (ep_details.findtext('season') is None or int(ep_details.findtext('season')) != self.season or
ep_details.findtext('episode') is None or
int(ep_details.findtext('episode')) != self.episode):
log.debug(
'{id}: NFO has an <episodedetails> block for a different episode -'
' wanted {ep_wanted} but got {ep_found}', {
'id': self.series.series_id,
'ep_wanted': episode_num(self.season, self.episode),
'ep_found': episode_num(ep_details.findtext('season'),
ep_details.findtext('episode')),
}
)
continue
if ep_details.findtext('title') is None or ep_details.findtext('aired') is None:
raise NoNFOException('Error in NFO format (missing episode title or airdate)')
self.name = ep_details.findtext('title')
self.episode = int(ep_details.findtext('episode'))
self.season = int(ep_details.findtext('season'))
self.scene_absolute_number = get_scene_absolute_numbering(
self.series,
self.absolute_number
)
self._load_scene_numbering()
self.description = ep_details.findtext('plot')
if self.description is None:
self.description = ''
if ep_details.findtext('aired'):
raw_airdate = [int(x) for x in ep_details.findtext('aired').split('-')]
self.airdate = date(raw_airdate[0], raw_airdate[1], raw_airdate[2])
else:
self.airdate = date.fromordinal(1)
self.hasnfo = True
else:
self.hasnfo = False
self.hastbn = bool(os.path.isfile(replace_extension(nfo_file, 'tbn')))
self.save_to_db()
def __str__(self):
"""Represent a string.
:return:
:rtype: unicode
"""
result = ''
result += '%r - %r - %r\n' % (self.series.name, episode_num(self.season, self.episode), self.name)
result += 'location: %r\n' % self.location
result += 'description: %r\n' % self.description
result += 'subtitles: %r\n' % ','.join(self.subtitles)
result += 'subtitles_searchcount: %r\n' % self.subtitles_searchcount
result += 'subtitles_lastsearch: %r\n' % self.subtitles_lastsearch
result += 'airdate: %r (%r)\n' % (self.airdate.toordinal(), self.airdate)
result += 'hasnfo: %r\n' % self.hasnfo
result += 'hastbn: %r\n' % self.hastbn
result += 'status: %r\n' % self.status
result += 'quality: %r\n' % self.quality
return result
def to_json(self, detailed=True):
"""Return the json representation."""
data = {}
data['identifier'] = self.identifier
data['id'] = {self.indexer_name: self.indexerid}
data['slug'] = self.slug
data['season'] = self.season
data['episode'] = self.episode
if self.absolute_number:
data['absoluteNumber'] = self.absolute_number
data['airDate'] = self.air_date
data['title'] = self.name
data['description'] = self.description
data['subtitles'] = self.subtitles
data['status'] = self.status_name
data['watched'] = bool(self.watched)
data['quality'] = self.quality
data['release'] = {}
data['release']['name'] = self.release_name
data['release']['group'] = self.release_group
data['release']['proper'] = self.is_proper
data['release']['version'] = self.version
data['scene'] = {}
data['scene']['season'] = self.scene_season
data['scene']['episode'] = self.scene_episode
if self.scene_absolute_number:
data['scene']['absoluteNumber'] = self.scene_absolute_number
data['file'] = {}
data['file']['location'] = self.location
data['file']['name'] = os.path.basename(self.location)
if self.file_size:
data['file']['size'] = self.file_size
data['content'] = {}
data['content']['hasNfo'] = self.hasnfo
data['content']['hasTbn'] = self.hastbn
if detailed:
data['statistics'] = {}
data['statistics']['subtitleSearch'] = {}
data['statistics']['subtitleSearch']['last'] = self.subtitles_lastsearch
data['statistics']['subtitleSearch']['count'] = self.subtitles_searchcount
data['wantedQualities'] = self.wanted_quality
data['related'] = self.related_episodes
if self.file_size:
# Used by the test-rename vue component.
data['file']['properPath'] = self.proper_path()
return data
def create_meta_files(self):
"""Create episode metadata files."""
if not self.series.is_location_valid():
log.warning('{id}: The series directory is missing, unable to create metadata',
{'id': self.series.series_id})
return
for metadata_provider in itervalues(app.metadata_provider_dict):
self.__create_nfo(metadata_provider)
self.__create_thumbnail(metadata_provider)
if self.check_for_meta_files():
log.debug('{id}: Saving metadata changes to database',
{'id': self.series.series_id})
def __create_nfo(self, metadata_provider):
result = False
# Only call .values() on metadata_provider_dict! Calling values() resets the indexer_api
# attribute, which prevents errors when using multiple indexers and caching.
result = metadata_provider.create_episode_metadata(self) or result
return result
def __create_thumbnail(self, metadata_provider):
result = False
# Only call .values() on metadata_provider_dict! Calling values() resets the indexer_api
# attribute, which prevents errors when using multiple indexers and caching.
result = metadata_provider.create_episode_thumb(self) or result
return result
def delete_episode(self):
"""Delete episode from database."""
log.debug(
'{id}: Deleting {series} {ep} from the DB', {
'id': self.series.series_id,
'series': self.series.name,
'ep': episode_num(self.season, self.episode),
}
)
# remove myself from the series dictionary
if self.series.get_episode(self.season, self.episode, no_create=True) == self:
log.debug('{id}: Removing episode from series',
{'id': self.series.series_id})
del self.series.episodes[self.season][self.episode]
# delete myself from the DB
log.debug('{id}: Deleting episode from the database',
{'id': self.series.series_id})
main_db_con = db.DBConnection()
main_db_con.action(
'DELETE FROM tv_episodes '
'WHERE showid = ?'
' AND season = ?'
' AND episode = ?',
[self.series.series_id, self.season, self.episode]
)
raise EpisodeDeletedException()
def get_sql(self):
"""Create SQL queue for this episode if any of its data has been changed since the last save."""
if not self.dirty:
log.debug('{id}: Not creating SQL query - record is not dirty',
{'id': self.series.series_id})
return
try:
main_db_con = db.DBConnection()
rows = main_db_con.select(
'SELECT '
' episode_id, '
' subtitles '
'FROM '
' tv_episodes '
'WHERE '
' indexer = ?'
' AND showid = ? '
' AND season = ? '
' AND episode = ?',
[self.series.indexer, self.series.series_id, self.season, self.episode])
ep_id = None
if rows:
ep_id = int(rows[0]['episode_id'])
if ep_id:
# use a custom update method to get the data into the DB for existing records.
# Multi or added subtitle or removed subtitles
if app.SUBTITLES_MULTI or not rows[0]['subtitles'] or not self.subtitles:
sql_query = [
'UPDATE '
' tv_episodes '
'SET '
' indexerid = ?, '
' indexer = ?, '
' name = ?, '
' description = ?, '
' subtitles = ?, '
' subtitles_searchcount = ?, '
' subtitles_lastsearch = ?, '
' airdate = ?, '
' hasnfo = ?, '
' hastbn = ?, '
' status = ?, '
' quality = ?, '
' location = ?, '
' file_size = ?, '
' release_name = ?, '
' is_proper = ?, '
' showid = ?, '
' season = ?, '
' episode = ?, '
' absolute_number = ?, '
' version = ?, '
' release_group = ?, '
' manually_searched = ?, '
' watched = ? '
'WHERE '
' episode_id = ?',
[self.indexerid, self.indexer, self.name, self.description, ','.join(self.subtitles),
self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo,
self.hastbn, self.status, self.quality, self.location, self.file_size, self.release_name,
self.is_proper, self.series.series_id, self.season, self.episode, self.absolute_number,
self.version, self.release_group, self.manually_searched, self.watched, ep_id]]
else:
# Don't update the subtitle language when the srt file doesn't contain the
# alpha2 code, keep value from subliminal
sql_query = [
'UPDATE '
' tv_episodes '
'SET '
' indexerid = ?, '
' indexer = ?, '
' name = ?, '
' description = ?, '
' subtitles_searchcount = ?, '
' subtitles_lastsearch = ?, '
' airdate = ?, '
' hasnfo = ?, '
' hastbn = ?, '
' status = ?, '
' quality = ?, '
' location = ?, '
' file_size = ?, '
' release_name = ?, '
' is_proper = ?, '
' showid = ?, '
' season = ?, '
' episode = ?, '
' absolute_number = ?, '
' version = ?, '
' release_group = ?, '
' manually_searched = ?, '
' watched = ? '
'WHERE '
' episode_id = ?',
[self.indexerid, self.indexer, self.name, self.description,
self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo,
self.hastbn, self.status, self.quality, self.location, self.file_size, self.release_name,
self.is_proper, self.series.series_id, self.season, self.episode, self.absolute_number,
self.version, self.release_group, self.manually_searched, self.watched, ep_id]]
else:
# use a custom insert method to get the data into the DB.
sql_query = [
'INSERT OR IGNORE INTO '
' tv_episodes '
' (episode_id, '
' indexerid, '
' indexer, '
' name, '
' description, '
' subtitles, '
' subtitles_searchcount, '
' subtitles_lastsearch, '
' airdate, '
' hasnfo, '
' hastbn, '
' status, '
' quality, '
' location, '
' file_size, '
' release_name, '
' is_proper, '
' showid, '
' season, '
' episode, '
' absolute_number, '
' version, '
' release_group, '
' manually_searched, '
' watched) '
'VALUES '
' ((SELECT episode_id FROM tv_episodes WHERE indexer = ? AND showid = ? AND season = ? AND episode = ?), '
' ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);',
[self.series.indexer, self.series.series_id, self.season, self.episode, self.indexerid, self.series.indexer, self.name,
self.description, ','.join(self.subtitles), self.subtitles_searchcount, self.subtitles_lastsearch,
self.airdate.toordinal(), self.hasnfo, self.hastbn, self.status, self.quality, self.location,
self.file_size, self.release_name, self.is_proper, self.series.series_id, self.season, self.episode,
self.absolute_number, self.version, self.release_group, self.manually_searched, self.watched]]
except Exception as error:
log.error('{id}: Error while updating database: {error_msg!r}',
{'id': self.series.series_id, 'error_msg': error})
self.reset_dirty()
return
self.reset_dirty()
return sql_query
def save_to_db(self):
"""Save this episode to the database if any of its data has been changed since the last save."""
if not self.dirty:
return
log.debug('{id}: Saving episode to database: {show} {ep}',
{'id': self.series.series_id,
'show': self.series.name,
'ep': episode_num(self.season, self.episode)})
new_value_dict = {
'indexerid': self.indexerid,
'name': self.name,
'description': self.description,
'subtitles': ','.join(self.subtitles),
'subtitles_searchcount': self.subtitles_searchcount,
'subtitles_lastsearch': self.subtitles_lastsearch,
'airdate': self.airdate.toordinal(),
'hasnfo': self.hasnfo,
'hastbn': self.hastbn,
'status': self.status,
'quality': self.quality,
'location': self.location,
'file_size': self.file_size,
'release_name': self.release_name,
'is_proper': self.is_proper,
'absolute_number': self.absolute_number,
'version': self.version,
'release_group': self.release_group,
'manually_searched': self.manually_searched,
'watched': self.watched,
}
control_value_dict = {
'indexer': self.series.indexer,
'showid': self.series.series_id,
'season': self.season,
'episode': self.episode,
}
# use a custom update/insert method to get the data into the DB
main_db_con = db.DBConnection()
main_db_con.upsert('tv_episodes', new_value_dict, control_value_dict)
self.reset_dirty()
def full_path(self):
"""Return episode full path.
:return:
:rtype: str
"""
if self.location is None or self.location == '':
return None
else:
return os.path.join(self.series.location, self.location)
def pretty_name(self):
"""Return the name of this episode in a "pretty" human-readable format.
Used for logging and notifications and such.
:return: A string representing the episode's name and season/ep numbers
:rtype: str
"""
if self.series.anime and not self.series.scene:
return self._format_pattern('%SN - %AB - %EN')
elif self.series.air_by_date:
return self._format_pattern('%SN - %AD - %EN')
return self._format_pattern('%SN - S%0SE%0E - %EN')
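# Illustrative note (not from the original project): with the default pattern above, a
# regular episode would render roughly as "Show Name - S01E05 - Episode Title", assuming
# %SN, %0S, %0E and %EN expand as described in __replace_map further below.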
def pretty_name_with_quality(self):
"""Return the name of this episode in a "pretty" human-readable format, with quality information.
Used for notifications.
:return: A string representing the episode's name, season/ep numbers and quality
:rtype: str
"""
if self.series.anime and not self.series.scene:
return self._format_pattern('%SN - %AB - %EN - %QN')
elif self.series.air_by_date:
return self._format_pattern('%SN - %AD - %EN - %QN')
return self._format_pattern('%SN - %Sx%0E - %EN - %QN')
def __ep_name(self):
"""Return the name of the episode to use during renaming.
Combines the names of related episodes.
Eg. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name"
"Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name"
:return:
:rtype: str
"""
multi_name_regex = r'(.*) \(\d{1,2}\)'
self.related_episodes = sorted(self.related_episodes, key=lambda rel: rel.episode)
if not self.related_episodes:
good_name = self.name
else:
single_name = True
cur_good_name = None
for cur_name in [self.name] + [x.name for x in self.related_episodes]:
match = re.match(multi_name_regex, cur_name)
if not match:
single_name = False
break
if cur_good_name is None:
cur_good_name = match.group(1)
elif cur_good_name != match.group(1):
single_name = False
break
if single_name:
good_name = cur_good_name
else:
good_name = self.name
for rel_ep in self.related_episodes:
good_name += ' & ' + rel_ep.name
return good_name
def __replace_map(self, show_name=None):
"""Generate a replacement map for this episode.
Maps all possible custom naming patterns to the correct value for this episode.
:return: A dict with patterns as the keys and their replacement values as the values.
:rtype: dict (str -> str)
"""
ep_name = self.__ep_name()
def dot(name):
return helpers.sanitize_scene_name(name)
def us(name):
return re.sub('[ -]', '_', name)
def release_name(name):
if name:
name = remove_extension(name)
return name
def release_group(series, name):
if name:
name = remove_extension(name)
else:
return ''
try:
parse_result = NameParser(series=series, naming_pattern=True).parse(name)
except (InvalidNameException, InvalidShowException) as error:
log.debug('Unable to parse release_group: {error_msg}',
{'error_msg': ex(error)})
return ''
if not parse_result.release_group:
return ''
return parse_result.release_group.strip('.- []{}')
series_name = self.series.name
if show_name:
series_name = show_name
if app.NAMING_STRIP_YEAR:
series_name = re.sub(r'\(\d+\)$', '', series_name).rstrip()
# try to get the release group
rel_grp = {
app.UNKNOWN_RELEASE_GROUP: app.UNKNOWN_RELEASE_GROUP
}
if hasattr(self, 'location'): # from the location name
rel_grp['location'] = release_group(self.series, self.location)
if not rel_grp['location']:
del rel_grp['location']
if hasattr(self, 'release_group'): # from the release group field in db
rel_grp['database'] = self.release_group.strip('.- []{}')
if not rel_grp['database']:
del rel_grp['database']
if hasattr(self, 'release_name'): # from the release name field in db
rel_grp['release_name'] = release_group(self.series, self.release_name)
if not rel_grp['release_name']:
del rel_grp['release_name']
# use release_group, release_name, location in that order
if 'database' in rel_grp:
relgrp = 'database'
elif 'release_name' in rel_grp:
relgrp = 'release_name'
elif 'location' in rel_grp:
relgrp = 'location'
else:
relgrp = app.UNKNOWN_RELEASE_GROUP
# try to get the release encoder to comply with scene naming standards
name = self.release_name.replace(rel_grp[relgrp], '') or remove_extension(os.path.basename(self.location))
encoder = Quality.scene_quality_from_name(name, self.quality)
if encoder:
log.debug('Found codec for {series} {ep}',
{'series': series_name, 'ep': ep_name})
return {
'%SN': series_name,
'%S.N': dot(series_name),
'%S_N': us(series_name),
'%EN': ep_name,
'%E.N': dot(ep_name),
'%E_N': us(ep_name),
'%QN': Quality.qualityStrings[self.quality],
'%Q.N': dot(Quality.qualityStrings[self.quality]),
'%Q_N': us(Quality.qualityStrings[self.quality]),
'%SQN': Quality.scene_quality_strings[self.quality] + encoder,
'%SQ.N': dot(Quality.scene_quality_strings[self.quality] + encoder),
'%SQ_N': us(Quality.scene_quality_strings[self.quality] + encoder),
'%S': str(self.season),
'%0S': '%02d' % self.season,
'%E': str(self.episode),
'%0E': '%02d' % self.episode,
'%XS': str(self.scene_season),
'%0XS': '%02d' % self.scene_season,
'%XE': str(self.scene_episode),
'%0XE': '%02d' % self.scene_episode,
'%AB': '%(#)03d' % {'#': self.absolute_number},
'%XAB': '%(#)03d' % {'#': self.scene_absolute_number},
'%RN': release_name(self.release_name),
'%RG': rel_grp[relgrp],
'%CRG': rel_grp[relgrp].upper(),
'%AD': str(self.airdate).replace('-', ' '),
'%A.D': str(self.airdate).replace('-', '.'),
'%A_D': us(str(self.airdate)),
'%A-D': str(self.airdate),
'%Y': str(self.airdate.year),
'%M': str(self.airdate.month),
'%D': str(self.airdate.day),
'%ADb': str(self.airdate.strftime('%b')),
'%CY': str(date.today().year),
'%CM': str(date.today().month),
'%CD': str(date.today().day),
'%SY': str(self.series.start_year),
'%0M': '%02d' % self.airdate.month,
'%0D': '%02d' % self.airdate.day,
'%RT': 'PROPER' if self.is_proper else '',
}
@staticmethod
def __format_string(pattern, replace_map):
"""Replace all template strings with the correct value.
:param pattern:
:type pattern: str
:param replace_map:
:type replace_map: dict (str -> str)
:return:
:rtype: str
"""
result_name = pattern
# do the replacements
for cur_replacement in sorted(list(replace_map), reverse=True):
result_name = result_name.replace(cur_replacement, sanitize_filename(replace_map[cur_replacement]))
result_name = result_name.replace(cur_replacement.lower(),
sanitize_filename(replace_map[cur_replacement].lower()))
return result_name
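# A minimal illustration of __format_string (hypothetical values, not taken from the
# original code): given replace_map = {'%SN': 'Show Name', '%0E': '05'}, the pattern
# '%SN - E%0E' becomes 'Show Name - E05'; the lowercase form '%sn' in a pattern would
# instead be replaced with the sanitized, lowercased value 'show name'.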
def _format_pattern(self, pattern=None, multi=None, anime_type=None, show_name=None):
"""Manipulate an episode naming pattern and then fills the template in.
:param pattern:
:type pattern: str
:param multi:
:type multi: bool
:param anime_type:
:type anime_type: int
:return:
:rtype: str
"""
if pattern is None:
pattern = app.NAMING_PATTERN
if multi is None:
multi = app.NAMING_MULTI_EP
if app.NAMING_CUSTOM_ANIME:
if anime_type is None:
anime_type = app.NAMING_ANIME
else:
anime_type = 3
replace_map = self.__replace_map(show_name=show_name)
result_name = pattern
# if there's no release group in the db, let the user know we replaced it
if replace_map['%RG'] and replace_map['%RG'] != app.UNKNOWN_RELEASE_GROUP:
if not hasattr(self, 'release_group') or not self.release_group:
log.debug('{id}: Episode has no release group, replacing it with {rg}',
{'id': self.series.series_id, 'rg': replace_map['%RG']})
self.release_group = replace_map['%RG'] # if release_group is not in the db, put it there
# if there's no release name then replace it with a reasonable facsimile
if not replace_map['%RN']:
if self.series.air_by_date or self.series.sports:
result_name = result_name.replace('%RN', '%S.N.%A.D.%E.N-' + replace_map['%RG'])
result_name = result_name.replace('%rn', '%s.n.%A.D.%e.n-' + replace_map['%RG'].lower())
elif anime_type != 3:
result_name = result_name.replace('%RN', '%S.N.%AB.%E.N-' + replace_map['%RG'])
result_name = result_name.replace('%rn', '%s.n.%ab.%e.n-' + replace_map['%RG'].lower())
else:
result_name = result_name.replace('%RN', '%S.N.S%0SE%0E.%E.N-' + replace_map['%RG'])
result_name = result_name.replace('%rn', '%s.n.s%0se%0e.%e.n-' + replace_map['%RG'].lower())
if not replace_map['%RT']:
result_name = re.sub('([ _.-]*)%RT([ _.-]*)', r'\2', result_name)
# split off ep name part only
name_groups = re.split(r'[\\/]', result_name)
# figure out the double-ep numbering style for each group, if applicable
for cur_name_group in name_groups:
season_ep_regex = r"""
(?P<pre_sep>[ _.-]*)
((?:s(?:eason|eries)?\s*)?%0?S(?![._]?N))
(.*?)
(%0?E(?![._]?N))
(?P<post_sep>[ _.-]*)
"""
ep_only_regex = r'(E?%0?E(?![._]?N))'
# try the normal way
season_ep_match = re.search(season_ep_regex, cur_name_group, re.I | re.X)
ep_only_match = re.search(ep_only_regex, cur_name_group, re.I | re.X)
# if we have a season and episode then collect the necessary data
if season_ep_match:
season_format = season_ep_match.group(2)
ep_sep = season_ep_match.group(3)
ep_format = season_ep_match.group(4)
sep = season_ep_match.group('pre_sep')
if not sep:
sep = season_ep_match.group('post_sep')
if not sep:
sep = ' '
# force 2-3-4 format if they chose to extend
if multi in (NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_UPPER_PREFIXED, NAMING_LIMITED_EXTEND_E_LOWER_PREFIXED):
ep_sep = '-'
regex_used = season_ep_regex
# if there's no season then there's not much choice so we'll just force them to use 03-04-05 style
elif ep_only_match:
season_format = ''
ep_sep = '-'
ep_format = ep_only_match.group(1)
sep = ''
regex_used = ep_only_regex
else:
continue
# we need at least this much info to continue
if not ep_sep or not ep_format:
continue
# start with the ep string, eg. E03
ep_string = self.__format_string(ep_format.upper(), replace_map)
for other_ep in self.related_episodes:
# for limited extend we only append the last ep
if multi in (NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_UPPER_PREFIXED, NAMING_LIMITED_EXTEND_E_LOWER_PREFIXED) and \
other_ep != self.related_episodes[-1]:
continue
elif multi == NAMING_DUPLICATE:
# add " - S01"
ep_string += sep + season_format
elif multi == NAMING_SEPARATED_REPEAT:
ep_string += sep
# add "E04"
ep_string += ep_sep
if multi == NAMING_LIMITED_EXTEND_E_UPPER_PREFIXED:
ep_string += 'E'
elif multi == NAMING_LIMITED_EXTEND_E_LOWER_PREFIXED:
ep_string += 'e'
ep_string += other_ep.__format_string(ep_format.upper(), other_ep.__replace_map())
if anime_type != 3:
if self.absolute_number == 0:
cur_absolute_number = self.episode
else:
cur_absolute_number = self.absolute_number
if self.season != 0: # don't set absolute numbers if we are on specials !
if anime_type == 1: # this crazy person wants both ! (note: +=)
ep_string += sep + '%(#)03d' % {
'#': cur_absolute_number}
elif anime_type == 2: # total anime freak only need the absolute number ! (note: =)
ep_string = '%(#)03d' % {'#': cur_absolute_number}
for rel_ep in self.related_episodes:
if rel_ep.absolute_number != 0:
ep_string += '-' + '%(#)03d' % {'#': rel_ep.absolute_number}
else:
ep_string += '-' + '%(#)03d' % {'#': rel_ep.episode}
regex_replacement = None
if anime_type == 2 and regex_used != ep_only_regex:
regex_replacement = r'\g<pre_sep>' + ep_string + r'\g<post_sep>'
elif season_ep_match:
regex_replacement = r'\g<pre_sep>\g<2>\g<3>' + ep_string + r'\g<post_sep>'
elif ep_only_match:
regex_replacement = ep_string
if regex_replacement:
# fill out the template for this piece and then insert this piece into the actual pattern
cur_name_group_result = re.sub('(?i)(?x)' + regex_used, regex_replacement, cur_name_group)
# cur_name_group_result = cur_name_group.replace(ep_format, ep_string)
result_name = result_name.replace(cur_name_group, cur_name_group_result)
parsed_result_name = self.__format_string(result_name, replace_map)
# With the episode name filenames tend to grow very large. Worst case scenario we even need to add `-thumb.jpg`
# to the filename. To make sure we stay under the 255 character limit, we're working with 244 chars, taking into
# account the thumbnail.
if len(parsed_result_name) > 244 and any(['%E.N' in result_name, '%EN' in result_name, '%E_N' in result_name]):
for remove_pattern in ('%E.N', '%EN', '%E_N'):
result_name = result_name.replace(remove_pattern, '')
# The episode name can be appended with a - or . in between. Therefore we remove it
# to create a clean filename.
result_name = result_name.strip('-. ')
parsed_result_name = self.__format_string(result_name, replace_map)
log.debug('{id}: Cutting off the episode name, as the total filename is too long. > 255 chars.',
{'id': self.series.series_id})
log.debug('{id}: Formatting pattern: {pattern} -> {result}',
{'id': self.series.series_id, 'pattern': result_name, 'result': parsed_result_name})
return parsed_result_name
def proper_path(self):
"""Figure out the path where this episode SHOULD be according to the renaming rules, relative from the series dir.
:return:
:rtype: str
"""
anime_type = app.NAMING_ANIME
if not self.series.is_anime:
anime_type = 3
result = self.formatted_filename(anime_type=anime_type)
# if they want us to flatten it and we're allowed to flatten it then we will
if not self.series.season_folders and not app.NAMING_FORCE_FOLDERS:
return result
# if not we append the folder on and use that
else:
result = os.path.join(self.formatted_dir(), result)
return result
def formatted_dir(self, pattern=None, multi=None):
"""Just the folder name of the episode.
:param pattern:
:type pattern: str
:param multi:
:type multi: bool
:return:
:rtype: str
"""
if pattern is None:
# we only use ABD if it's enabled, this is an ABD series, AND this is not a multi-ep
if self.series.air_by_date and app.NAMING_CUSTOM_ABD and not self.related_episodes:
pattern = app.NAMING_ABD_PATTERN
elif self.series.sports and app.NAMING_CUSTOM_SPORTS and not self.related_episodes:
pattern = app.NAMING_SPORTS_PATTERN
elif self.series.anime and app.NAMING_CUSTOM_ANIME:
pattern = app.NAMING_ANIME_PATTERN
else:
pattern = app.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
if len(name_groups) == 1:
return ''
else:
return self._format_pattern(os.sep.join(name_groups[:-1]), multi)
def formatted_filename(self, pattern=None, multi=None, anime_type=None):
"""Just the filename of the episode, formatted based on the naming settings.
:param pattern:
:type pattern: str
:param multi:
:type multi: bool
:param anime_type:
:type anime_type: int
:return:
:rtype: str
"""
if pattern is None:
# we only use ABD if it's enabled, this is an ABD series, AND this is not a multi-ep
if self.series.air_by_date and app.NAMING_CUSTOM_ABD and not self.related_episodes:
pattern = app.NAMING_ABD_PATTERN
elif self.series.sports and app.NAMING_CUSTOM_SPORTS and not self.related_episodes:
pattern = app.NAMING_SPORTS_PATTERN
elif self.series.anime and app.NAMING_CUSTOM_ANIME:
pattern = app.NAMING_ANIME_PATTERN
else:
pattern = app.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
return sanitize_filename(self._format_pattern(name_groups[-1], multi, anime_type))
def formatted_search_string(self, pattern=None, multi=None, anime_type=None, title=None):
"""The search template, formatted based on the tv_show's episode_search_template setting.
:param pattern:
:type pattern: str
:param multi:
:type multi: bool
:param anime_type:
:type anime_type: int
:return:
:rtype: str
"""
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
return sanitize_filename(self._format_pattern(name_groups[-1], multi, anime_type, show_name=title))
def rename(self):
"""Rename an episode file and all related files to the location and filename as specified in naming settings."""
if not self.is_location_valid():
log.warning('{id} Skipping rename, location does not exist: {location}',
{'id': self.indexerid, 'location': self.location})
return
proper_path = self.proper_path()
absolute_proper_path = os.path.join(self.series.location, proper_path)
absolute_current_path_no_ext, file_ext = os.path.splitext(self.location)
absolute_current_path_no_ext_length = len(absolute_current_path_no_ext)
related_subs = []
current_path = absolute_current_path_no_ext
if absolute_current_path_no_ext.startswith(self.series.location):
current_path = absolute_current_path_no_ext[len(self.series.location):]
log.debug(
'{id}: Renaming/moving episode from the base path {location} to {new_location}', {
'id': self.indexerid,
'location': self.location,
'new_location': absolute_proper_path,
}
)
# if it's already named correctly then don't do anything
if proper_path == current_path:
log.debug(
'{id}: File {location} is already named correctly, skipping', {
'id': self.indexerid,
'location': self.location,
'new_location': absolute_proper_path,
}
)
return
related_files = post_processor.PostProcessor(self.location).list_associated_files(
self.location, subfolders=True)
# This is wrong. It is the cause of post-processing not moving subtitles.
if self.series.subtitles and app.SUBTITLES_DIR != '':
related_subs = post_processor.PostProcessor(
self.location).list_associated_files(app.SUBTITLES_DIR, subfolders=True, subtitles_only=True)
log.debug(
'{id} Files associated to {location}: {related_files}', {
'id': self.indexerid,
'location': self.location,
'related_files': related_files
}
)
# move the ep file
result = helpers.rename_ep_file(self.location, absolute_proper_path, absolute_current_path_no_ext_length)
# move related files
for cur_related_file in related_files:
# We need to fix something here because related files can be in subfolders
# and the original code doesn't handle this (at all)
cur_related_dir = os.path.dirname(os.path.abspath(cur_related_file))
subfolder = cur_related_dir.replace(os.path.dirname(os.path.abspath(self.location)), '')
# We now have a subfolder. We need to add that to the absolute_proper_path.
# First get the absolute proper-path dir
proper_related_dir = os.path.dirname(os.path.abspath(absolute_proper_path + file_ext))
proper_related_path = absolute_proper_path.replace(proper_related_dir, proper_related_dir + subfolder)
cur_result = helpers.rename_ep_file(cur_related_file, proper_related_path,
absolute_current_path_no_ext_length + len(subfolder))
if not cur_result:
log.warning('{id}: Unable to rename file {cur_file}',
{'id': self.indexerid, 'cur_file': cur_related_file})
for cur_related_sub in related_subs:
absolute_proper_subs_path = os.path.join(app.SUBTITLES_DIR, self.formatted_filename())
cur_result = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path,
absolute_current_path_no_ext_length)
if not cur_result:
log.warning('{id}: Unable to rename file {cur_file}',
{'id': self.indexerid, 'cur_file': cur_related_sub})
# save the ep
with self.lock:
if result:
self.location = absolute_proper_path + file_ext
for rel_ep in self.related_episodes:
rel_ep.location = absolute_proper_path + file_ext
# in case something changed with the metadata just do a quick check
for cur_ep in [self] + self.related_episodes:
cur_ep.check_for_meta_files()
# save any changes to the database
sql_l = []
with self.lock:
for rel_ep in [self] + self.related_episodes:
sql_l.append(rel_ep.get_sql())
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
def airdate_modify_stamp(self):
"""Make the modify date and time of a file reflect the series air date and time.
Note: Also called from post_processor
"""
if not all([app.AIRDATE_EPISODES, self.airdate, self.location,
self.series, self.series.airs, self.series.network]):
return
try:
airdate_ordinal = self.airdate.toordinal()
if airdate_ordinal < 1:
return
airdatetime = network_timezones.parse_date_time(airdate_ordinal, self.series.airs, self.series.network)
if app.FILE_TIMESTAMP_TIMEZONE == 'local':
airdatetime = airdatetime.astimezone(network_timezones.app_timezone)
filemtime = datetime.fromtimestamp(
os.path.getmtime(self.location)).replace(tzinfo=network_timezones.app_timezone)
if filemtime != airdatetime:
airdatetime = airdatetime.timetuple()
log.debug(
'{id}: About to modify date of {location} to series air date {air_date}', {
'id': self.series.series_id,
'location': self.location,
'air_date': time.strftime('%b %d,%Y (%H:%M)', airdatetime),
}
)
try:
if helpers.touch_file(self.location, time.mktime(airdatetime)):
log.info(
'{id}: Changed modify date of {location} to series air date {air_date}', {
'id': self.series.series_id,
'location': os.path.basename(self.location),
'air_date': time.strftime('%b %d,%Y (%H:%M)', airdatetime),
}
)
else:
log.warning(
'{id}: Unable to modify date of {location} to series air date {air_date}', {
'id': self.series.series_id,
'location': os.path.basename(self.location),
'air_date': time.strftime('%b %d,%Y (%H:%M)', airdatetime),
}
)
except Exception:
log.warning(
'{id}: Failed to modify date of {location} to series air date {air_date}', {
'id': self.series.series_id,
'location': os.path.basename(self.location),
'air_date': time.strftime('%b %d,%Y (%H:%M)', airdatetime),
}
)
except Exception:
log.warning(
'{id}: Failed to modify date of {location}', {
'id': self.series.series_id,
'location': os.path.basename(self.location),
}
)
def update_status_quality(self, filepath):
"""Update the episode status and quality according to the file information.
The status should only be changed if either the size or the filename changed.
:param filepath: Path to the new episode file.
"""
old_status, old_quality = self.status, self.quality
old_location = self.location
# Changing the name of the file might also change its quality
same_name = old_location and os.path.normpath(old_location) == os.path.normpath(filepath)
old_size = self.file_size
# Setting a location to episode, will get the size of the filepath
with self.lock:
self.location = filepath
# If size from given filepath is 0 it means we couldn't determine file size
same_size = old_size > 0 and self.file_size > 0 and self.file_size == old_size
if not same_size or not same_name:
log.debug(
'{name}: The old episode had a different file associated with it, '
're-checking the quality using the new filename {filepath}',
{'name': self.series.name, 'filepath': filepath}
)
new_quality = Quality.name_quality(filepath, self.series.is_anime)
if old_status in (SNATCHED, SNATCHED_PROPER, SNATCHED_BEST) or (
old_status == DOWNLOADED and old_location
) or (
old_status == WANTED and not old_location
) or (
# For example when removing an existing show (keep files)
# and re-adding it. The status is SKIPPED just after adding it.
old_status == SKIPPED and not old_location
):
new_status = DOWNLOADED
else:
new_status = ARCHIVED
with self.lock:
self.status = new_status
self.quality = new_quality
if not same_name:
# Reset release name as the name changed
self.release_name = ''
log.debug(
"{name}: Setting the status from '{status_old}' to '{status_new}' and"
" quality '{quality_old}' to '{quality_new}' based on file: {filepath}", {
'name': self.series.name,
'status_old': statusStrings[old_status],
'status_new': statusStrings[new_status],
'quality_old': Quality.qualityStrings[old_quality],
'quality_new': Quality.qualityStrings[new_quality],
'filepath': filepath,
}
)
else:
log.debug(
"{name}: Not changing current status '{status_old}' or"
" quality '{quality_old}' based on file: {filepath}", {
'name': self.series.name,
'status_old': statusStrings[old_status],
'quality_old': Quality.qualityStrings[old_quality],
'filepath': filepath,
}
)
def mass_update_episode_status(self, new_status):
"""
Change the status of an episode, with a number of additional actions depending on the old -> new status transition.
:param new_status: New status value.
:type new_status: int
:returns: The episodes update sql to be used in a mass action.
"""
with self.lock:
if self.status == UNAIRED:
log.warning('Refusing to change status of {series} {episode} because it is UNAIRED',
{'series': self.series.name, 'episode': self.slug})
return
snatched_qualities = [SNATCHED, SNATCHED_PROPER, SNATCHED_BEST]
if new_status == DOWNLOADED and not (
self.status in snatched_qualities + [DOWNLOADED]
or os.path.isfile(self.location)):
log.warning('Refusing to change status of {series} {episode} to DOWNLOADED'
" because it's not SNATCHED/DOWNLOADED or the file is missing",
{'series': self.series.name, 'episode': self.slug})
return
if new_status == FAILED:
if self.status not in snatched_qualities + [DOWNLOADED, ARCHIVED]:
log.warning('Refusing to change status of {series} {episode} to FAILED'
" because it's not SNATCHED/DOWNLOADED/ARCHIVED",
{'series': self.series.name, 'episode': self.slug})
return
else:
cur_failed_queue_item = FailedQueueItem(self.series, [self])
app.forced_search_queue_scheduler.action.add_item(cur_failed_queue_item)
if new_status == WANTED:
if self.status in [DOWNLOADED, ARCHIVED]:
log.debug('Removing release_name of {series} {episode} as episode was changed to WANTED',
{'series': self.series.name, 'episode': self.slug})
self.release_name = ''
if self.manually_searched:
log.debug("Resetting 'manually searched' flag of {series} {episode}"
' as episode was changed to WANTED',
{'series': self.series.name, 'episode': self.slug})
self.manually_searched = False
self.status = new_status
# Make sure to run the collected sql through a mass action.
return self.get_sql()
| pymedusa/Medusa | medusa/tv/episode.py | Python | gpl-3.0 | 87,172 |
"""
ydf/utils
~~~~~~~~~
Contains utility functions that have no better home.
"""
def merge_maps(*maps):
"""
Merge the given sequence of :class:`~collections.Mapping` instances.
:param maps: Sequence of mapping instances to merge together.
:return: A :class:`dict` containing all merged maps.
"""
merged = {}
for m in maps:
merged.update(m)
return merged
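# A minimal usage sketch (illustrative, not part of the original module): later maps
# take precedence because dict.update() overwrites keys that were already merged.
#
#     >>> merge_maps({'a': 1, 'b': 2}, {'b': 3}, {'c': 4})
#     {'a': 1, 'b': 3, 'c': 4}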
| ahawker/ydf | ydf/utils.py | Python | apache-2.0 | 411 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import openerp.netsvc as netsvc
import logging
from openerp import tools
from openerp.osv import osv, fields
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from utility import no_establishment_group
_logger = logging.getLogger(__name__)
class product_ul_extra(osv.osv):
''' Extra fields for product.product object
'''
_name = "product.ul"
_inherit = "product.ul"
# TODO maybe a scheduled action (not yes scheduled):
def import_ul(self, cr, uid, file_name_package, context=None):
''' Load the accounting list of UL (from files for now)
'''
if not file_name_package:
return False
filename = os.path.expanduser(file_name_package)
for line in open(filename, 'r'):
try:
code = line[:10].strip()
name = "%s [%s]"%(line[10:40].strip().title(), code)
product_code = line[40:47].strip()
linked_product_id = self.pool.get('product.product').search(cr, uid, [
('default_code','=',product_code)
], context=context)
if not linked_product_id:
# log error
continue # jump line
linked_product_id=linked_product_id[0]
ul_id = self.search(cr, uid, [
('code', '=', code)], context=context)
data = {
'code': code,
'name': name,
'linked_product_id': linked_product_id,
'type': 'unit',
}
if ul_id:
self.write(cr, uid, ul_id, data, context=context)
else:
self.create(cr, uid, data, context=context)
except:
break
return True
_columns = {
'code': fields.char('Code', size=10, required=False, readonly=False),
'linked_product_id': fields.many2one('product.product', 'Product linked', required=False, help="Used to unload the package product after processing"),
}
class product_product_extra(osv.osv):
''' Extra fields for product.product object
'''
_name = "product.product"
_inherit = "product.product"
# -------------
# Override ORM:
# -------------
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False):
"""
Return a view and fields for current model. where view will be depends on {view_type}.
@param cr: cursor to database
@param uid: id of current user
@param view_id: list of fields, which required to read signatures
@param view_type: defines a view type. it can be one of (form, tree, graph, calender, gantt, search, mdx)
@param context: context arguments, like lang, time zone
@param toolbar: contains a list of reports, wizards, and links related to current model
@return: returns a dict that contains definition for fields, views, and toolbars
"""
if view_type == 'form' and no_establishment_group(self, cr, uid, context=context):
toolbar = False
return super(product_product_extra, self).fields_view_get(
cr, uid, view_id, view_type, context=context, toolbar=toolbar)
# -------------------------------------------------------------------------
# Scheduled actions
# -------------------------------------------------------------------------
def schedule_etl_product_state_mssql(self, cr, uid, verbose=True, as_dict=True, file_name_package=False, context=None):
''' Import from MSSQL DB linked to Company AQ_QUANTITY elements
'''
_logger.info("Start import packages list")
try:
cursor = self.pool.get('micronaet.accounting').get_product_package_columns(cr, uid, context=context)
if not cursor or not file_name_package:
_logger.error("Unable to connect no importation of package list for product!")
else:
# Import Product UL from file:
ul_pool = self.pool.get('product.ul')
ul_pool.import_ul(cr, uid, file_name_package, context=context)
ul_ids = ul_pool.search(cr, uid, [], context=context)
# Cool but doesn't work in 2.6:
#codepackage_2_id = {item.code: item.id for item in ul_pool.browse(cr, uid, ul_ids, context=context)}
codepackage_2_id = {}
for item in ul_pool.browse(cr, uid, ul_ids, context=context):
codepackage_2_id[item.code] = item.id
# Get list of package with the ID (used in product-package populate operations)
for record in cursor:
try:
code = record['COLUMN_NAME'].strip() # no "NGD_"
if code[:4] != "NGD_":
continue # jump no field NGD_
code = code[4:]
pul_id = codepackage_2_id.get(code, False)
#pul_id = ul_pool.search(cr, uid, [
# ('code', '=', code)], context=context)
if not pul_id:
# codepackage_2_id[code] = pul_id[0]
#else:
_logger.error("UL code not found: '%s'" % (code))
except:
_logger.error(sys.exc_info())
# Start importation product-package:
_logger.info("Start import packages for product")
product_packaging_pool = self.pool.get("product.packaging")
product_pool = self.pool.get("product.product")
cursor = self.pool.get('micronaet.accounting').get_product_package(cr, uid, context=context)
if not cursor:
_logger.error("Unable to connect no importation of package list for product!")
for product_package in cursor: # loop on all product elements with package
product_code = product_package['CKY_ART'].strip()
product_ids = product_pool.search(cr, uid, [
('default_code','=',product_code)], context=context)
if not product_ids:
_logger.error("Product not found, code: '%s'" % (product_code))
continue # next record!
product_id = product_ids[0]
for key in codepackage_2_id.keys(): # loop on all elements/columns (package NGD_* *=code of package)
try:
code = "NGD_" + key
qty = product_package.get(code, 0.0) # Q. is the value of the fields NDG_code!
if qty > 0.0: # search if present and > 0
ul = codepackage_2_id.get(key,False)
if not ul:
_logger.error("UL: '%s' not found (used in product: '%s')"%(key, product_code,))
continue # next record (jump this)!
# search if package is yet present:
ul_ids = product_packaging_pool.search(cr, uid, [
('product_id','=',product_id),
('ul','=',ul),
]) #('code','=',key)
if ul_ids: # modify
res = product_packaging_pool.write(
cr, uid, ul_ids, {'qty': qty},
context=context)
else: # create
item_id = product_packaging_pool.create(cr, uid, {
'product_id': product_id,
'ul': ul,
'qty': qty,
}, context=context)
except:
_logger.error(sys.exc_info())
except:
_logger.error("Error import package during status importation!")
# Start syncro product state:
_logger.info("Start syncro product state")
cursor = self.pool.get('micronaet.accounting').get_product_quantity(cr, uid, 1, 9, context=context) # current year always 9
if not cursor:
_logger.error("Unable to connect no importation of product state quantity!")
# Verbose variables:
total = 0
records = 0
verbose_quantity = 100
# TODO Rewrite using base_mssql_accounting
try:
for record in cursor:
try:
records += 1
default_code = record['CKY_ART'].strip()
item_id = self.search(cr, uid, [('default_code','=',default_code)], context=context)
if item_id:
accounting_qty = (record['NQT_INV'] or 0.0) + (record['NQT_CAR'] or 0.0) - (record['NQT_SCAR'] or 0.0)
modify = self.write(cr, uid, item_id, {'accounting_qty': accounting_qty,}, context=context)
total+=1
if verbose and (records % verbose_quantity == 0): _logger.info("%s Record product state read [updated: %s]!"%(records, total))
except:
_logger.error("ETL MSSQL: Error update product state! [%s]"%(sys.exc_info()))
_logger.info("Importation product state terminated! [Records: %s Imported: %s]!"%(records, total))
except:
_logger.error(sys.exc_info())
return False
return True
# Fields functions:
def _function_linked_accounting_qty(self, cr, uid, ids, field, args, context=None):
""" Calculate total of sale order line for used for accounting store
"""
res = dict.fromkeys(ids, 0)
sol_pool = self.pool.get('sale.order.line')
sol_ids = sol_pool.search(cr, uid, [('product_id','in',ids),('use_accounting_qty','=',True)], context=context)
for line in sol_pool.browse(cr, uid, sol_ids, context=context):
try:
res[line.product_id.id] += line.product_uom_qty or 0.0
except:
pass # no error!
return res
_columns = {
'accounting_qty': fields.float('Account quantity', digits=(16, 3)),
'linked_accounting_qty': fields.function(_function_linked_accounting_qty, method=True, type='float', string='OC qty linked to store', store=False, multi=False),
'minimum_qty': fields.float('Minimum alert quantity', digits=(16, 3)),
'maximum_qty': fields.float('Maximum alert quantity', digits=(16, 3)),
'not_in_status': fields.boolean('Not in status', help='If checked, the product does not appear in the webkit status report'),
#'to_produce': fields.boolean('To produce', help='If checked this product appear on list of os lines during creation of production orders'),
'is_pallet': fields.boolean('Is a pallet', help='The product is a pallet '),
'pallet_max_weight': fields.float('Pallet weight', digits=(16, 3), help='Max weight of the load on this pallet'),
}
_defaults = {
'accounting_qty': lambda *a: 0.0,
'minimum_qty': lambda *a: 0.0,
'not_in_status': lambda *a: False,
'is_pallet': lambda *a: False,
}
# ID function:
def get_partner_id(self, cr, uid, ref, context=None):
''' Get OpenERP ID for res.partner with passed accounting reference
'''
partner_id=self.pool.get("res.partner").search(cr, uid, ["|","|",('mexal_c','=',ref),('mexal_d','=',ref),('mexal_s','=',ref)], context=context)
return partner_id[0] if partner_id else False
def browse_partner_id(self, cr, uid, item_id, context=None):
''' Return browse obj for partner id
'''
browse_ids = self.pool.get('res.partner').browse(cr, uid, [item_id], context=context)
return browse_ids[0] if browse_ids else False
def browse_partner_ref(self, cr, uid, ref, context=None):
''' Get OpenERP ID for res.partner with passed accounting reference
'''
partner_id = self.pool.get("res.partner").search(cr, uid, ["|","|",('mexal_c','=',ref),('mexal_d','=',ref),('mexal_s','=',ref)], context=context)
return self.pool.get('res.partner').browse(cr, uid, partner_id[0], context=context) if partner_id else False
def get_product_id(self, cr, uid, ref, context=None):
''' Get OpenERP ID for product.product with passed accounting reference
'''
item_id = self.pool.get('product.product').search(cr, uid, [('default_code', '=', ref)], context=context)
return item_id[0] if item_id else False
def browse_product_id(self, cr, uid, item_id, context=None):
''' Return browse obj for product id
'''
browse_ids = self.pool.get('product.product').browse(cr, uid, [item_id], context=context)
return browse_ids[0] if browse_ids else False
def browse_product_ref(self, cr, uid, ref, context=None):
''' Return browse obj for product ref.
Create a minimal product with code ref so that OC line creation is not skipped
(a later normal product sync will update the fields that are not yet present).
'''
item_id = self.pool.get('product.product').search(cr, uid, [('default_code', '=', ref)], context=context)
if not item_id:
try:
uom_id = self.pool.get('product.uom').search(cr, uid, [('name', '=', 'kg')],context=context)
uom_id = uom_id[0] if uom_id else False
item_id=self.pool.get('product.product').create(cr,uid,{
'name': ref,
'name_template': ref,
'mexal_id': ref,
'default_code': ref,
'sale_ok': True,
'type': 'consu',
'standard_price': 0.0,
'list_price': 0.0,
'description_sale': ref, # preserve original name (not code + name)
'description': ref,
'uos_id': uom_id,
'uom_id': uom_id,
'uom_po_id': uom_id,
'supply_method': 'produce',
}, context=context)
except:
return False # error creating product
else:
item_id=item_id[0] # first
return self.pool.get('product.product').browse(cr, uid, item_id, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| cherrygirl/micronaet7 | production_line/product.py | Python | agpl-3.0 | 16,351 |
import hashlib
import string
import random
class Security(object):
def __init__(self, dependencies):
self.config = dependencies.getConfiguration()
def hashPassword(self, password, secret=None):
if secret is None:
secret = self.config.password_secret
secretpw = secret + password
return hashlib.sha256(secretpw.encode('utf-8')).hexdigest()
def generateSecret(self):
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
charlist = [random.SystemRandom().choice(chars) for _ in range(24)]
return ''.join(charlist)
def generateSecrets(self, password):
secrets = {'ticket_secret': '', 'password_secret': '',
'hashed_password': ''}
if not password == '':
secrets['ticket_secret'] = self.generateSecret()
pwSecret = self.generateSecret()
secrets['password_secret'] = pwSecret
secrets['hashed_password'] = self.hashPassword(password, pwSecret)
return secrets
def generateShortUuid(self):
chars = string.ascii_lowercase
charlist = [random.SystemRandom().choice(chars) for _ in range(6)]
return ''.join(charlist)
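# A minimal usage sketch (illustrative; assumes a `dependencies` object whose
# getConfiguration() returns an object with a `password_secret` attribute, as the
# constructor above expects -- the names below are hypothetical):
#
#     security = Security(dependencies)
#     secrets = security.generateSecrets('hunter2')
#     # hashing with the freshly generated per-user secret reproduces the stored hash
#     assert secrets['hashed_password'] == security.hashPassword(
#         'hunter2', secrets['password_secret'])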
| ilogue/scrolls | scrolls/security.py | Python | mit | 1,232 |
#!/usr/bin/env python
"""
master script to control different operations in training examples generating pipeline.
usage:
python experiment_run.py <YAML config> -h
sample yaml config file located at config/
requirement:
pygridtools for distributed computing
packages/modules depends on the operation
"""
import os
import sys
import yaml
try:
import libpyjobrunner as pg
except:
sys.stdout.write('warning: pygridtools is not available, distributed computing tasks will be disrupted\n')
from optparse import OptionParser
from signal_labels import experiment_details_db as expdb
assert sys.version_info[:2] >= ( 2, 4 )
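# Example invocation (the YAML path below is illustrative only):
#     python experiment_run.py config/experiment.yaml --read_mapping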
def main():
"""
Manage the experiment run in different operation modes:
Options
-1 download_sra file from NCBI SRA service
-d decompose_sra decompress the SRA file
-a annotation fetch genome annotation file from public database servers mainly ensembl, phytozome
-g genome fetch genome sequence file from public database
manual cleaning of genome sequence and annotation
-2 genome_index create genome indices for STAR alignment
-i insert_size calculate the insert size based on the raw fastq files
-3 read_mapping aligning reads to the genome using STAR aligner
-m multi_map resolving the right place for read mapped in multiple location on the genome
-u uniq_read recover uniquely aligned reads from the star alignment
-4 trsk_pred transcript prediction using TranscriptSkimmer
-c cuff_pred transcript assembly by Cufflinks
-s stringtie_pred transcript assembly by StringTie
-5 filter_trsk applying filter to the trsk predicted gene models
-b filter_cuff applying filter to the cufflinks predicted gene models
-f filter_db applying filter to the online db genome annotations
-6 trsk_label generating labels for genomic signal based on the trsk feature annotation
-t cuff_label generating labels for genomic signal based on the cufflinks feature annotation
-p db_label generating labels for genomic signal based on online db annotations
"""
parser = OptionParser(usage='usage: %prog <YAML config> [required option]')
parser.add_option( "-1", "--download_sra", action="store_true", dest="download_sra", default=False, help="Download sra file based on run id from NCBI SRA/ENA repositories.")
parser.add_option( "-d", "--decompose_sra", action="store_true", dest="decompose_sra", default=False, help="Decompress the sra file according to the library type.")
parser.add_option( "-a", "--annotation", action="store_true", dest="annotation", default=False, help="Download genome annotation from public database resources.")
parser.add_option( "-g", "--genome", action="store_true", dest="genome", default=False, help="Download genome sequence from public database resources.")
parser.add_option( "-2", "--genome_index", action="store_true", dest="genome_index", default=False, help="Create STAR genome index based on genome sequence and annotations." )
parser.add_option( "-i", "--insert_size", action="store_true", dest="insert_size", default=False, help="Calculate the library insert size from fastq files.")
parser.add_option( "-3", "--read_mapping", action="store_true", dest="read_mapping", default=False, help="RNASeq read mapping to genome using STAR aligner." )
parser.add_option( "-m", "--multi_map", action="store_true", dest="multi_map", default=False, help="MMR on aligned reads to resolve multimapping of reads." )
parser.add_option( "-u", "--uniq_read", action="store_true", dest="uniq_read", default=False, help="Fetching uniquely mapped reads from bam file." )
parser.add_option( "-4", "--trsk_pred", action="store_true", dest="trsk_pred", default=False, help="Transcript prediction using TranscriptSkimmer." )
parser.add_option( "-c", "--cuff_pred", action="store_true", dest="cuff_pred", default=False, help="Transcript assembly using Cufflinks." )
parser.add_option( "-s", "--stringtie_pred", action="store_true", dest="stringtie_pred", default=False, help="Transcript assembly using StringTie." )
parser.add_option( "-5", "--filter_trsk", action="store_true", dest="filter_trsk", default=False, help="Apply filters to trsk predicted gene models." )
parser.add_option( "-b", "--filter_cuff", action="store_true", dest="filter_cuff", default=False, help="Apply filter to the cufflinks predicted gene models." )
parser.add_option( "-f", "--filter_db", action="store_true", dest="filter_db", default=False, help="Apply filter to the online db annotation gene models." )
parser.add_option( "-6", "--trsk_label", action="store_true", dest="trsk_label", default=False, help="Fetch label sequences from TranscriptSkimmer annotations." )
parser.add_option( "-t", "--cuff_label", action="store_true", dest="cuff_label", default=False, help="Fetch label sequences from cufflinks annotations." )
parser.add_option( "-p", "--db_label", action="store_true", dest="db_label", default=False, help="Fetch label sequences from public online db annotation files." )
( options, args ) = parser.parse_args()
try:
config_file = args[0]
except:
exit(__doc__)
if not (options.download_sra ^ options.decompose_sra ^ options.annotation ^ \
options.genome ^ options.genome_index ^ options.insert_size ^ \
options.read_mapping ^ options.multi_map ^ options.uniq_read ^ \
options.trsk_pred ^ options.cuff_pred ^ options.filter_trsk ^ \
options.trsk_label ^ options.filter_cuff ^ options.filter_db ^ \
options.cuff_label ^ options.db_label ^ options.stringtie_pred):
parser.print_help()
sys.exit(-1)
print('Using config file %s for the experiment.' % config_file)
if options.download_sra:
print 'Operation selected: Download sequencing reads file from ncbi-sra'
download_sra_data(config_file)
elif options.decompose_sra:
print 'Operation selected: Decompress sra file'
decompose_sra_file(config_file)
elif options.annotation:
print 'Operation selected: Downloading genome annotation file'
download_gtf(config_file)
elif options.genome:
print 'Operation selected: Downloading genome sequence file'
download_fasta(config_file)
elif options.genome_index:
print 'Operation selected: Create STAR genome index'
create_genome_index(config_file)
elif options.insert_size:
print 'Operation selected: Calculate the library insert size from sequencing \
read files'
calculate_insert_size(config_file)
elif options.read_mapping:
print 'Operation selected: Read alignment with STAR'
align_rnaseq_reads(config_file)
elif options.multi_map:
print 'Operation selected: Multiple read mapper resolution with MMR'
alignment_filter(config_file)
elif options.uniq_read:
print 'Operation selected: Find uniquely mapped reads from star alignment'
find_uniq_reads(config_file)
elif options.trsk_pred:
print 'Operation selected: Transcript prediction based on mapped RNASeq read \
data with TranscriptSkimmer'
transcript_prediction_trsk(config_file)
elif options.cuff_pred:
print 'Operation selected: Transcript assembly based on mapped RNASeq read data \
with Cufflinks'
transcript_prediction_cuff(config_file)
elif options.stringtie_pred:
print 'Operation selected: Transcript assembly based on mapped RNASeq read data \
with StringTie'
transcript_prediction_stringtie(config_file)
elif options.filter_trsk:
print 'Operation selected: Filter out gene models from TranscriptSkimmer \
predictions - criteria: splice-site consensus and length of the ORF.'
filter_genes(config_file, "trsk")
elif options.filter_cuff:
print 'Operation selected: Filter out gene models from cufflinks predictions - \
criteria: splice-site consensus, length of the ORF and read coverage to the \
region.'
filter_genes(config_file, "cufflinks")
elif options.filter_db:
print 'Operation selected: Filter out gene models from public database - criteria: \
splice-site consensus and length of the ORF'
filter_genes(config_file, "onlinedb")
elif options.trsk_label:
print 'Operation selected: Extract different genomic signal label sequences from \
TranscriptSkimmer.'
fetch_db_signals(config_file, "trsk")
elif options.cuff_label:
print 'Operation selected: Extract different genomic signal label sequences from \
cufflinks.'
fetch_db_signals(config_file, "cufflinks")
elif options.db_label:
print 'Operation selected: Extract different genomic signal label sequences from \
online database files.'
fetch_db_signals(config_file, "onlinedb")
def call_fetch_db_signals(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from signal_labels import generate_genome_seq_labels as fetch_labels
fasta_file, gff_file, signal_type, count, poslabels_cnt, neglabels_cnt, flank_nts, out_dir = args_list
os.chdir(out_dir)
fetch_labels.main(fasta_file, gff_file, signal_type, count, poslabels_cnt, neglabels_cnt, flank_nts)
return "done"
def fetch_db_signals(yaml_config, data_method):
"""
    get the genomic signal labels based on the annotation from an external database
"""
operation_seleted = "6"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
if data_method == "trsk":
gff_file = "%s/%s_trsk_genes.gff" % (det['read_assembly_dir'], org_name)
out_dir = "%s/trsk_4K_labels" % det['labels_dir']## new label sequence dir
elif data_method == "cufflinks":
gff_file = "%s/%s_cufflinks_genes.gff" % (det['read_assembly_dir'], org_name)
out_dir = "%s/cuff_4K_labels" % det['labels_dir']
elif data_method == "onlinedb":
gff_file = "%s/%s_%s.gff" % (det['read_assembly_dir'], org_name, det['genome_release_db']) ## db_anno
out_dir = "%s/jmlr_1K_sm_labels" % det['labels_dir']
if not os.path.isfile(gff_file):## check the file present or not
exit("error: genome annotation file missing %s" % gff_file)
if not os.path.exists(out_dir): ## create the new label sequence dir
os.makedirs(out_dir)
for the_file in os.listdir(out_dir): ## cleaning the existing one
file_path = os.path.join(out_dir, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception, e:
print e
#import subprocess
## get the label count for each organisms, essentially the max number of genes available
#cmd = "grep -P \"\tgene\t\" %s | wc -l" % gff_file
#proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#count, err = proc.communicate()
#count = int(count.strip())
## depends on the genomic signal type
count = 5000
signal_type = "tss"
poslabels_cnt = 1000
neglabels_cnt = 3000
flank_nts = 1200
## arguments to pygrid
arg = [[det['fasta'], gff_file, signal_type, count, poslabels_cnt, neglabels_cnt, flank_nts, out_dir]]
job = pg.cBioJob(call_fetch_db_signals, arg)
## native specifications
job.mem="5gb"
job.vmem="5gb"
job.pmem="5gb"
job.pvmem="5gb"
job.nodes = 1
job.ppn = 1
job.walltime = "1:00:00"
Jobs.append(job)
print
print "sending genomic signal fetch jobs to worker"
print
processedJobs = pg.process_jobs(Jobs)
def call_filter_genes(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from rnaseq_align_assembly import refine_transcript_models as filter_tool
gtf_file, fasta_file, result_file = args_list
filter_tool.filter_gene_models(gtf_file, fasta_file, result_file)
return "done"
def filter_genes(yaml_config, data_method):
"""
filter out invalid gene models from the provided genome annotation
"""
operation_seleted = "f"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
if data_method == "cufflinks":
gff_file = "%s/transcripts.gtf" % det['read_assembly_dir'] ## cufflinks run output file
outFile = "%s/%s_cufflinks_genes.gff" % (det['read_assembly_dir'], org_name) ## example: A_thaliana_cufflinks_genes.gff
elif data_method == "trsk":
gff_file = "%s/tmp_trsk_genes.gff" % det['read_assembly_dir'] ## trsk run output file
outFile = "%s/%s_trsk_genes.gff" % (det['read_assembly_dir'], org_name) ## example: A_thaliana_trsk_genes.gff
else:
gff_file = det['gtf'] ## public database genome annotation file
outFile = "%s/%s_%s.gff" % (det['read_assembly_dir'], org_name, det['genome_release_db']) ## example: A_thaliana_arabidopsis-tair10.gff
## arguments to pygrid
arg = [[gff_file, det['fasta'], outFile]]
job = pg.cBioJob(call_filter_genes, arg)
## native specifications
job.mem="6gb"
job.vmem="6gb"
job.pmem="6gb"
job.pvmem="6gb"
job.nodes = 1
job.ppn = 1
job.walltime = "2:00:00"
Jobs.append(job)
print
print "sending filter gene models jobs to worker"
print
processedJobs = pg.process_jobs(Jobs)
def call_transcript_prediction_cuff(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from rnaseq_align_assembly import transcript_assembly as trassembly
org_db, num_threads = args_list
trassembly.run_cufflinks(org_db, num_threads)
return "done"
def transcript_prediction_cuff(yaml_config):
"""
transcript prediction using cufflinks
"""
operation_seleted = "c"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
arg = [[det, 4]]
job = pg.cBioJob(call_transcript_prediction_cuff, arg)
## native specifications
job.mem="96gb"
job.vmem="96gb"
job.pmem="24gb"
job.pvmem="24gb"
job.nodes = 1
job.ppn = 4
job.walltime = "32:00:00"
Jobs.append(job)
print
print "sending transcript assembly cufflinks jobs to worker"
print
processedJobs = pg.process_jobs(Jobs)
def call_transcript_prediction_trsk(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from rnaseq_align_assembly import transcript_assembly as trassembly
org_db = args_list
trassembly.run_trsk(org_db)
return "done"
def transcript_prediction_trsk(yaml_config):
"""
transcript prediction using TranscriptSkimmer
"""
operation_seleted = "4"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
arg = [det]
job = pg.cBioJob(call_transcript_prediction_trsk, arg)
## native specifications
job.mem="32gb"
job.vmem="32gb"
job.pmem="32gb"
job.pvmem="32gb"
job.nodes = 1
job.ppn = 1
job.walltime = "9:00:00"
Jobs.append(job)
print
print "sending transcript assembly trsk jobs to worker"
print
local = True ## cluster compute switch
processedJobs = pg.process_jobs(Jobs, local=local)
def call_transcript_prediction_stringtie(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from rnaseq_align_assembly import transcript_assembly as tsa
mmr_read_dir, organism_name, trans_pred_out_dir = args_list
tsa.run_stringtie(organism_name, mmr_read_dir, trans_pred_out_dir)
return "done"
def transcript_prediction_stringtie(yaml_config):
"""
transcript prediction using StringTie
"""
operation_seleted = "5"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
arg = [[det["read_map_dir"], det["short_name"], det["read_assembly_dir"]]]
job = pg.cBioJob(call_transcript_prediction_stringtie, arg)
cpus = 1
## native specifications
job.mem="12gb"
job.vmem="12gb"
job.pmem="12gb"
job.pvmem="12gb"
job.nodes = 1
job.ppn = cpus
job.walltime = "24:00:00"
Jobs.append(job)
print("\nsending transcript assembly stringtie jobs to worker\n")
local_compute = False ## switching between local multithreading and cluster computing
processedJobs = pg.process_jobs(Jobs, local=local_compute)
def find_uniq_reads(yaml_config):
"""
find uniquely mapped reads from a bam file
"""
operation_seleted = "u"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
print "NOT YET IMPLEMENTED."
sys.exit(0)
def call_alignment_filter(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from rnaseq_align_assembly import star_align_rna as filter
org_name, out_dir, num_cpus = args_list
filter.run_mmr(org_name, out_dir, num_cpus)
return "done"
def alignment_filter(yaml_config):
"""
run multimapper resolution program
"""
operation_seleted = "m"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
num_cpus = 5
## arguments to pygrid
arg = [[det['short_name'], det['read_map_dir'], num_cpus]]
job = pg.cBioJob(call_alignment_filter, arg)
## native specifications
job.pmem="90gb"
job.pvmem="90gb"
job.mem="90gb"
job.vmem="90gb"
job.nodes = 1
job.ppn = num_cpus
job.walltime = "48:00:00"
Jobs.append(job)
print
print "sending multi map resolution jobs to worker"
print
processedJobs = pg.process_jobs(Jobs)
def call_align_reads(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from rnaseq_align_assembly import star_align_rna as rnastar
org_db, read_type, max_mates_gap_length, num_cpus = args_list
rnastar.run_star_alignment(org_db, read_type, max_mates_gap_length, num_cpus)
return 'done'
def align_rnaseq_reads(yaml_config):
"""
wrapper for aligning rnaseq reads using
"""
operation_seleted = "3"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
lib_type = 'PE'
lib_type = 'SE' if len(det['fastq'])==1 else lib_type
## library insert size
lib_insert_size = 100000
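        ## (forwarded to call_align_reads as max_mates_gap_length for the STAR run)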
num_cpu = 3
arg = [[det, lib_type, lib_insert_size, num_cpu]]
job = pg.cBioJob(call_align_reads, arg)
job.mem="90gb"
job.vmem="90gb"
job.pmem="30gb"
job.pvmem="30gb"
job.nodes = 1
job.ppn = num_cpu
job.walltime = "48:00:00"
Jobs.append(job)
print
print "sending read alignment with STAR jobs to worker"
print
processedJobs = pg.process_jobs(Jobs, local=False)
def calculate_insert_size(yaml_config):
"""
wrapper for calling calculate insert size function
"""
operation_seleted = "i"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
print "NOT YET IMPLEMENTED."
sys.exit(0)
def call_genome_index(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import prepare_data as ppd
fasta_file, out_dir, genome_anno, num_workers, onematelength = args_list
ppd.create_star_genome_index(fasta_file, out_dir, genome_anno, num_workers, onematelength)
return 'done'
def create_genome_index(yaml_config):
"""
wrapper for calling genome index function
"""
operation_seleted = "2"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
num_cpus = 4
arg = [[det['fasta'], det['genome_index_dir'], det['gtf'], num_cpus, det['read_length']-1]]
job = pg.cBioJob(call_genome_index, arg)
job.mem="46gb"
job.vmem="46gb"
job.pmem="46gb"
job.pvmem="46gb"
job.nodes = 1
job.ppn = num_cpus
job.walltime = "24:00:00"
Jobs.append(job)
print
print "sending star genome index jobs to worker"
print
processedJobs = pg.process_jobs(Jobs)
def call_download_sra_file(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
sra_run_id, out_dir = args_list
dld.download_sra_file(sra_run_id, out_dir)
return 'done'
def download_sra_data(yaml_config):
"""
download sra file for the working organism
"""
operation_seleted = "1"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
arg = [[det['sra_run_id'], det['fastq_path']]]
job = pg.cBioJob(call_download_sra_file, arg)
job.mem="2gb"
job.vmem="2gb"
job.pmem="2gb"
job.pvmem="2gb"
job.nodes = 1
job.ppn = 1
job.walltime = "1:00:00"
Jobs.append(job)
print
print "sending download SRA file jobs to worker"
print
processedJobs = pg.process_jobs(Jobs)
def call_decompose_sra_file(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
sra_file, out_dir = args_list
dld.decompress_sra_file(sra_file, out_dir)
return 'done'
def decompose_sra_file(yaml_config):
"""
decompress the .sra file from ncbi sra
"""
operation_seleted = "d"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
sra_file = "%s/%s.sra" % (det['fastq_path'], det['sra_run_id'])
if not os.path.isfile(sra_file):## check the file present or not
print "error: missing sequencing read file %s" % sra_file
sys.exit(0)
        ## TODO: the library type and compression format could be moved into the yaml config options
        library_type = "pe"
compress_format = "gzip"
## arguments to pygrid
arg = [[sra_file, det['fastq_path']]]
job = pg.cBioJob(call_decompose_sra_file, arg)
job.mem="6gb"
job.vmem="6gb"
job.pmem="6gb"
job.pvmem="6gb"
job.nodes = 1
job.ppn = 1
job.walltime = "24:00:00"
Jobs.append(job)
print
print "sending decompress SRA file jobs to worker"
print
processedJobs = pg.process_jobs(Jobs)
def call_protists_fasta(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_protists_fasta(release_num, organism, genome_path)
return 'done'
def call_fungi_fasta(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_fungi_fasta(release_num, organism, genome_path)
return 'done'
def call_metazoa_fasta(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_metazoa_fasta(release_num, organism, genome_path)
return 'done'
def shorten_org_name(org_name):
"""
assume full name and shorten, e.g.:
populus_trichocarpa --> Ptrichocarpa
"""
tokens = org_name.strip().split("_")
left, right = tokens[0], tokens[-1]
short_name = left[0].upper() + right.lower()
return short_name
def call_phytozome_fasta(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
org_name = shorten_org_name(organism)
dld.fetch_phytozome_fasta(release_num, org_name, genome_path)
return 'done'
def call_ensembl_fasta(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_fasta(release_num, organism, genome_path)
return 'done'
def download_fasta(yaml_config):
"""
download fasta file from remote data publishing services
"""
operation_seleted = "g"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
arg = [[det['release_nb'], det['long_name'], det['genome_dir']]]
if det['release_db'] == 'ensembl_metazoa_genome':
job = pg.cBioJob(call_metazoa_fasta, arg)
elif det['release_db'] == 'phytozome_genome':
job = pg.cBioJob(call_phytozome_fasta, arg)
elif det['release_db'] == 'ensembl_genome':
job = pg.cBioJob(call_ensembl_fasta, arg)
elif det['release_db'] == 'ensembl_fungi_genome':
job = pg.cBioJob(call_fungi_fasta, arg)
elif det['release_db'] == 'ensembl_protists_genome':
job = pg.cBioJob(call_protists_fasta, arg)
else:
exit("error: download fasta plugin for %s not available, module works with ensembl_genome, ensembl_metazoa_genome and phytozome_genome servers." % det['release_db'])
job.mem="2gb"
job.vmem="2gb"
job.pmem="2gb"
job.pvmem="2gb"
job.nodes = 1
job.ppn = 1
job.walltime = "2:00:00"
Jobs.append(job)
print
print "sending fasta download job to worker"
print
local_compute = True
processedJobs = pg.process_jobs(Jobs, local=local_compute)
def call_fungi_gtf(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_fungi_gtf(release_num, organism, genome_path)
return 'done'
def call_metazoa_gtf(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_metazoa_gtf(release_num, organism, genome_path)
return 'done'
def call_protists_gtf(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_protists_gtf(release_num, organism, genome_path)
return 'done'
def call_phytozome_gtf(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
short_name = shorten_org_name(organism)
dld.fetch_phytozome_gff(release_num, short_name, genome_path)
return 'done'
def call_ensembl_gtf(args_list):
"""
wrapper for submitting jobs to pygrid
"""
from fetch_remote_data import download_data as dld
release_num, organism, genome_path = args_list
dld.fetch_ensembl_gtf(release_num, organism, genome_path)
return 'done'
def download_gtf(yaml_config):
"""
download gtf/gff file from remote data publishing services
"""
operation_seleted = "a"
orgdb = expdb.experiment_db(yaml_config, operation_seleted)
Jobs = []
for org_name, det in orgdb.items():
## arguments to pygrid
arg = [[det['release_nb'], det['long_name'], det['genome_dir']]]
if det['release_db'] == 'ensembl_metazoa_genome':
job = pg.cBioJob(call_metazoa_gtf, arg)
elif det['release_db'] == 'phytozome_genome':
job = pg.cBioJob(call_phytozome_gtf, arg)
elif det['release_db'] == 'ensembl_genome':
job = pg.cBioJob(call_ensembl_gtf, arg)
elif det['release_db'] == 'ensembl_fungi_genome':
job = pg.cBioJob(call_fungi_gtf, arg)
elif det['release_db'] == 'ensembl_protists_genome':
job = pg.cBioJob(call_protists_gtf, arg)
else:
exit("error: download gtf plugin for %s not available, module works with ensembl_genome, ensembl_metazoa_genome and phytozome_genome servers." % det['release_db'])
job.mem="2gb"
job.vmem="2gb"
job.pmem="2gb"
job.pvmem="2gb"
job.nodes = 1
job.ppn = 1
job.walltime = "2:00:00"
Jobs.append(job)
print
print "sending gtf download job to worker"
print
local_compute = True
processedJobs = pg.process_jobs(Jobs, local=local_compute)
if __name__=="__main__":
main()
| vipints/genomeutils | experiment_run.py | Python | bsd-3-clause | 29,698 |
from .admin_dev_i18n import *
from .update_to_db import * | vinoth3v/In | In/stringer/admin/page/__init__.py | Python | apache-2.0 | 58 |
# Copyright 2010-2012 RethinkDB, all rights reserved.
import os
import socket
import time
import socket
# A discriminated-union-style hierarchy: the Refspec base class below carries no
# behaviour of its own and mainly documents that Tag and Branch are the two
# accepted refspec kinds.
class RunError(Exception):
def __init__(self, str):
self.str = str
def __str__(self):
return repr(self.str)
def ensure_socket(host, port):
start_time = time.time()
success = False
while (time.time() - start_time < 5 * 60): # give up after some number of seconds
try:
s = socket.create_connection((host, port))
success = True
break
except:
time.sleep(20)
pass
if not success:
raise RunError("Failed to create a connection.")
return s
class Refspec():
pass
class Tag(Refspec):
def __init__(self, tagname):
self.val = tagname
class Branch(Refspec):
def __init__(self, branchname):
self.val = branchname
def remove_local(string):
if (string[len(string) - len('.local'):] == '.local'):
return string[:len(string) - len('.local')]
else:
return string
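# Illustrative example: remove_local("myhost.local") -> "myhost"; names without a
# ".local" suffix are returned unchanged.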
def rpm_install(path):
return "rpm -i %s" % path
def rpm_get_binary(path):
return "rpm -qpil %s | grep /usr/bin" % path
def rpm_uninstall(cmd_name):
return "which %s | xargs readlink -f | xargs rpm -qf | xargs rpm -e" % cmd_name
def deb_install(path):
return "dpkg -i %s" % path
def deb_get_binary(path):
return "dpkg -c %s | grep /usr/bin/rethinkdb-.* | sed 's/^.*\(\/usr.*\)$/\\1/'" % path
def deb_uninstall(cmd_name):
return "which %s | xargs readlink -f | xargs dpkg -S | sed 's/^\(.*\):.*$/\\1/' | xargs dpkg -r" % cmd_name
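# These helpers only build shell command strings, e.g.
#   rpm_install("/tmp/rethinkdb.rpm") -> "rpm -i /tmp/rethinkdb.rpm"
# the caller runs them on the guest via VM.command().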
class VM():
def __init__(self, uuid, hostname, username = 'rethinkdb', rootname = 'root', vbox_username = 'rethinkdb', vbox_hostname = 'deadshot', startup = True):
self.uuid = uuid
self.hostname = hostname
self.username = username
self.rootname = rootname
self.vbox_username = vbox_username
self.vbox_hostname = vbox_hostname
if (startup):
os.system("ssh %s@%s VBoxManage startvm %s --type headless" % (self.vbox_username, self.vbox_hostname, self.uuid))
start_time = time.time()
while (self.command("true") != 0) and time.time() - start_time < 5 * 60: # give up after some number of seconds
time.sleep(3)
if self.command("true") != 0:
raise RunError("Failed to connect to Virtual Machine %s." % uuid)
def __del__(self):
os.system("ssh %s@%s VBoxManage controlvm %s poweroff" % (self.vbox_username, self.vbox_hostname, self.uuid))
def command(self, cmd_str, root = False, bg = False):
        cmd = "ssh -o ConnectTimeout=1000 %s@%s \"%s\"" % ((self.rootname if root else self.username), self.hostname, (cmd_str + ("&" if bg else ""))) + ("&" if bg else "")
        print cmd
        return os.system(cmd)
# send a file into the tmp directory of the vm
def copy_to_tmp(self, path):
        cmd = "scp %s %s@%s:/tmp/" % (path, self.username, self.hostname)
        assert(os.system(cmd) == 0)
def popen(self, cmd_str, mode):
#print cmd_str
return os.popen("ssh %s@%s \"%s\"" % (self.username, self.hostname, cmd_str), mode)
class target():
def __init__(self, build_uuid, build_hostname, username, build_cl, res_ext, install_cl_f, uninstall_cl_f, get_binary_f, vbox_username, vbox_hostname):
self.build_uuid = build_uuid
self.build_hostname = build_hostname
self.username = username
self.build_cl = build_cl
self.res_ext = res_ext
self.install_cl_f = install_cl_f # path -> install cmd
self.uninstall_cl_f = uninstall_cl_f
self.get_binary_f = get_binary_f
self.vbox_username = vbox_username # username and hostname for running VirtualBox through ssh
self.vbox_hostname = vbox_hostname
def start_vm(self):
return VM(self.build_uuid, self.build_hostname, self.username, vbox_username=self.vbox_username, vbox_hostname=self.vbox_hostname) # startup = True
def get_vm(self):
return VM(self.build_uuid, self.build_hostname, self.username, vbox_username=self.vbox_username, vbox_hostname=self.vbox_hostname, startup=False)
def interact(self, short_name):
build_vm = self.start_vm()
print "%s is now accessible via ssh at %s@%s" % (short_name, self.username, self.build_hostname)
print "Leave this process running in the background and when you're done interrupt it to clean up the virtual machine."
while True:
time.sleep(1)
def run(self, refspec, short_name):
def purge_installed_packages():
old_binaries_raw = build_vm.popen("ls /usr/bin/rethinkdb*", "r").readlines()
old_binaries = map(lambda x: x.strip('\n'), old_binaries_raw)
print "Binaries scheduled for removal: ", old_binaries
for old_binary in old_binaries:
build_vm.command(self.uninstall_cl_f(old_binary), True)
if (not os.path.exists("Built_Packages")):
os.mkdir("Built_Packages")
build_vm = self.start_vm()
def run_checked(cmd, root = False, bg = False):
res = build_vm.command(cmd, root, bg)
if res != 0:
raise RunError(cmd + " returned on %d exit." % res)
def run_unchecked(cmd, root = False, bg = False):
res = build_vm.command(cmd, root, bg)
if isinstance(refspec, Tag):
run_checked("cd rethinkdb && git fetch && git fetch origin tag %s && git checkout -f %s" % (refspec.val, refspec.val))
elif isinstance(refspec, Branch):
run_checked("cd rethinkdb && git fetch && git checkout -f %s && git pull" % refspec.val)
else:
raise RunError("Invalid refspec type, must be branch or tag.")
run_checked("cd rethinkdb/src &&" + self.build_cl)
dir = build_vm.popen("pwd", 'r').readline().strip('\n')
p = build_vm.popen("find rethinkdb/build/packages -regex .*\\\\\\\\.%s" % self.res_ext, 'r')
raw = p.readlines()
res_paths = map((lambda x: os.path.join(dir, x.strip('\n'))), raw)
print res_paths
dest = os.path.abspath("Built_Packages")
for path in res_paths:
purge_installed_packages()
if (not os.path.exists(os.path.join(dest, short_name))):
os.mkdir(os.path.join(dest, short_name))
# install antiquated packages here
# if os.path.exists('old_versions'):
# for old_version in os.listdir('old_versions'):
# pkg = os.listdir(os.path.join('old_versions', old_version, short_name))[0]
# build_vm.copy_to_tmp(os.path.join('old_versions', old_version, short_name, pkg))
# run_checked(self.install_cl_f(os.path.join('/tmp', pkg)), True)
# print "Installed: ", old_version
# install current versions
target_binary_name = build_vm.popen(self.get_binary_f(path), "r").readlines()[0].strip('\n')
print "Target binary name: ", target_binary_name
run_checked(self.install_cl_f(path), True)
# run smoke test
run_unchecked("rm -r test_data")
run_checked("rethinkdb --cluster-port 11211 --directory test_data", bg = True)
print "Starting tests..."
s = ensure_socket(build_vm.hostname, 11213)
from smoke_install_test import test_against
if (not test_against(build_vm.hostname, 11213)):
raise RunError("Tests failed")
s.send("rethinkdb shutdown\r\n")
scp_string = "scp %s@%s:%s %s" % (self.username, self.build_hostname, path, os.path.join(dest, short_name))
print scp_string
os.system(scp_string)
# the code below is not updated
# find legacy binaries
# leg_binaries_raw = build_vm.popen("ls /usr/bin/rethinkdb*", "r").readlines()
# leg_binaries = map(lambda x: x.strip('\n'), leg_binaries_raw)
# leg_binaries.remove('/usr/bin/rethinkdb') #remove the symbolic link
# leg_binaries.remove(target_binary_name)
# for leg_binary in leg_binaries:
# print "Testing migration %s --> %s..." % (leg_binary, target_binary_name)
# file_name = leg_binary.replace('/', '_').replace('-','_').replace('.', '_')
# # create the old data
# run_unchecked("rm %s_1 %s_2" % (file_name, file_name))
# run_checked("%s -p 11211 -f %s_1 -f %s_2" % (leg_binary, file_name, file_name), bg = True)
# s = ensure_socket(build_vm.hostname, 11211)
# from smoke_install_test import throw_migration_data
# throw_migration_data(build_vm.hostname, 11211)
# s.send("rethinkdb shutdown\r\n")
# # run migration
# run_unchecked("rm %s_mig_1 %s_mig_2 %s_intermediate" % ((file_name, ) * 3))
# run_checked("%s migrate --in -f %s_1 -f %s_2 --out -f %s_mig_1 -f %s_mig_2 --intermediate %s_intermediate" % ((target_binary_name,) + ((file_name,) * 5)))
# # check to see if the data is there
# run_checked("%s -p 11211 -f %s_mig_1 -f %s_mig_2" % (target_binary_name, file_name, file_name), bg = True)
# s = ensure_socket(build_vm.hostname, 11211)
# from smoke_install_test import check_migration_data
# check_migration_data(build_vm.hostname, 11211)
# s.send("rethinkdb shutdown\r\n")
# print "Done"
purge_installed_packages()
# clean up is used to just shutdown the machine, kind of a hack but oh well
def clean_up(self):
        build_vm = self.get_vm()
        return # dropping the reference invokes the VM's __del__ method, which shuts down the machine
def build(targets):
os.mkdir("Built_Packages")
map((lambda x: x.run()), targets)
| KSanthanam/rethinkdb | scripts/VirtuaBuild/vm_build.py | Python | agpl-3.0 | 10,209 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
firebat-manager.test.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Objects mapping for blueprint
"""
from datetime import datetime
from sqlalchemy import *
from .. import db
class Status(db.Model):
__tablename__ = 'status'
id = Column(Integer, primary_key=True)
name = Column(String(), unique=True)
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<Status %r>' % (self.name)
class Test(db.Model):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
celery_task_id = Column(String)
celery_out = Column(String)
status_id = Column(Integer, ForeignKey('status.id'))
added_at = Column(DateTime)
def __init__(self, id=None, name=None, status_id=None):
self.id = id
self.name = name
self.status_id = status_id
self.added_at = datetime.utcnow()
def __repr__(self):
return '<Test %r>' % (self.id)
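# Usage sketch (illustrative; assumes `db` is a configured Flask-SQLAlchemy instance
# and that a Status row named 'new' already exists):
#
#   status = Status.query.filter_by(name='new').first()
#   db.session.add(Test(status_id=status.id))
#   db.session.commit()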
| greggyNapalm/firebat-manager | firemanager/test/models.py | Python | bsd-3-clause | 986 |
from firehose.model import Issue, Message, File, Location, Point
import re
LINE_EXPR = re.compile(r"(?P<path>.*): (?P<severity>.*): (?P<msg>.*)")
def parse_desktop_file_validate(lines):
for line in lines:
        match = LINE_EXPR.match(line)
        if match is None:
            continue  # skip lines that do not follow the "path: severity: message" format
        info = match.groupdict()
path = info['path']
message = info['msg']
severity = info['severity']
yield Issue(cwe=None,
testid=None,
location=Location(
file=File(path, None),
function=None,
point=None),
severity=severity,
message=Message(text=message),
notes=None,
trace=None)
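# Minimal usage sketch (the sample line below is illustrative, not real
# desktop-file-validate output):
#
#   sample = ["foo.desktop: warning: boolean key is deprecated"]
#   for issue in parse_desktop_file_validate(sample):
#       print(issue.severity, issue.message.text)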
| paultag/ethel | ethel/wrappers/desktop_file_validate.py | Python | mit | 735 |
#!/usr/bin/python
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# http://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two versions of Blockly's core files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster developement
# cycle since there is no need to rebuild or recompile, just reload.
#
# This script also generates:
# blocks_compressed.js: The compressed Blockly language blocks.
# javascript_compressed.js: The compressed Javascript generator.
# python_compressed.js: The compressed Python generator.
# dart_compressed.js: The compressed Dart generator.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
import errno, glob, httplib, json, os, re, subprocess, sys, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date
del sys.path[-1]
return module
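# Example (path is illustrative): calcdeps = import_path('../closure-library/closure/bin/calcdeps.py')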
HEADER = ('// Do not edit this file; automatically generated by build.py.\n'
'"use strict";\n')
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
target_filename = 'blockly_uncompressed.js'
f = open(target_filename, 'w')
f.write(HEADER)
f.write("""
window.BLOCKLY_DIR = (function() {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_uncompressed\.js$');
for (var x = 0, script; script = scripts[x]; x++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
return '';
})();
window.BLOCKLY_BOOT = function() {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'http://code.google.com/p/blockly/wiki/Closure\\n');
}
// Build map of all dependencies (used and unused).
var dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
m = re.search('[\\/]([^\\/]+)[\\/]realtime[\\/]realtime.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]realtime[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith(os.pardir + os.sep): # '../'
provides.extend(dep.provides)
provides.sort()
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write('goog.require(\'%s\');\n' % provide)
f.write("""
delete window.BLOCKLY_DIR;
delete window.BLOCKLY_BOOT;
};
// Delete any existing Closure (e.g. Soy's nogoog_shim).
document.write('<script type="text/javascript">var goog = undefined;</script>');
// Load fresh Closure Library.
document.write('<script type="text/javascript" src="' + window.BLOCKLY_DIR +
'/../closure-library-read-only/closure/goog/base.js"></script>');
document.write('<script type="text/javascript">window.BLOCKLY_BOOT()</script>');
""")
f.close()
print('SUCCESS: ' + target_filename)
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
self.gen_core()
self.gen_blocks()
self.gen_generator('javascript')
self.gen_generator('python')
self.gen_generator('dart')
def gen_core(self):
target_filename = 'blockly_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('use_closure_library', 'true'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(self.search_paths,
[os.path.join('core', 'blockly.js')])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, '')
def gen_blocks(self):
target_filename = 'blocks_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
# Add Blockly.Blocks to be compatible with the compiler.
params.append(('js_code', "goog.provide('Blockly.Blocks');"))
filenames = glob.glob(os.path.join('blocks', '*.js'))
for filename in filenames:
f = open(filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
# Remove Blockly.Blocks to be compatible with Blockly.
remove = "var Blockly={Blocks:{}};"
self.do_compile(params, target_filename, filenames, remove)
def gen_generator(self, language):
target_filename = language + '_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
# Add Blockly.Generator to be compatible with the compiler.
params.append(('js_code', "goog.provide('Blockly.Generator');"))
filenames = glob.glob(
os.path.join('generators', language, '*.js'))
filenames.insert(0, os.path.join('generators', language + '.js'))
for filename in filenames:
f = open(filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
filenames.insert(0, '[goog.provide]')
# Remove Blockly.Generator to be compatible with Blockly.
remove = "var Blockly={Generator:{}};"
self.do_compile(params, target_filename, filenames, remove)
def do_compile(self, params, target_filename, filenames, remove):
# Send the request to Google.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith('Input_'):
return '???'
n = int(name[6:])
return filenames[n]
if json_data.has_key('serverErrors'):
errors = json_data['serverErrors']
for error in errors:
print 'SERVER ERROR: %s' % target_filename
print error['error']
elif json_data.has_key('errors'):
errors = json_data['errors']
for error in errors:
print('FATAL ERROR')
print(error['error'])
print('%s at line %d:' % (
file_lookup(error['file']), error['lineno']))
print(error['line'])
print((' ' * error['charno']) + '^')
sys.exit(1)
else:
if json_data.has_key('warnings'):
warnings = json_data['warnings']
for warning in warnings:
print('WARNING')
print(warning['warning'])
print('%s at line %d:' % (
file_lookup(warning['file']), warning['lineno']))
print(warning['line'])
print((' ' * warning['charno']) + '^')
          print('')  # a bare print() under Python 2 would print "()" instead of a blank line
if not json_data.has_key('compiledCode'):
print('FATAL ERROR: Compiler did not return compiledCode.')
sys.exit(1)
code = HEADER + '\n' + json_data['compiledCode']
code = code.replace(remove, '')
stats = json_data['statistics']
original_b = stats['originalSize']
compressed_b = stats['compressedSize']
if original_b > 0 and compressed_b > 0:
f = open(target_filename, 'w')
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print('SUCCESS: ' + target_filename)
print('Size changed from %d KB to %d KB (%d%%).' % (
original_kb, compressed_kb, ratio))
else:
print 'UNKNOWN ERROR'
class Gen_langfiles(threading.Thread):
"""Generate JavaScript file for each natural language supported.
Runs in a separate thread.
"""
def __init__(self):
threading.Thread.__init__(self)
def _rebuild(self, srcs, dests):
# Determine whether any of the files in srcs is newer than any in dests.
try:
return (max(os.path.getmtime(src) for src in srcs) >
min(os.path.getmtime(dest) for dest in dests))
except OSError, e:
# Was a file not found?
if e.errno == errno.ENOENT:
# If it was a source file, we can't proceed.
if e.filename in srcs:
print('Source file missing: ' + e.filename)
sys.exit(1)
else:
# If a destination file was missing, rebuild.
return True
else:
print('Error checking file creation times: ' + e)
def run(self):
# The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
if self._rebuild([os.path.join('msg', 'messages.js')],
[os.path.join('msg', 'json', f) for f in
['en.json', 'qqq.json', 'synonyms.json']]):
try:
subprocess.check_call([
os.path.join('i18n', 'js_to_json.py'),
'--input_file', 'msg/messages.js',
'--output_dir', 'msg/json/',
'--quiet'])
except (subprocess.CalledProcessError, OSError), e:
# Documentation for subprocess.check_call says that CalledProcessError
# will be raised on failure, but I found that OSError is also possible.
print('Error running i18n/js_to_json.py: ', e)
sys.exit(1)
# Checking whether it is necessary to rebuild the js files would be a lot of
# work since we would have to compare each <lang>.json file with each
# <lang>.js file. Rebuilding is easy and cheap, so just go ahead and do it.
try:
# Use create_messages.py to create .js files from .json files.
cmd = [
os.path.join('i18n', 'create_messages.py'),
'--source_lang_file', os.path.join('msg', 'json', 'en.json'),
'--source_synonym_file', os.path.join('msg', 'json', 'synonyms.json'),
'--key_file', os.path.join('msg', 'json', 'keys.json'),
'--output_dir', os.path.join('msg', 'js'),
'--quiet']
json_files = glob.glob(os.path.join('msg', 'json', '*.json'))
json_files = [file for file in json_files if not
(file.endswith(('keys.json', 'synonyms.json', 'qqq.json')))]
cmd.extend(json_files)
subprocess.check_call(cmd)
except (subprocess.CalledProcessError, OSError), e:
print('Error running i18n/create_messages.py: ', e)
sys.exit(1)
# Output list of .js files created.
for f in json_files:
# This assumes the path to the current directory does not contain 'json'.
f = f.replace('json', 'js')
if os.path.isfile(f):
print('SUCCESS: ' + f)
else:
print('FAILED to create ' + f)
if __name__ == '__main__':
try:
calcdeps = import_path(os.path.join(os.path.pardir,
'closure-library-read-only', 'closure', 'bin', 'calcdeps.py'))
except ImportError:
print("""Error: Closure not found. Read this:
http://code.google.com/p/blockly/wiki/Closure""")
sys.exit(1)
search_paths = calcdeps.ExpandDirectories(
['core', 'realtime', os.path.join(os.path.pardir, 'closure-library-read-only')])
# Run both tasks in parallel threads.
# Uncompressed is limited by processor speed.
# Compressed is limited by network and server speed.
Gen_uncompressed(search_paths).start()
Gen_compressed(search_paths).start()
# This is run locally in a separate thread.
Gen_langfiles().start()
| sernaleon/charlie | Others/BlocklyProbatinas/hello/www/build.py | Python | apache-2.0 | 14,864 |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Piecewise Rational Quadratic Spline bijector."""
import collections
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
def _ensure_at_least_1d(t):
t = tf.convert_to_tensor(t)
return t + tf.zeros([1], dtype=t.dtype)
def _padded(t, lhs, rhs=None):
"""Left pads and optionally right pads the innermost axis of `t`."""
lhs = tf.convert_to_tensor(lhs, dtype=t.dtype)
zeros = tf.zeros([tf.rank(t) - 1, 2], dtype=tf.int32)
lhs_paddings = tf.concat([zeros, [[1, 0]]], axis=0)
result = tf.pad(t, paddings=lhs_paddings, constant_values=lhs)
if rhs is not None:
rhs = tf.convert_to_tensor(rhs, dtype=t.dtype)
rhs_paddings = tf.concat([zeros, [[0, 1]]], axis=0)
result = tf.pad(result, paddings=rhs_paddings, constant_values=rhs)
return result
def _knot_positions(bin_sizes, range_min):
return _padded(tf.cumsum(bin_sizes, axis=-1) + range_min, lhs=range_min)
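# Illustrative example: _knot_positions([[0.5, 0.5, 1.]], -1.) yields knot positions
# [[-1., -0.5, 0., 1.]] (range_min prepended to the running sum of bin sizes).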
_SplineShared = collections.namedtuple(
'SplineShared', 'out_of_bounds,x_k,y_k,d_k,d_kp1,h_k,w_k,s_k')
class RationalQuadraticSpline(bijector.AutoCompositeTensorBijector):
"""A piecewise rational quadratic spline, as developed in [1].
This transformation represents a monotonically increasing piecewise rational
quadratic function. Outside of the bounds of `knot_x`/`knot_y`, the transform
behaves as an identity function.
Typically this bijector will be used as part of a chain, with splines for
trailing `x` dimensions conditioned on some of the earlier `x` dimensions, and
with the inverse then solved first for unconditioned dimensions, then using
conditioning derived from those inverses, and so forth. For example, if we
split a 15-D `xs` vector into 3 components, we may implement a forward and
inverse as follows:
```python
nsplits = 3
class SplineParams(tf.Module):
def __init__(self, nbins=32, interval_width=2, range_min=-1,
min_bin_width=1e-3, min_slope=1e-3):
self._nbins = nbins
self._interval_width = interval_width # Sum of bin widths.
self._range_min = range_min # Position of first knot.
self._min_bin_width = min_bin_width # Bin width lower bound.
self._min_slope = min_slope # Lower bound for slopes at internal knots.
self._built = False
self._bin_widths = None
self._bin_heights = None
self._knot_slopes = None
def __call__(self, x, nunits):
if not self._built:
def _bin_positions(x):
out_shape = tf.concat((tf.shape(x)[:-1], (nunits, self._nbins)), 0)
x = tf.reshape(x, out_shape)
return tf.math.softmax(x, axis=-1) * (
self._interval_width - self._nbins * self._min_bin_width
) + self._min_bin_width
def _slopes(x):
out_shape = tf.concat((
tf.shape(x)[:-1], (nunits, self._nbins - 1)), 0)
x = tf.reshape(x, out_shape)
return tf.math.softplus(x) + self._min_slope
self._bin_widths = tf.keras.layers.Dense(
nunits * self._nbins, activation=_bin_positions, name='w')
self._bin_heights = tf.keras.layers.Dense(
nunits * self._nbins, activation=_bin_positions, name='h')
self._knot_slopes = tf.keras.layers.Dense(
nunits * (self._nbins - 1), activation=_slopes, name='s')
self._built = True
return tfb.RationalQuadraticSpline(
bin_widths=self._bin_widths(x),
bin_heights=self._bin_heights(x),
knot_slopes=self._knot_slopes(x),
range_min=self._range_min)
xs = np.random.randn(3, 15).astype(np.float32) # Keras won't Dense(.)(vec).
splines = [SplineParams() for _ in range(nsplits)]
def spline_flow():
stack = tfb.Identity()
for i in range(nsplits):
stack = tfb.RealNVP(5 * i, bijector_fn=splines[i])(stack)
return stack
ys = spline_flow().forward(xs)
ys_inv = spline_flow().inverse(ys) # ys_inv ~= xs
```
For a one-at-a-time autoregressive flow as in [1], it would be profitable to
implement a mask over `xs` to parallelize either the inverse or the forward
pass and implement the other using a `tf.while_loop`. See
`tfp.bijectors.MaskedAutoregressiveFlow` for support doing so (paired with
`tfp.bijectors.Invert` depending which direction should be parallel).
#### References
[1]: Conor Durkan, Artur Bekasov, Iain Murray, George Papamakarios. Neural
Spline Flows. _arXiv preprint arXiv:1906.04032_, 2019.
https://arxiv.org/abs/1906.04032
"""
def __init__(self,
bin_widths,
bin_heights,
knot_slopes,
range_min=-1,
validate_args=False,
name=None):
"""Construct a new RationalQuadraticSpline bijector.
For each argument, the innermost axis indexes bins/knots and batch axes
index axes of `x`/`y` spaces. A `RationalQuadraticSpline` with a separate
transform for each of three dimensions might have `bin_widths` shaped
`[3, 32]`. To use the same spline for each of `x`'s three dimensions we may
broadcast against `x` and use a `bin_widths` parameter shaped `[32]`.
Parameters will be broadcast against each other and against the input
`x`/`y`s, so if we want fixed slopes, we can use kwarg `knot_slopes=1`.
A typical recipe for acquiring compatible bin widths and heights would be:
```python
nbins = unconstrained_vector.shape[-1]
range_min, range_max, min_bin_size = -1, 1, 1e-2
scale = range_max - range_min - nbins * min_bin_size
bin_widths = tf.math.softmax(unconstrained_vector) * scale + min_bin_size
```
Args:
bin_widths: The widths of the spans between subsequent knot `x` positions,
a floating point `Tensor`. Must be positive, and at least 1-D. Innermost
axis must sum to the same value as `bin_heights`. The knot `x` positions
will be a first at `range_min`, followed by knots at `range_min +
cumsum(bin_widths, axis=-1)`.
bin_heights: The heights of the spans between subsequent knot `y`
positions, a floating point `Tensor`. Must be positive, and at least
1-D. Innermost axis must sum to the same value as `bin_widths`. The knot
`y` positions will be a first at `range_min`, followed by knots at
`range_min + cumsum(bin_heights, axis=-1)`.
knot_slopes: The slope of the spline at each knot, a floating point
`Tensor`. Must be positive. `1`s are implicitly padded for the first and
last implicit knots corresponding to `range_min` and `range_min +
sum(bin_widths, axis=-1)`. Innermost axis size should be 1 less than
that of `bin_widths`/`bin_heights`, or 1 for broadcasting.
range_min: The `x`/`y` position of the first knot, which has implicit
slope `1`. `range_max` is implicit, and can be computed as `range_min +
sum(bin_widths, axis=-1)`. Scalar floating point `Tensor`.
validate_args: Toggles argument validation (can hurt performance).
name: Optional name scope for associated ops. (Defaults to
`'RationalQuadraticSpline'`).
"""
parameters = dict(locals())
with tf.name_scope(name or 'RationalQuadraticSpline') as name:
dtype = dtype_util.common_dtype(
[bin_widths, bin_heights, knot_slopes, range_min],
dtype_hint=tf.float32)
self._bin_widths = tensor_util.convert_nonref_to_tensor(
bin_widths, dtype=dtype, name='bin_widths')
self._bin_heights = tensor_util.convert_nonref_to_tensor(
bin_heights, dtype=dtype, name='bin_heights')
self._knot_slopes = tensor_util.convert_nonref_to_tensor(
knot_slopes, dtype=dtype, name='knot_slopes')
self._range_min = tensor_util.convert_nonref_to_tensor(
range_min, dtype=dtype, name='range_min')
super(RationalQuadraticSpline, self).__init__(
dtype=dtype,
forward_min_event_ndims=0,
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
return dict(
bin_widths=parameter_properties.ParameterProperties(
event_ndims=1,
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED),
bin_heights=parameter_properties.ParameterProperties(
event_ndims=1,
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED),
knot_slopes=parameter_properties.ParameterProperties(
event_ndims=1,
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
range_min=parameter_properties.ParameterProperties(
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED,))
@property
def bin_widths(self):
return self._bin_widths
@property
def bin_heights(self):
return self._bin_heights
@property
def knot_slopes(self):
return self._knot_slopes
@property
def range_min(self):
return self._range_min
@classmethod
def _is_increasing(cls):
return True
def _compute_shared(self, x=None, y=None):
"""Captures shared computations across forward/inverse/logdet.
Only one of `x` or `y` should be specified.
Args:
x: The `x` values we will search for.
y: The `y` values we will search for.
Returns:
data: A namedtuple with named fields containing shared computations.
"""
assert (x is None) != (y is None)
is_x = x is not None
range_min = tf.convert_to_tensor(self.range_min, name='range_min')
kx = _knot_positions(self.bin_widths, range_min)
ky = _knot_positions(self.bin_heights, range_min)
kd = _padded(_ensure_at_least_1d(self.knot_slopes), lhs=1, rhs=1)
kx_or_ky = kx if is_x else ky
kx_or_ky_min = kx_or_ky[..., 0]
kx_or_ky_max = kx_or_ky[..., -1]
x_or_y = x if is_x else y
out_of_bounds = (x_or_y <= kx_or_ky_min) | (x_or_y >= kx_or_ky_max)
x_or_y = tf.where(out_of_bounds, kx_or_ky_min, x_or_y)
shape = functools.reduce(
tf.broadcast_dynamic_shape,
(
tf.shape(x_or_y[..., tf.newaxis]), # Add a n_knots dim.
tf.shape(kx),
tf.shape(ky),
tf.shape(kd)))
bc_x_or_y = tf.broadcast_to(x_or_y, shape[:-1])
bc_kx = tf.broadcast_to(kx, shape)
bc_ky = tf.broadcast_to(ky, shape)
bc_kd = tf.broadcast_to(kd, shape)
bc_kx_or_ky = bc_kx if is_x else bc_ky
indices = tf.maximum(
tf.zeros([], dtype=tf.int64),
tf.searchsorted(
bc_kx_or_ky[..., :-1],
bc_x_or_y[..., tf.newaxis],
side='right',
out_type=tf.int64) - 1)
def gather_squeeze(params, indices):
rank = tensorshape_util.rank(indices.shape)
if rank is None:
raise ValueError('`indices` must have statically known rank.')
return tf.gather(params, indices, axis=-1, batch_dims=rank - 1)[..., 0]
x_k = gather_squeeze(bc_kx, indices)
x_kp1 = gather_squeeze(bc_kx, indices + 1)
y_k = gather_squeeze(bc_ky, indices)
y_kp1 = gather_squeeze(bc_ky, indices + 1)
d_k = gather_squeeze(bc_kd, indices)
d_kp1 = gather_squeeze(bc_kd, indices + 1)
h_k = y_kp1 - y_k
w_k = x_kp1 - x_k
s_k = h_k / w_k
return _SplineShared(
out_of_bounds=out_of_bounds,
x_k=x_k,
y_k=y_k,
d_k=d_k,
d_kp1=d_kp1,
h_k=h_k,
w_k=w_k,
s_k=s_k)
def _forward(self, x):
"""Compute the forward transformation (Appendix A.1)."""
d = self._compute_shared(x=x)
relx = (x - d.x_k) / d.w_k
spline_val = (
d.y_k + ((d.h_k * (d.s_k * relx**2 + d.d_k * relx * (1 - relx))) /
(d.s_k + (d.d_kp1 + d.d_k - 2 * d.s_k) * relx * (1 - relx))))
y_val = tf.where(d.out_of_bounds, x, spline_val)
return y_val
def _inverse(self, y):
"""Compute the inverse transformation (Appendix A.3)."""
d = self._compute_shared(y=y)
rely = tf.where(d.out_of_bounds, tf.zeros([], dtype=y.dtype), y - d.y_k)
term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)
# These terms are the a, b, c terms of the quadratic formula.
a = d.h_k * (d.s_k - d.d_k) + term2
b = d.h_k * d.d_k - term2
c = -d.s_k * rely
# The expression used here has better numerical behavior for small 4*a*c.
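    # (It is the "citardauq" form of the root: 2c / (-b - sqrt(b**2 - 4ac)) equals
    # (-b + sqrt(b**2 - 4ac)) / (2a) algebraically, but avoids subtracting two nearly
    # equal quantities when 4ac is close to zero.)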
relx = tf.where(
tf.equal(rely, 0), tf.zeros([], dtype=a.dtype),
(2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))
return tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)
def _forward_log_det_jacobian(self, x):
"""Compute the forward derivative (Appendix A.2)."""
d = self._compute_shared(x=x)
relx = (x - d.x_k) / d.w_k
relx = tf.where(d.out_of_bounds, tf.constant(.5, x.dtype), relx)
grad = (
2 * tf.math.log(d.s_k) +
tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln
d.d_k * (1 - relx)**2) -
2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *
(1 - relx) + d.s_k))
return tf.where(d.out_of_bounds, tf.zeros([], dtype=x.dtype), grad)
def _parameter_control_dependencies(self, is_init):
"""Validate parameters."""
bw, bh, kd = None, None, None
try:
shape = tf.broadcast_static_shape(self.bin_widths.shape,
self.bin_heights.shape)
except ValueError as e:
raise ValueError('`bin_widths`, `bin_heights` must broadcast: {}'.format(
str(e)))
bin_sizes_shape = shape
try:
shape = tf.broadcast_static_shape(shape[:-1], self.knot_slopes.shape[:-1])
except ValueError as e:
raise ValueError(
'`bin_widths`, `bin_heights`, and `knot_slopes` must broadcast on '
'batch axes: {}'.format(str(e)))
assertions = []
if (tensorshape_util.is_fully_defined(bin_sizes_shape[-1:]) and
tensorshape_util.is_fully_defined(self.knot_slopes.shape[-1:])):
if tensorshape_util.rank(self.knot_slopes.shape) > 0:
num_interior_knots = tensorshape_util.dims(bin_sizes_shape)[-1] - 1
if tensorshape_util.dims(
self.knot_slopes.shape)[-1] not in (1, num_interior_knots):
raise ValueError(
'Innermost axis of non-scalar `knot_slopes` must broadcast with '
'{}; got {}.'.format(num_interior_knots, self.knot_slopes.shape))
elif self.validate_args:
if is_init != any(
tensor_util.is_ref(t)
for t in (self.bin_widths, self.bin_heights, self.knot_slopes)):
bw = tf.convert_to_tensor(self.bin_widths) if bw is None else bw
bh = tf.convert_to_tensor(self.bin_heights) if bh is None else bh
kd = _ensure_at_least_1d(self.knot_slopes) if kd is None else kd
shape = tf.broadcast_dynamic_shape(
tf.shape((bw + bh)[..., :-1]), tf.shape(kd))
assertions.append(
assert_util.assert_greater(
tf.shape(shape)[0],
tf.zeros([], dtype=shape.dtype),
message='`(bin_widths + bin_heights)[..., :-1]` must broadcast '
'with `knot_slopes` to at least 1-D.'))
if not self.validate_args:
assert not assertions
return assertions
if (is_init != tensor_util.is_ref(self.bin_widths) or
is_init != tensor_util.is_ref(self.bin_heights)):
bw = tf.convert_to_tensor(self.bin_widths) if bw is None else bw
bh = tf.convert_to_tensor(self.bin_heights) if bh is None else bh
assertions += [
assert_util.assert_near(
tf.reduce_sum(bw, axis=-1),
tf.reduce_sum(bh, axis=-1),
message='`sum(bin_widths, axis=-1)` must equal '
'`sum(bin_heights, axis=-1)`.'),
]
if is_init != tensor_util.is_ref(self.bin_widths):
bw = tf.convert_to_tensor(self.bin_widths) if bw is None else bw
assertions += [
assert_util.assert_positive(
bw, message='`bin_widths` must be positive.'),
]
if is_init != tensor_util.is_ref(self.bin_heights):
bh = tf.convert_to_tensor(self.bin_heights) if bh is None else bh
assertions += [
assert_util.assert_positive(
bh, message='`bin_heights` must be positive.'),
]
if is_init != tensor_util.is_ref(self.knot_slopes):
kd = _ensure_at_least_1d(self.knot_slopes) if kd is None else kd
assertions += [
assert_util.assert_positive(
kd, message='`knot_slopes` must be positive.'),
]
return assertions
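# Minimal standalone usage sketch (illustrative parameter values; not part of the
# original module):
#
#   import tensorflow.compat.v2 as tf
#   spline = RationalQuadraticSpline(
#       bin_widths=[0.5, 0.5, 1.],   # sums to 2, the width of [-1, 1]
#       bin_heights=[1., 0.5, 0.5],  # must sum to the same total as bin_widths
#       knot_slopes=[0.7, 1.3],      # one positive slope per interior knot
#       range_min=-1.)
#   y = spline.forward(tf.constant([-0.5, 0.3]))
#   x = spline.inverse(y)            # recovers the inputs inside [-1, 1]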
| tensorflow/probability | tensorflow_probability/python/bijectors/rational_quadratic_spline.py | Python | apache-2.0 | 17,963 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import sys
import csv
import json
import random
from elasticsearch import Elasticsearch, RequestsHttpConnection
from datetime import datetime
from requests_aws4auth import AWS4Auth
from awsconfig import ESHOST, REGION
from nocheckin import aws_access_key_id,aws_secret_access_key
host = ESHOST
region = REGION
min_score=1.25
#es = Elasticsearch( hosts=[{'host': host, 'port': 443}])
#es = Elasticsearch(host=host, port=80)
awsauth = AWS4Auth(aws_access_key_id, aws_secret_access_key, region, 'es')
es = Elasticsearch(
hosts=[{'host': host, 'port': 443}],
http_auth=awsauth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
def preProcess(msg):
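    # Replace question phrases ("不知道該怎麼辦" ~ "don't know what to do",
    # "怎麼辦" ~ "what should I do") and full-width punctuation with spaces so they
    # do not dominate the Elasticsearch match query.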
cList = ['不知道該怎麼辦','怎麼辦','?','?',"。",",",":",";"]
for c in cList:
if msg.count(c) > 0:
msg = msg.replace(c," ")
return msg
def esBibleHandler(msg, words):
result =""
msg = preProcess(msg)
q = {
"min_score": min_score,
"query" :{
"match" : {
"text": msg
}
}
}
res = es.search(index="bible", body=q)
print("Got %d Hits:" % res['hits']['total'])
# find 3 hits max
maxhit = 3
cnt = 1
    prefix = u'或許你想知道的是基督信仰方面...\n'  # "Perhaps what you want to know is about the Christian faith..."
for h in res['hits']['hits']:
result = result + (h['_source']['text'])+ " \n"
#result = extraFilter(result)
cnt +=1
if cnt > maxhit:
break
if result != '':
result = prefix+result
return result
if __name__ == '__main__':
if len(sys.argv) <= 1:
print("usage: python "+sys.argv[0]+" <keyword> ")
print("")
exit(0)
print("==== result ===")
print(esBibleHandler(sys.argv[1],[]))
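# --- Editor note (hedged, not part of the original script) -------------------
# The search assumes an Elasticsearch index named "bible" whose documents carry
# a "text" field (index and field names come from the query above; the mapping
# and doc_type below are assumptions):
#
#   es.index(index='bible', doc_type='verse',
#            body={'text': u'...verse text...'})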
| taosheng/jarvis | chatbot/src/esBible.py | Python | apache-2.0 | 1,837 |
print input() + -input()
| cantora/pyc | p0tests/grader_tests/input_1.py | Python | gpl-3.0 | 25 |
'''
A Recurrent Neural Network (LSTM) implementation example using the TensorFlow library.
This neural network is used to compress the state matrix into a representation
that is useful for the actor and the critic.
The weights of this network should be trained along with the actor and the critic.
Author: Max Ferguson
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn, seq2seq
from tensorflow.contrib.seq2seq import Helper, TrainingHelper, BasicDecoder
from tensorflow.contrib.layers import xavier_initializer
from tfmodels.seq2seq import TrainableSequence2Sequence
class OracleNetwork(TrainableSequence2Sequence):
def create_reward(self, state, reuse):
"""
Create a deep NN that estimates collisions from state
"""
state_size = state.get_shape().as_list()[1]
n_positions = int(state_size/2)
n_particles = int(state_size/4)
n_output = 1
xavier = xavier_initializer()
with tf.variable_scope("reward", reuse=reuse):
w1 = tf.get_variable("w1", (state_size, n_positions), initializer=xavier)
b1 = tf.get_variable("b1", (n_positions,), dtype=tf.float32)
w2 = tf.get_variable("w2", (n_positions, n_particles), initializer=xavier)
b2 = tf.get_variable("b2", (n_particles,), dtype=tf.float32)
w3 = tf.get_variable("w3", (n_particles, n_particles), initializer=xavier)
b3 = tf.get_variable("b3", (n_particles,), dtype=tf.float32)
w4 = tf.get_variable("w4", (n_particles, n_output), initializer=xavier)
b4 = tf.get_variable("b4", (n_output,), dtype=tf.float32)
layer1 = tf.square(tf.matmul(state,w1) + b1) # Calculate element-wise x,y distances
layer2 = tf.nn.relu(tf.matmul(layer1,w2) + b2) # Calculate remaining space between particles
layer3 = tf.nn.sigmoid(tf.matmul(layer2,w3) + b3) # Calculate the number of collisions
return tf.matmul(layer3,w4) + b4 # Return the total reward based on number of collisions
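# --- Editor note (hedged, not part of the original module) -------------------
# TrainableSequence2Sequence is project-specific, so the constructor call below
# is an assumption; the sketch only shows how create_reward() attaches a reward
# head to a state placeholder in TF1-style graph code:
#
#   state = tf.placeholder(tf.float32, shape=[None, state_size])
#   oracle = OracleNetwork(...)                       # constructor args assumed
#   reward = oracle.create_reward(state, reuse=False)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       r = sess.run(reward, feed_dict={state: batch_of_states})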
| maxkferg/smart-city-model | modules/collision/dqn/oracle.py | Python | gpl-3.0 | 2,075 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves the stub App Engine APIs (e.g. memcache, datastore) over HTTP.
The Remote API protocol is used for communication.
"""
from __future__ import with_statement
import BaseHTTPServer
import httplib
import logging
import os.path
import pickle
import socket
import SocketServer
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import urllib2
import urlparse
import wsgiref.headers
import google
import yaml
from google.appengine.api import mail_stub
from google.appengine.api import request_info
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
from google.appengine.api.logservice import logservice_stub
from google.appengine.api.search import simple_search_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.prospective_search import prospective_search_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.system import system_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.api import datastore_file_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
QUIT_PATH = '/quit'
GLOBAL_API_LOCK = threading.RLock()
class Error(Exception):
pass
def _ClearDatastoreStorage(datastore_path):
"""Delete the datastore storage file at the given path."""
if os.path.lexists(datastore_path):
try:
os.remove(datastore_path)
except OSError, e:
logging.warning('Failed to remove datastore file %r: %s',
datastore_path,
e)
def _ClearProspectiveSearchStorage(prospective_search_path):
"""Delete the perspective search storage file at the given path."""
if os.path.lexists(prospective_search_path):
try:
os.remove(prospective_search_path)
except OSError, e:
logging.warning('Failed to remove prospective search file %r: %s',
prospective_search_path,
e)
THREAD_SAFE_SERVICES = frozenset((
'app_identity_service',
'capability_service',
'channel',
'logservice',
'mail',
'memcache',
'remote_socket',
'urlfetch',
'user',
'xmpp',
))
def _ExecuteRequest(request):
"""Executes an API method call and returns the response object.
Args:
request: A remote_api.Request object representing the API call e.g. a call
to memcache.Get.
Returns:
A ProtocolBuffer.ProtocolMessage representing the API response e.g. a
memcache_service_pb.MemcacheGetResponse.
Raises:
apiproxy_errors.CallNotFoundError: if the requested method doesn't exist.
apiproxy_errors.ApplicationError: if the API method calls fails.
"""
service = request.service_name()
method = request.method()
service_methods = remote_api_services.SERVICE_PB_MAP.get(service, {})
request_class, response_class = service_methods.get(method, (None, None))
if not request_class:
raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service,
method))
request_data = request_class()
request_data.ParseFromString(request.request())
response_data = response_class()
def MakeRequest():
apiproxy_stub_map.MakeSyncCall(service, method, request_data,
response_data)
if service in THREAD_SAFE_SERVICES:
MakeRequest()
else:
with GLOBAL_API_LOCK:
MakeRequest()
return response_data
class APIRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler for all API server HTTP requests."""
def log_message(self, format, *args):
logging.debug(format, *args)
def do_GET(self):
if self.path == QUIT_PATH:
self._HandleShutdown()
else:
params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
rtok = params.get('rtok', ['0'])[0]
self.send_response(httplib.OK)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(yaml.dump({
'app_id': self.server.app_id,
'rtok': rtok,
}))
def _HandleShutdown(self):
"""Handles a request for the API Server to exit."""
self.send_response(httplib.OK)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('API Server Quitting')
self.server.shutdown()
def do_POST(self):
"""Handles a single API request e.g. memcache.Get()."""
self.send_response(httplib.OK)
self.send_header('Content-Type', 'application/octet-stream')
self.end_headers()
response = remote_api_pb.Response()
try:
request = remote_api_pb.Request()
request.ParseFromString(
self.rfile.read(int(self.headers['content-length'])))
api_response = _ExecuteRequest(request).Encode()
response.set_response(api_response)
except Exception, e:
logging.debug('Exception while handling %s\n%s',
request,
traceback.format_exc())
response.set_exception(pickle.dumps(e))
if isinstance(e, apiproxy_errors.ApplicationError):
application_error = response.mutable_application_error()
application_error.set_code(e.application_error)
application_error.set_detail(e.error_detail)
self.wfile.write(response.Encode())
class APIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""Serves API calls over HTTP."""
def __init__(self, server_address, app_id):
BaseHTTPServer.HTTPServer.__init__(self, server_address, APIRequestHandler)
self.app_id = app_id
def _SetupStubs(
app_id,
application_root,
appidentity_email_address,
appidentity_private_key_path,
trusted,
blobstore_path,
use_sqlite,
auto_id_policy,
high_replication,
datastore_path,
datastore_require_indexes,
images_host_prefix,
logs_path,
mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
mail_enable_sendmail,
mail_show_mail_body,
mail_allow_tls,
matcher_prospective_search_path,
taskqueue_auto_run_tasks,
taskqueue_task_retry_seconds,
taskqueue_default_http_server,
user_login_url,
user_logout_url,
default_gcs_bucket_name):
"""Configures the APIs hosted by this server.
Args:
app_id: The str application id e.g. "guestbook".
application_root: The path to the directory containing the user's
application e.g. "/home/bquinlan/myapp".
trusted: A bool indicating if privileged APIs should be made available.
blobstore_path: The path to the file that should be used for blobstore
storage.
use_sqlite: A bool indicating whether DatastoreSqliteStub or
DatastoreFileStub should be used.
auto_id_policy: One of datastore_stub_util.SEQUENTIAL or .SCATTERED,
indicating whether the Datastore stub should assign IDs sequentially
or scattered.
high_replication: A bool indicating whether to use the high replication
consistency model.
datastore_path: The path to the file that should be used for datastore
storage.
    datastore_require_indexes: A bool indicating if the same production
      datastore index requirements should be enforced i.e. if True then
      a google.appengine.ext.db.NeedIndexError will be raised if a query
      is executed without the required indexes.
    images_host_prefix: The URL prefix (protocol://host:port) to prepend to
      image URLs on calls to images.GetUrlBase.
logs_path: Path to the file to store the logs data in.
mail_smtp_host: The SMTP hostname that should be used when sending e-mails.
If None then the mail_enable_sendmail argument is considered.
mail_smtp_port: The SMTP port number that should be used when sending
e-mails. If this value is None then mail_smtp_host must also be None.
mail_smtp_user: The username to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host is also None or if
the SMTP server does not require authentication.
mail_smtp_password: The password to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host or mail_smtp_user
is also None.
mail_enable_sendmail: A bool indicating if sendmail should be used when
sending e-mails. This argument is ignored if mail_smtp_host is not None.
mail_show_mail_body: A bool indicating whether the body of sent e-mails
should be written to the logs.
mail_allow_tls: A bool indicating whether to allow TLS support.
matcher_prospective_search_path: The path to the file that should be used to
save prospective search subscriptions.
taskqueue_auto_run_tasks: A bool indicating whether taskqueue tasks should
      be run automatically or if they must be manually triggered.
taskqueue_task_retry_seconds: An int representing the number of seconds to
      wait before retrying a failed taskqueue task.
taskqueue_default_http_server: A str containing the address of the http
server that should be used to execute tasks.
user_login_url: A str containing the url that should be used for user login.
user_logout_url: A str containing the url that should be used for user
logout.
default_gcs_bucket_name: A str overriding the usual default bucket name.
"""
os.environ['APPLICATION_ID'] = app_id
tmp_app_identity_stub = app_identity_stub.AppIdentityServiceStub.Create(
email_address=appidentity_email_address,
private_key_path=appidentity_private_key_path)
if default_gcs_bucket_name is not None:
tmp_app_identity_stub.SetDefaultGcsBucketName(default_gcs_bucket_name)
apiproxy_stub_map.apiproxy.RegisterStub(
'app_identity_service', tmp_app_identity_stub)
blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id)
apiproxy_stub_map.apiproxy.RegisterStub(
'blobstore',
blobstore_stub.BlobstoreServiceStub(blob_storage))
apiproxy_stub_map.apiproxy.RegisterStub(
'capability_service',
capability_stub.CapabilityServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'channel',
channel_service_stub.ChannelServiceStub())
if use_sqlite:
datastore = datastore_sqlite_stub.DatastoreSqliteStub(
app_id,
datastore_path,
datastore_require_indexes,
trusted,
root_path=application_root,
auto_id_policy=auto_id_policy)
else:
datastore = datastore_file_stub.DatastoreFileStub(
app_id,
datastore_path,
datastore_require_indexes,
trusted,
root_path=application_root,
auto_id_policy=auto_id_policy)
if high_replication:
datastore.SetConsistencyPolicy(
datastore_stub_util.TimeBasedHRConsistencyPolicy())
apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v3', datastore)
apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v4',
datastore_v4_stub.DatastoreV4Stub(app_id))
apiproxy_stub_map.apiproxy.RegisterStub(
'file',
file_service_stub.FileServiceStub(blob_storage))
try:
from google.appengine.api.images import images_stub
except ImportError:
from google.appengine.api.images import images_not_implemented_stub
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_not_implemented_stub.ImagesNotImplementedServiceStub())
else:
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_stub.ImagesServiceStub(host_prefix=images_host_prefix))
apiproxy_stub_map.apiproxy.RegisterStub(
'logservice',
logservice_stub.LogServiceStub(logs_path=logs_path))
apiproxy_stub_map.apiproxy.RegisterStub(
'mail',
mail_stub.MailServiceStub(mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
enable_sendmail=mail_enable_sendmail,
show_mail_body=mail_show_mail_body,
allow_tls=mail_allow_tls))
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache',
memcache_stub.MemcacheServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'search',
simple_search_stub.SearchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub('system',
system_stub.SystemServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue',
taskqueue_stub.TaskQueueServiceStub(
root_path=application_root,
auto_task_running=taskqueue_auto_run_tasks,
task_retry_seconds=taskqueue_task_retry_seconds,
default_http_server=taskqueue_default_http_server))
apiproxy_stub_map.apiproxy.GetStub('taskqueue').StartBackgroundExecution()
apiproxy_stub_map.apiproxy.RegisterStub(
'urlfetch',
urlfetch_stub.URLFetchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'user',
user_service_stub.UserServiceStub(login_url=user_login_url,
logout_url=user_logout_url))
apiproxy_stub_map.apiproxy.RegisterStub(
'xmpp',
xmpp_service_stub.XmppServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'matcher',
prospective_search_stub.ProspectiveSearchStub(
matcher_prospective_search_path,
apiproxy_stub_map.apiproxy.GetStub('taskqueue')))
def _TearDownStubs():
"""Clean up any stubs that need cleanup."""
logging.info('Applying all pending transactions and saving the datastore')
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
datastore_stub.Write()
def ParseCommandArguments(args):
"""Parses and the application's command line arguments.
Args:
args: A list of command line arguments *not* including the executable or
script e.g. ['-A' 'myapp', '--api_port=8000'].
Returns:
An object containing the values passed in the commandline as attributes.
Raises:
SystemExit: if the argument parsing fails.
"""
import argparse
from google.appengine.tools import boolean_action
parser = argparse.ArgumentParser()
parser.add_argument('-A', '--application', required=True)
parser.add_argument('--api_host', default='')
parser.add_argument('--api_port', default=8000, type=int)
parser.add_argument('--trusted',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--appidentity_email_address', default=None)
parser.add_argument('--appidentity_private_key_path', default=None)
parser.add_argument('--application_root', default=None)
parser.add_argument('--application_host', default='localhost')
parser.add_argument('--application_port', default=None)
parser.add_argument('--blobstore_path', default=None)
parser.add_argument('--datastore_path', default=None)
parser.add_argument('--auto_id_policy', default='scattered',
type=lambda s: s.lower(),
choices=(datastore_stub_util.SEQUENTIAL,
datastore_stub_util.SCATTERED))
parser.add_argument('--use_sqlite',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--high_replication',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--require_indexes',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--clear_datastore',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--logs_path', default=None)
parser.add_argument('--enable_sendmail',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--smtp_host', default='')
parser.add_argument('--smtp_port', default=25, type=int)
parser.add_argument('--smtp_user', default='')
parser.add_argument('--smtp_password', default='')
parser.add_argument('--show_mail_body',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--smtp_allow_tls',
action=boolean_action.BooleanAction,
const=True,
default=True)
parser.add_argument('--prospective_search_path', default=None)
parser.add_argument('--clear_prospective_search',
action=boolean_action.BooleanAction,
const=True,
default=False)
parser.add_argument('--enable_task_running',
action=boolean_action.BooleanAction,
const=True,
default=True)
parser.add_argument('--task_retry_seconds', default=30, type=int)
parser.add_argument('--user_login_url', default=None)
parser.add_argument('--user_logout_url', default=None)
return parser.parse_args(args)
class APIServerProcess(object):
"""Manages an API Server running as a seperate process."""
def __init__(self,
executable,
host,
port,
app_id,
script=None,
appidentity_email_address=None,
appidentity_private_key_path=None,
application_host=None,
application_port=None,
application_root=None,
auto_id_policy=None,
blobstore_path=None,
clear_datastore=None,
clear_prospective_search=None,
datastore_path=None,
enable_sendmail=None,
enable_task_running=None,
high_replication=None,
logs_path=None,
prospective_search_path=None,
require_indexes=None,
show_mail_body=None,
smtp_host=None,
smtp_password=None,
smtp_port=None,
smtp_user=None,
smtp_allow_tls=None,
task_retry_seconds=None,
trusted=None,
use_sqlite=None,
default_gcs_bucket_name=None):
"""Configures the APIs hosted by this server.
Args:
executable: The path of the executable to use when running the API Server
e.g. "/usr/bin/python".
host: The host name that should be used by the API Server e.g.
"localhost".
port: The port number that should be used by the API Server e.g. 8080.
app_id: The str application id e.g. "guestbook".
script: The name of the script that should be used, along with the
executable argument, to run the API Server e.g. "api_server.py".
If None then the executable is run without a script argument.
appidentity_email_address: Email address for service account substitute.
appidentity_private_key_path: Private key for service account substitute.
application_host: The name of the host where the development application
server is running e.g. "localhost".
application_port: The port where the application server is running e.g.
8000.
application_root: The path to the directory containing the user's
application e.g. "/home/bquinlan/myapp".
auto_id_policy: One of "sequential" or "scattered", indicating whether
the Datastore stub should assign IDs sequentially or scattered.
blobstore_path: The path to the file that should be used for blobstore
storage.
clear_datastore: Clears the file at datastore_path, emptying the
datastore from previous runs.
clear_prospective_search: Clears the file at prospective_search_path,
        emptying the prospective search state from previous runs.
datastore_path: The path to the file that should be used for datastore
storage.
enable_sendmail: A bool indicating if sendmail should be used when sending
e-mails. This argument is ignored if mail_smtp_host is not None.
enable_task_running: A bool indicating whether taskqueue tasks should
        be run automatically or if they must be manually triggered.
high_replication: A bool indicating whether to use the high replication
consistency model.
logs_path: Path to the file to store the logs data in.
prospective_search_path: The path to the file that should be used to
save prospective search subscriptions.
require_indexes: A bool indicating if the same production
        datastore index requirements should be enforced i.e. if True then
        a google.appengine.ext.db.NeedIndexError will be raised if a query
is executed without the required indexes.
show_mail_body: A bool indicating whether the body of sent e-mails
should be written to the logs.
smtp_host: The SMTP hostname that should be used when sending e-mails.
If None then the enable_sendmail argument is considered.
smtp_password: The password to use when authenticating with the
SMTP server. This value may be None if smtp_host or smtp_user
is also None.
smtp_port: The SMTP port number that should be used when sending
e-mails. If this value is None then smtp_host must also be None.
smtp_user: The username to use when authenticating with the
SMTP server. This value may be None if smtp_host is also None or if
the SMTP server does not require authentication.
smtp_allow_tls: A bool indicating whether to enable TLS.
task_retry_seconds: An int representing the number of seconds to
        wait before retrying a failed taskqueue task.
trusted: A bool indicating if privileged APIs should be made available.
use_sqlite: A bool indicating whether DatastoreSqliteStub or
DatastoreFileStub should be used.
default_gcs_bucket_name: A str overriding the normal default bucket name.
"""
self._process = None
self._host = host
self._port = port
if script:
self._args = [executable, script]
else:
self._args = [executable]
self._BindArgument('--api_host', host)
self._BindArgument('--api_port', port)
self._BindArgument('--appidentity_email_address', appidentity_email_address)
self._BindArgument('--appidentity_private_key_path', appidentity_private_key_path)
self._BindArgument('--application_host', application_host)
self._BindArgument('--application_port', application_port)
self._BindArgument('--application_root', application_root)
self._BindArgument('--application', app_id)
self._BindArgument('--auto_id_policy', auto_id_policy)
self._BindArgument('--blobstore_path', blobstore_path)
self._BindArgument('--clear_datastore', clear_datastore)
self._BindArgument('--clear_prospective_search', clear_prospective_search)
self._BindArgument('--datastore_path', datastore_path)
self._BindArgument('--enable_sendmail', enable_sendmail)
self._BindArgument('--enable_task_running', enable_task_running)
self._BindArgument('--high_replication', high_replication)
self._BindArgument('--logs_path', logs_path)
self._BindArgument('--prospective_search_path', prospective_search_path)
self._BindArgument('--require_indexes', require_indexes)
self._BindArgument('--show_mail_body', show_mail_body)
self._BindArgument('--smtp_host', smtp_host)
self._BindArgument('--smtp_password', smtp_password)
self._BindArgument('--smtp_port', smtp_port)
self._BindArgument('--smtp_user', smtp_user)
self._BindArgument('--smtp_allow_tls', smtp_allow_tls)
self._BindArgument('--task_retry_seconds', task_retry_seconds)
self._BindArgument('--trusted', trusted)
self._BindArgument('--use_sqlite', use_sqlite)
self._BindArgument('--default_gcs_bucket_name', default_gcs_bucket_name)
@property
def url(self):
"""Returns the URL that should be used to communicate with the server."""
return 'http://%s:%d' % (self._host, self._port)
def __repr__(self):
return '<APIServerProcess command=%r>' % ' '.join(self._args)
def Start(self):
"""Starts the API Server process."""
assert not self._process, 'Start() can only be called once'
self._process = subprocess.Popen(self._args)
def _CanConnect(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self._host, self._port))
except socket.error:
connected = False
else:
connected = True
s.close()
return connected
def WaitUntilServing(self, timeout=30.0):
"""Waits until the API Server is ready to handle requests.
Args:
timeout: The maximum number of seconds to wait for the server to be ready.
Raises:
Error: if the server process exits or is not ready in "timeout" seconds.
"""
assert self._process, 'server was not started'
finish_time = time.time() + timeout
while time.time() < finish_time:
if self._process.poll() is not None:
raise Error('server has already exited with return: %r',
self._process.returncode)
if self._CanConnect():
return
time.sleep(0.2)
raise Error('server did not start after %f seconds', timeout)
def _BindArgument(self, argument, value):
if value is not None:
self._args.append('%s=%s' % (argument, value))
def Quit(self, timeout=5.0):
"""Causes the API Server process to exit.
Args:
timeout: The maximum number of seconds to wait for an orderly shutdown
          before forcibly killing the process.
"""
assert self._process, 'server was not started'
if self._process.poll() is None:
try:
urllib2.urlopen(self.url + QUIT_PATH)
except urllib2.URLError:
pass
finish_time = time.time() + timeout
while time.time() < finish_time and self._process.poll() is None:
time.sleep(0.2)
if self._process.returncode is None:
logging.warning('api_server did not quit cleanly, killing')
self._process.kill()
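# --- Editor note (hedged, not part of the SDK) --------------------------------
# Typical driving sequence for APIServerProcess, using only methods defined
# above; the executable, script path and app id are placeholders:
#
#   api = APIServerProcess('python', 'localhost', 8000, 'dev~myapp',
#                          script='api_server.py')
#   api.Start()
#   api.WaitUntilServing()
#   ...  # issue remote_api calls against api.url
#   api.Quit()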
class ApiServerDispatcher(request_info._LocalFakeDispatcher):
"""An api_server Dispatcher implementation."""
def add_request(self, method, relative_url, headers, body, source_ip,
server_name=None, version=None, instance_id=None):
"""Process an HTTP request.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
server_name: An optional str containing the server name to service this
request. If unset, the request will be dispatched to the default
server.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched to the default version.
instance_id: An optional str containing the instance_id of the instance to
        service this request. If unset, the request will be dispatched
        according to the load-balancing for the server and version.
Returns:
A request_info.ResponseTuple containing the response information for the
HTTP request.
"""
try:
header_dict = wsgiref.headers.Headers(headers)
connection_host = header_dict.get('host')
connection = httplib.HTTPConnection(connection_host)
connection.putrequest(
method, relative_url,
skip_host='host' in header_dict,
skip_accept_encoding='accept-encoding' in header_dict)
for header_key, header_value in headers:
connection.putheader(header_key, header_value)
connection.endheaders()
connection.send(body)
response = connection.getresponse()
response.read()
response.close()
return request_info.ResponseTuple(
'%d %s' % (response.status, response.reason), [], '')
except (httplib.HTTPException, socket.error):
logging.exception(
          'An error occurred while sending a %s request to "%s%s"',
method, connection_host, relative_url)
return request_info.ResponseTuple('0', [], '')
def main():
logging.basicConfig(
level=logging.INFO,
format='[API Server] [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
args = ParseCommandArguments(sys.argv[1:])
if args.clear_datastore:
_ClearDatastoreStorage(args.datastore_path)
if args.clear_prospective_search:
_ClearProspectiveSearchStorage(args.prospective_search_path)
if args.blobstore_path is None:
_, blobstore_temp_filename = tempfile.mkstemp(prefix='ae-blobstore')
args.blobstore_path = blobstore_temp_filename
if args.datastore_path is None:
_, datastore_temp_filename = tempfile.mkstemp(prefix='ae-datastore')
args.datastore_path = datastore_temp_filename
if args.prospective_search_path is None:
_, prospective_search_temp_filename = tempfile.mkstemp(
prefix='ae-prospective_search')
args.prospective_search_path = prospective_search_temp_filename
if args.application_host:
application_address = args.application_host
if args.application_port and args.application_port != 80:
application_address += ':' + str(args.application_port)
else:
application_address = None
if not hasattr(args, 'default_gcs_bucket_name'):
args.default_gcs_bucket_name = None
request_info._local_dispatcher = ApiServerDispatcher()
_SetupStubs(app_id=args.application,
application_root=args.application_root,
appidentity_email_address=args.appidentity_email_address,
appidentity_private_key_path=args.appidentity_private_key_path,
trusted=args.trusted,
blobstore_path=args.blobstore_path,
datastore_path=args.datastore_path,
use_sqlite=args.use_sqlite,
auto_id_policy=args.auto_id_policy,
high_replication=args.high_replication,
datastore_require_indexes=args.require_indexes,
images_host_prefix=application_address,
logs_path=args.logs_path,
mail_smtp_host=args.smtp_host,
mail_smtp_port=args.smtp_port,
mail_smtp_user=args.smtp_user,
mail_smtp_password=args.smtp_password,
mail_enable_sendmail=args.enable_sendmail,
mail_show_mail_body=args.show_mail_body,
mail_allow_tls=args.smtp_allow_tls,
matcher_prospective_search_path=args.prospective_search_path,
taskqueue_auto_run_tasks=args.enable_task_running,
taskqueue_task_retry_seconds=args.task_retry_seconds,
taskqueue_default_http_server=application_address,
user_login_url=args.user_login_url,
user_logout_url=args.user_logout_url,
default_gcs_bucket_name=args.default_gcs_bucket_name)
server = APIServer((args.api_host, args.api_port), args.application)
try:
server.serve_forever()
finally:
_TearDownStubs()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/api_server.py | Python | bsd-3-clause | 33,071 |
"""
Provides common functions for the CVE-Builder script.
The script provides functionality for both TAXII inboxing as well as
NCSC's custom adapter inbox.
"""
import json
import requests
from cabby import create_client
def _construct_headers():
headers = {
'Content-Type': 'application/xml',
'Accept': 'application/json'
}
return headers
def _certuk_inbox(content, endpoint_url):
"""Inbox the package to the certuk adapter."""
data = content
headers = _construct_headers()
response = requests.post(endpoint_url, data=data, headers=headers)
print(json.dumps(response.json(), indent=4))
return
def _taxii_inbox(content, config):
client = create_client(config['host'], use_https=config[
'ssl'], discovery_path=config['discovery_path'])
content = content
binding = config['binding']
client.set_auth(username=config['username'], password=config['password'])
client.push(content, binding, uri=config['inbox_path'])
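# --- Editor note (hedged, not part of the original module) -------------------
# Example calls; the endpoint URL and TAXII settings are placeholders, but the
# config keys match the ones read by _taxii_inbox above:
#
#   _certuk_inbox(stix_xml, 'http://localhost:8080/adapter/certuk_mod/import/')
#   _taxii_inbox(stix_xml, {
#       'host': 'taxii.example.org', 'ssl': True,
#       'discovery_path': '/taxii-discovery-service',
#       'binding': 'urn:stix.mitre.org:xml:1.1.1',
#       'username': 'user', 'password': 'pass',
#       'inbox_path': '/taxii-inbox-service',
#   })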
| cwtaylor/cve-builder | functions.py | Python | gpl-3.0 | 1,023 |
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import json
from ansible.modules.storage.netapp.netapp_e_alerts import Alerts
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from ansible.compat.tests import mock
class AlertsTest(ModuleTestCase):
REQUIRED_PARAMS = {
'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1',
'state': 'disabled'
}
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_alerts.request'
def _set_args(self, **kwargs):
module_args = self.REQUIRED_PARAMS.copy()
if kwargs is not None:
module_args.update(kwargs)
set_module_args(module_args)
def _validate_args(self, **kwargs):
self._set_args(**kwargs)
Alerts()
def test_validation_disable(self):
"""Ensure a default configuration succeeds"""
self._validate_args()
def test_validation_enable(self):
"""Ensure a typical, default configuration succeeds"""
self._validate_args(state='enabled', server='localhost', sender='[email protected]', recipients=['[email protected]'])
def test_validation_fail_required(self):
"""Ensure we fail on missing configuration"""
# Missing recipients
with self.assertRaises(AnsibleFailJson):
self._validate_args(state='enabled', server='localhost', sender='[email protected]')
Alerts()
# Missing sender
with self.assertRaises(AnsibleFailJson):
self._validate_args(state='enabled', server='localhost', recipients=['[email protected]'])
Alerts()
# Missing server
with self.assertRaises(AnsibleFailJson):
self._validate_args(state='enabled', sender='[email protected]', recipients=['[email protected]'])
def test_validation_fail(self):
# Empty recipients
with self.assertRaises(AnsibleFailJson):
self._validate_args(state='enabled', server='localhost', sender='[email protected]', recipients=[])
# Bad sender
with self.assertRaises(AnsibleFailJson):
self._validate_args(state='enabled', server='localhost', sender='y.z', recipients=['[email protected]'])
def test_get_configuration(self):
"""Validate retrieving the current configuration"""
self._set_args(state='enabled', server='localhost', sender='[email protected]', recipients=['[email protected]'])
expected = 'result'
alerts = Alerts()
# Expecting an update
with mock.patch(self.REQ_FUNC, return_value=(200, expected)) as req:
actual = alerts.get_configuration()
self.assertEquals(expected, actual)
self.assertEquals(req.call_count, 1)
def test_update_configuration(self):
"""Validate updating the configuration"""
initial = dict(alertingEnabled=True,
emailServerAddress='localhost',
sendAdditionalContactInformation=True,
additionalContactInformation='None',
emailSenderAddress='[email protected]',
recipientEmailAddresses=['[email protected]']
)
args = dict(state='enabled', server=initial['emailServerAddress'], sender=initial['emailSenderAddress'],
contact=initial['additionalContactInformation'], recipients=initial['recipientEmailAddresses'])
self._set_args(**args)
alerts = Alerts()
        # Ensure we trigger updates when each relevant field is changed
with mock.patch(self.REQ_FUNC, return_value=(200, None)) as req:
with mock.patch.object(alerts, 'get_configuration', return_value=initial):
update = alerts.update_configuration()
self.assertFalse(update)
alerts.sender = '[email protected]'
update = alerts.update_configuration()
self.assertTrue(update)
self._set_args(**args)
alerts.recipients = ['[email protected]']
update = alerts.update_configuration()
self.assertTrue(update)
self._set_args(**args)
alerts.contact = 'abc'
update = alerts.update_configuration()
self.assertTrue(update)
self._set_args(**args)
alerts.server = 'abc'
update = alerts.update_configuration()
self.assertTrue(update)
def test_send_test_email_check(self):
"""Ensure we handle check_mode correctly"""
self._set_args(test=True)
alerts = Alerts()
alerts.check_mode = True
with mock.patch(self.REQ_FUNC) as req:
with mock.patch.object(alerts, 'update_configuration', return_value=True):
alerts.send_test_email()
self.assertFalse(req.called)
def test_send_test_email(self):
"""Ensure we send a test email if test=True"""
self._set_args(test=True)
alerts = Alerts()
with mock.patch(self.REQ_FUNC, return_value=(200, dict(response='emailSentOK'))) as req:
alerts.send_test_email()
self.assertTrue(req.called)
def test_send_test_email_fail(self):
"""Ensure we fail if the test returned a failure status"""
self._set_args(test=True)
alerts = Alerts()
ret_msg = 'fail'
with self.assertRaisesRegexp(AnsibleFailJson, ret_msg):
with mock.patch(self.REQ_FUNC, return_value=(200, dict(response=ret_msg))) as req:
alerts.send_test_email()
self.assertTrue(req.called)
def test_send_test_email_fail_connection(self):
"""Ensure we fail cleanly if we hit a connection failure"""
self._set_args(test=True)
alerts = Alerts()
with self.assertRaisesRegexp(AnsibleFailJson, r"failed to send"):
with mock.patch(self.REQ_FUNC, side_effect=Exception) as req:
alerts.send_test_email()
self.assertTrue(req.called)
def test_update(self):
# Ensure that when test is enabled and alerting is enabled, we run the test
self._set_args(state='enabled', server='localhost', sender='[email protected]', recipients=['[email protected]'], test=True)
alerts = Alerts()
with self.assertRaisesRegexp(AnsibleExitJson, r"enabled"):
with mock.patch.object(alerts, 'update_configuration', return_value=True):
with mock.patch.object(alerts, 'send_test_email') as test:
alerts.update()
self.assertTrue(test.called)
# Ensure we don't run a test when changed=False
with self.assertRaisesRegexp(AnsibleExitJson, r"enabled"):
with mock.patch.object(alerts, 'update_configuration', return_value=False):
with mock.patch.object(alerts, 'send_test_email') as test:
alerts.update()
self.assertFalse(test.called)
# Ensure that test is not called when we have alerting disabled
self._set_args(state='disabled')
alerts = Alerts()
with self.assertRaisesRegexp(AnsibleExitJson, r"disabled"):
with mock.patch.object(alerts, 'update_configuration', return_value=True):
with mock.patch.object(alerts, 'send_test_email') as test:
alerts.update()
self.assertFalse(test.called)
| caphrim007/ansible | test/units/modules/storage/netapp/test_netapp_e_alerts.py | Python | gpl-3.0 | 7,496 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for retrieving revision information from a project's git repository.
"""
# Do not remove the following comment; it is used by
# astropy_helpers.version_helpers to determine the beginning of the code in
# this module
# BEGIN
import os
import subprocess
import warnings
def update_git_devstr(version, path=None):
"""
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
"""
try:
# Quick way to determine if we're in git or not - returns '' if not
devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
return version
if not devstr:
# Probably not in git so just pass silently
return version
if 'dev' in version: # update to the current git revision
version_base = version.split('.dev', 1)[0]
devstr = get_git_devstr(sha=False, show_warning=False, path=path)
return version_base + '.dev' + devstr
else:
        # otherwise it's already the true/release version
return version
def get_git_devstr(sha=False, show_warning=True, path=None):
"""
Determines the number of revisions in this repository.
Parameters
----------
sha : bool
If True, the full SHA1 hash will be returned. Otherwise, the total
count of commits in the repository will be used as a "revision
number".
show_warning : bool
If True, issue a warning if git returns an error code, otherwise errors
pass silently.
path : str or None
If a string, specifies the directory to look in to find the git
repository. If `None`, the current working directory is used.
If given a filename it uses the directory containing that file.
Returns
-------
devversion : str
        Either a string with the revision number (if `sha` is False), the
SHA1 hash of the current commit (if `sha` is True), or an empty string
if git version info could not be identified.
"""
if path is None:
path = os.getcwd()
if not os.path.isdir(path):
path = os.path.abspath(os.path.dirname(path))
if not os.path.exists(os.path.join(path, '.git')):
return ''
if sha:
cmd = ['rev-parse'] # Faster for getting just the hash of HEAD
else:
cmd = ['rev-list', '--count']
try:
p = subprocess.Popen(['git'] + cmd + ['HEAD'], cwd=path,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError as e:
if show_warning:
warnings.warn('Error running git: ' + str(e))
return ''
if p.returncode == 128:
if show_warning:
warnings.warn('No git repository present at {0!r}! Using default '
'dev version.'.format(path))
return ''
elif p.returncode != 0:
if show_warning:
warnings.warn('Git failed while determining revision '
'count: ' + stderr)
return ''
if sha:
return stdout.decode('utf-8')[:40]
else:
return stdout.decode('utf-8').strip()
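# --- Editor note (hedged, not part of astropy-helpers) -----------------------
# Running this module directly prints revision info for the current directory;
# the '1.0.dev' base version string is a made-up example.
if __name__ == '__main__':
    print(update_git_devstr('1.0.dev'))
    print(get_git_devstr(sha=True, show_warning=False))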
| eteq/astropy-helpers | astropy_helpers/git_helpers.py | Python | bsd-3-clause | 3,411 |
# Three ways to test whether a name or attribute exists:
if 'variavel' in locals():       # is 'variavel' defined in the local scope?
    pass
if 'variavel' in globals():      # is 'variavel' defined in the module's global scope?
    pass
if hasattr(objeto, 'variavel'):  # does `objeto` have a 'variavel' attribute?
    pass
# https://pt.stackoverflow.com/q/50206/101
| maniero/SOpt | Python/Reflection/Variable.py | Python | mit | 136 |
import asyncio
import logging
import uuid
from zope.interface import implementer
import aioamqp
from aioamqp.exceptions import (AmqpClosedConnection,
ChannelClosed,
ConsumerCancelled)
from apium import registry
from apium.interfaces import IBroker, ISerializer
log = logging.getLogger(__name__)
@implementer(IBroker)
class Broker(object):
""" IBroker implementation for AMQP """
def __init__(self, driver):
""" Build the broker for the given driver """
self._driver = driver
self._task_queue = asyncio.Queue(maxsize=1)
self._serializer = registry.get(ISerializer)()
self._start_consuming = False
self._start_consuming_task = False
self._start_consuming_result = False
self._results = {}
self._protocol = None
self._channel = None
self._consumer_tags = []
self.connected = False
@asyncio.coroutine
def connect(self, url):
""" Connect to the broker server.
"""
try:
self._protocol = yield from aioamqp.from_url(url)
self._channel = yield from self._protocol.channel()
self.connected = True
except Exception:
log.exception('Exception while connecting to broker')
@asyncio.coroutine
def disconnect(self):
""" Disconnect to the broker server. """
self.connected = False
log.info('basic cancel')
for tag in self._consumer_tags:
yield from self._channel.basic_cancel(tag)
log.info('closing channel')
yield from self._channel.close()
self._channel = None
self._protocol.stop()
self._protocol = None
@asyncio.coroutine
def create_queue(self, queue):
""" create a working queue where tasks will be pushed.
To K.I.S.S., use direct exchange to queue.
e.g. 1 exchange per queue with the same name.
"""
log.info('Creating echange {}'.format(queue))
yield from self._channel.exchange(queue, 'direct')
log.info('Creating queue {}'.format(queue))
yield from self._channel.queue(queue, durable=True)
log.info('Binding queue {}'.format(queue))
yield from self._channel.queue_bind(queue, queue, queue)
log.info('Queue {} created'.format(queue))
@asyncio.coroutine
def delete_queue(self, queue):
""" delete working queues """
log.info('Deleting exchange {}'.format(queue))
try:
yield from self._channel.exchange_delete(queue, no_wait=False)
except Exception:
log.exception('Unmanaged exception while deleting exchange')
log.info('Deleting queue {}'.format(queue))
try:
yield from self._channel.queue_delete(queue, no_wait=False)
except Exception:
log.exception('Unmanaged exception while deleting queue {}'
''.format(queue))
@asyncio.coroutine
def publish_message(self, message, queue):
""" publish a message in a queue. """
try:
yield from self._channel.publish(message,
exchange_name=queue,
routing_key=queue)
return True
except Exception:
log.error('Unexpected error while pushing message', exc_info=True)
return False
@asyncio.coroutine
def pop_task(self):
""" Pop a task to be processed for the given queues.
If no queues are passed, all queues will be tracked. """
try:
if not self._start_consuming_task:
self._start_consuming_task = True
yield from self._subscribe_task_queues()
task = yield from self._task_queue.get()
except Exception:
            log.error('Unexpected error while popping task', exc_info=True)
raise
return task
@asyncio.coroutine
def pop_result(self, task_request, timeout=None):
if not self._start_consuming_result:
self._start_consuming_result = True
yield from self._subscribe_result_queue()
future = asyncio.Future()
loop = asyncio.get_event_loop()
self._results[task_request.uuid] = future
try:
result = yield from asyncio.wait_for(future, timeout)
        except asyncio.TimeoutError:
future.cancel()
del self._results[task_request.uuid]
raise
return result
@asyncio.coroutine
def _subscribe_result_queue(self):
queue = self._driver.get_result_queue()
log.info('basic consume {}'.format(queue))
consumer_tag = 'result-{}'.format(queue)
self._consumer_tags.append(consumer_tag)
yield from self._channel.basic_consume(queue, consumer_tag,
no_wait=False)
asyncio.async(self._consume_queue(consumer_tag))
@asyncio.coroutine
def _subscribe_task_queues(self):
for queue in self._driver._working_queues:
log.info('basic consume {}'.format(queue))
consumer_tag = 'task-{}'.format(queue)
self._consumer_tags.append(consumer_tag)
yield from self._channel.basic_consume(queue, consumer_tag,
no_wait=False)
loop = asyncio.get_event_loop()
asyncio.async(self._consume_queue(consumer_tag))
@asyncio.coroutine
def _consume_queue(self, consumer_tag):
while self._channel:
try:
(consumer_tag,
delivery_tag,
message) = yield from self._channel.consume(consumer_tag)
log.debug('Consumer {} received {} ({})'
''.format(consumer_tag, message, delivery_tag))
message = self._serializer.deserialize(message)
if consumer_tag.split('-', 1).pop(0) == 'task':
log.debug('Pushing task in the task queue')
yield from self._task_queue.put(message)
else:
try:
self._results[message['uuid']].set_result(message)
log.debug('Result for {} pushed in the result dict'
''.format(message['uuid']))
except KeyError:
                        log.warn('Result arrived too late')
# XXX ack_late
yield from self._channel.basic_client_ack(delivery_tag)
except (ChannelClosed, ConsumerCancelled) as exc:
if not self.connected:
break
log.warning('Consumer has been closed, open a new channel')
# reconnect the channel
self._channel = yield from self._protocol.channel()
if self._start_consuming_task:
yield from self._subscribe_task_queues()
if self._start_consuming_result:
yield from self._subscribe_result_queue()
except Exception:
                log.error('Unexpected exception while receiving message',
exc_info=True)
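# --- Editor note (hedged, not part of the original module) -------------------
# Coroutine flow implied by the methods above; the driver object, broker URL
# and queue name are placeholders, and the payload is assumed to be serialized
# already:
#
#   broker = Broker(driver)
#   yield from broker.connect('amqp://guest:guest@localhost:5672/')
#   yield from broker.create_queue('apium-default')
#   yield from broker.publish_message(payload, 'apium-default')
#   task = yield from broker.pop_task()
#   yield from broker.disconnect()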
| mardiros/apium | apium/broker/amqp.py | Python | bsd-3-clause | 7,312 |
#!/usr/bin/env python
#
# mallard2man.py
#
# Copyright (C) 2014 MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
COPYRIGHT_HOLDER = "MongoDB, Inc."
GROUP = "MongoDB C Driver"
BUG_URL = 'https://jira.mongodb.org/browse/CDRIVER'
"""
This script is meant to convert a fairly basic Mallard-format documentation
page to a groff-styled man page.
"""
import os
import re
import sys
import time
import codecs
from datetime import datetime
from xml.etree import ElementTree
INCLUDE = '{http://www.w3.org/2001/XInclude}include'
TITLE = '{http://projectmallard.org/1.0/}title'
SUBTITLE = '{http://projectmallard.org/1.0/}subtitle'
SECTION = '{http://projectmallard.org/1.0/}section'
INFO = '{http://projectmallard.org/1.0/}info'
ITEM = '{http://projectmallard.org/1.0/}item'
LISTING = '{http://projectmallard.org/1.0/}listing'
LIST = '{http://projectmallard.org/1.0/}list'
STEPS = '{http://projectmallard.org/1.0/}steps'
LINK = '{http://projectmallard.org/1.0/}link'
LINKS = '{http://projectmallard.org/1.0/}links'
SYNOPSIS = '{http://projectmallard.org/1.0/}synopsis'
CODE = '{http://projectmallard.org/1.0/}code'
INPUT = '{http://projectmallard.org/1.0/}input'
VAR = '{http://projectmallard.org/1.0/}var'
CMD = '{http://projectmallard.org/1.0/}cmd'
SYS = '{http://projectmallard.org/1.0/}sys'
P = '{http://projectmallard.org/1.0/}p'
DESC = '{http://projectmallard.org/1.0/}desc'
SCREEN = '{http://projectmallard.org/1.0/}screen'
EM = '{http://projectmallard.org/1.0/}em'
NOTE = '{http://projectmallard.org/1.0/}note'
TABLE = '{http://projectmallard.org/1.0/}table'
THEAD = '{http://projectmallard.org/1.0/}thead'
TR = '{http://projectmallard.org/1.0/}tr'
TD = '{http://projectmallard.org/1.0/}td'
OUTPUT = '{http://projectmallard.org/1.0/}output'
EXAMPLE = '{http://projectmallard.org/1.0/}example'
# Matches "\" and "-", but not "\-".
replaceables = re.compile(r'(\\(?!-))|((?<!\\)-)')
class Convert(object):
title = None
subtitle = None
sections = None
relpath = None
def __init__(self, inFile, outFile, section):
self.inFile = inFile
self.relpath = os.path.dirname(inFile)
self.outFile = outFile
self.section = section
self.sections = []
# Map: section id -> section element.
self.sections_map = {}
def _parse(self):
self.tree = ElementTree.ElementTree()
self.tree.parse(open(self.inFile))
self.root = self.tree.getroot()
# Python's standard ElementTree doesn't store an element's parent on
# the element. Make a child->parent map.
try:
iterator = self.tree.iter()
except AttributeError:
# Python 2.6.
iterator = self.tree.getiterator()
self.parent_map = dict((c, p) for p in iterator for c in p)
def _get_parent(self, ele):
return self.parent_map[ele]
def _extract(self):
# Extract the title and subtitle.
for child in self.root.getchildren():
if child.tag == TITLE:
# A title like "Version Checks" can't have spaces, otherwise
# the "whatis" entry can't be parsed from the man page title.
self.title = child.text.strip().replace(' ', '_')
elif child.tag == SUBTITLE:
self.subtitle = child.text.strip()
elif child.tag == SECTION:
if child.get('id'):
self.sections_map[child.get('id')] = child
self.sections.append(child)
if not self.subtitle and 'description' in self.sections_map:
# No "subtitle" element, use description section title as subtitle.
self.subtitle = self._section_text(self.sections_map['description'])
def _section_text(self, section):
# Find <section id="description"><p>some text</p></section>.
for child in section:
if child.tag != TITLE:
return self._textify_elem(child)
def _textify_elem(self, elem):
return ''.join(elem.itertext()).strip()
def _writeComment(self, text=''):
lines = text.split('\n')
for line in lines:
self.outFile.write('.\\" ')
self.outFile.write(line)
self.outFile.write('\n')
def _escape_char(self, match):
c = match.group(0)
if c == "-":
return r"\(hy"
elif c == "\\":
return "\\e"
assert False, "invalid char passed to _escape_char: %r" % c
def _escape(self, text):
# Avoid "hyphen-used-as-minus-sign" lintian warning about man pages,
# and escape text like "\0" as "\\0". We'll replace all "-" with "\(hy",
# which is an explicit hyphen, but leave alone the first line's
# "name \- description" text.
escaped = replaceables.sub(self._escape_char, text)
if escaped.startswith('.'):
# Lines like ". Next sentence" are misformatted by groff, do
# "\[char46] Next sentence" instead.
return r'\[char46]' + escaped[1:]
else:
return escaped
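    # Editor note (hedged): e.g. _escape('a-b') returns 'a\(hyb' and
    # _escape('. x') returns '\[char46] x', so groff prints a literal hyphen
    # and does not mistake the leading dot for a request.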
def _compressWhitespace(self, text):
return ' '.join(text.split())
def _write(self, text):
self._write_noescape(self._escape(text))
def _write_noescape(self, text):
self.outFile.write(text)
def _writeCommand(self, command, text=None):
self._write_noescape(command)
if text:
self._write(' ') # Escapes the text.
self._write(text)
self._write('\n')
def _writeLine(self, text):
if text is not None:
text = text.strip()
self._write(text)
self._write('\n')
def _generateHeader(self):
# For Debian reproducible builds:
# https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
year = datetime.utcfromtimestamp(int(source_date_epoch or time.time())).year
self._writeComment('This manpage is Copyright (C) %s %s' % (year, COPYRIGHT_HOLDER))
self._writeComment('')
self._writeComment(
"Permission is granted to copy, distribute and/or modify this document\n"
"under the terms of the GNU Free Documentation License, Version 1.3\n"
"or any later version published by the Free Software Foundation;\n"
"with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.\n"
"A copy of the license is included in the section entitled \"GNU\n"
"Free Documentation License\".")
self._writeComment('')
mtime = int(source_date_epoch or os.stat(self.inFile).st_mtime)
date = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
title = self.title.replace('()','').upper()
self._writeCommand(
'.TH', '"%s" "%s" "%s" "%s"\n' % (title, self.section, date, GROUP))
self._writeCommand('.SH NAME\n')
self._write_noescape('%s \\- %s\n' % (self.title, self.subtitle))
def _generateSection(self, section):
# Try to render the title first
for child in section.getchildren():
if child.tag == TITLE:
if child.text is None:
raise RuntimeError("Can't put formatting tags in <title>")
s = child.text.strip().upper()
self._writeCommand('.SH', '"%s"' % s.replace('"', ''))
for child in section.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(child.tail)
def _generateSynopsis(self, synopsis):
self._writeCommand('.nf')
for child in synopsis.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(child.tail)
self._writeCommand('.fi')
def _generateCode(self, code):
if code.tag.endswith('output'):
self._writeCommand('.br')
text = code.text
is_synopsis = self._get_parent(code).tag.endswith('synopsis')
if text and '\n' not in text and not is_synopsis:
self._writeCommand('.B', text)
else:
self._writeCommand('.nf')
self._writeLine(code.text)
for child in code.getchildren():
self._generateElement(child)
self._writeCommand('.fi')
def _generateNote(self, note):
self._writeCommand('.B NOTE')
self._writeCommand('.RS')
for child in note.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(child.tail)
self._writeCommand('.RE')
def _generateP(self, p):
if p.text:
self._writeLine(self._compressWhitespace(p.text))
for child in p.getchildren():
self._generateElement(child)
if child.tail:
self._writeLine(self._compressWhitespace(child.tail))
def _generateScreen(self, screen):
if screen.text:
self._generateCode(screen)
for child in screen.getchildren():
self._generateElement(child)
def _generateListing(self, listing):
for child in listing.getchildren():
self._generateElement(child)
def _generateList(self, l):
for child in l.getchildren():
self._generateElement(child)
def _generateEM(self, em):
self._writeCommand('.B', em.text)
def _generateOutput(self, output):
self._generateCode(output)
def _generateItem(self, item):
self._writeCommand('.IP \\[bu] 2')
for child in item.getchildren():
self._generateElement(child)
def _generateElement(self, ele):
if ele.tag in (SECTION, EXAMPLE):
self._generateSection(ele)
elif ele.tag == SYNOPSIS:
self._generateSynopsis(ele)
elif ele.tag in (CODE, VAR, SYS, CMD):
self._generateCode(ele)
elif ele.tag in (INPUT, OUTPUT):
self._generateOutput(ele)
elif ele.tag in (P, DESC):
self._generateP(ele)
elif ele.tag == EM:
self._generateEM(ele)
elif ele.tag == LISTING:
self._generateListing(ele)
elif ele.tag == ITEM:
self._generateItem(ele)
elif ele.tag in (LIST, STEPS):
self._generateList(ele)
elif ele.tag == SCREEN:
self._generateScreen(ele)
elif ele.tag == LINK:
self._generateLink(ele)
elif ele.tag == NOTE:
self._generateNote(ele)
elif ele.tag == TABLE:
self._generateTable(ele)
elif ele.tag == TR:
self._generateTr(ele)
elif ele.tag == TD:
self._generateTd(ele)
elif ele.tag == INCLUDE:
f = ele.attrib['href']
f = os.path.join(self.relpath, f)
d = codecs.open(f, 'r', encoding='utf-8').read()
self._writeLine(d)
elif ele.tag in (TITLE, INFO, THEAD, LINKS):
pass
else:
print('unknown element type %s' % ele)
def _generateTable(self, table):
for child in table.getchildren():
self._generateElement(child)
def _generateTr(self, tr):
self._writeCommand('.TP')
self._writeCommand('.B')
for child in tr.getchildren():
self._generateElement(child)
self._writeCommand('.LP')
def _generateTd(self, td):
for child in td.getchildren():
self._generateElement(child)
def _generateLink(self, link):
text = link.text
if text and '()' in text:
text = text.replace('()', '(%s)' % self.section)
if text:
self._writeCommand('.B', text)
def _generateSections(self):
for section in self.sections:
self._generateElement(section)
def _generateFooter(self):
self._writeCommand('\n.B')
self._writeCommand('\n.SH COLOPHON')
self._write('\nThis page is part of %s.' % GROUP)
self._write('\nPlease report any bugs at %s.' % BUG_URL.replace('-','\\-'))
def _generate(self):
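        # Write to a temporary ".tmp" file first and only move it over the
        # real output path once generation has finished, so a failed run
        # does not clobber an existing page.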
self.realname = self.outFile
self.outFile = codecs.open(self.outFile + '.tmp', 'w', encoding='utf-8')
self._generateHeader()
self._generateSections()
self._generateFooter()
        self.outFile.close()
        os.rename(self.outFile.name, self.realname)
def convert(self):
self._parse()
self._extract()
self._generate()
def main(filenames, section='3'):
for inFile in filenames:
dirName = os.path.dirname(inFile) + '/man/'
baseName = os.path.basename(inFile)
baseFile = os.path.splitext(baseName)[0]
outFile = dirName + baseFile + '.' + section
c = Convert(inFile, outFile, section)
c.convert()
if __name__ == '__main__':
if len(sys.argv) < 3:
print('usage: %s SECTION FILENAMES...' % sys.argv[0])
sys.exit(1)
section = sys.argv[1]
main(sys.argv[2:], section)
sys.exit(0)
| mschoenlaub/mongo-c-driver | doc/mallard2man.py | Python | apache-2.0 | 13,664 |
# -*- coding: utf-8 -*-
#
# cloudtracker documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 5 12:45:40 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../cloudtracker/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cloudtracker'
copyright = u'2011, Jordan Dawe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cloudtrackerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cloudtracker.tex', u'cloudtracker Documentation',
u'Jordan Dawe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cloudtracker', u'cloudtracker Documentation',
[u'Jordan Dawe'], 1)
]
| freedryk/cloudtracker | doc/conf.py | Python | bsd-2-clause | 7,125 |
import unittest
# O(n)
class Solution:
def isOneBitCharacter(self, bits):
"""
:type bits: List[int]
:rtype: bool
"""
is_one_bit = False
i = 0
while i < len(bits):
is_one_bit = not bits[i]
i += 1 if is_one_bit else 2
return is_one_bit
class Test(unittest.TestCase):
def test(self):
self._test([1, 0, 0], True)
self._test([1, 1, 1, 0], False)
def _test(self, bits, expected):
actual = Solution().isOneBitCharacter(bits)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| chrisxue815/leetcode_python | problems/test_0717.py | Python | unlicense | 639 |
import numpy
import pylab
from scipy.optimize import curve_fit
import math
import scipy.stats
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiBluasintoto.txt", unpack = True)
def ff(x, a, b):
return a*x + b
popt = (0, -2)
pars, covm = curve_fit(ff, V, I, popt, dI, absolute_sigma=True)
print("I_as=", pars[1],"\pm", covm[1][1]**0.5)
print("a = ", pars[0],"\pm", covm[0][0]**0.5)
w = 1/dI**2
chi2=(w*(ff(V, pars[0], pars[1])-I)**2).sum()
ndof = len(f)-2
print("chi2/ndof=",chi2,"/",ndof)
div = numpy.linspace(min(V), max(V), 1000)
pylab.plot(div, ff(div, pars[0], pars[1]), color = "black")
pylab.errorbar(V, I, dI, dV, color = "blue", linestyle = "", marker = "o")
pylab.xlabel("V (mV)")
pylab.ylabel("I (nA)")
pylab.grid()
pylab.show() | fedebell/Laboratorio3 | Fotoelettrico/fitasintotoretta.py | Python | gpl-3.0 | 794 |
#-----------------------------------------------------------------------------
# Copyright 2012-2016 Claude Zervas
# email: [email protected]
#-----------------------------------------------------------------------------
"""
2D geometry package.
Parts of this library where inspired by planar, a 2D geometry library for
python gaming:
https://bitbucket.org/caseman/planar/
"""
#from . import debug
# Expose package-wide constants and functions
from .const import TAU, set_epsilon, is_zero, float_eq, float_round
from .util import normalize_angle, calc_rotation, segments_are_g1
# Expose some basic geometric classes at package level
from .point import P
from .line import Line
from .arc import Arc
from .box import Box
| utlco/tcnc | tcnc/geom/__init__.py | Python | lgpl-3.0 | 725 |
# Copyright 2017 AT&T Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.identity.v3 import endpoint_filter_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestEndPointsFilterClient(base.BaseServiceTest):
FAKE_LIST_PROJECTS_FOR_ENDPOINTS = {
"projects": [
{
"domain_id": "1777c7",
"enabled": True,
"id": "1234ab1",
"type": "compute",
"links": {
"self": "http://example.com/identity/v3/projects/1234ab1"
},
"name": "Project 1",
"description": "Project 1 description",
},
{
"domain_id": "1777c7",
"enabled": True,
"id": "5678cd2",
"type": "compute",
"links": {
"self": "http://example.com/identity/v3/projects/5678cd2"
},
"name": "Project 2",
"description": "Project 2 description",
}
],
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/endpoints/\
u6ay5u/projects",
"previous": None,
"next": None
}
}
FAKE_LIST_ENDPOINTS_FOR_PROJECTS = {
"endpoints": [
{
"id": "u6ay5u",
"interface": "public",
"url": "http://example.com/identity/",
"region": "north",
"links": {
"self": "http://example.com/identity/v3/endpoints/u6ay5u"
},
"service_id": "5um4r",
},
{
"id": "u6ay5u",
"interface": "internal",
"url": "http://example.com/identity/",
"region": "south",
"links": {
"self": "http://example.com/identity/v3/endpoints/u6ay5u"
},
"service_id": "5um4r",
},
],
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/projects/\
1234ab1/endpoints",
"previous": None,
"next": None
}
}
FAKE_LIST_ENDPOINT_GROUPS_FOR_PROJECT = {
"endpoint_groups": [
{
"endpoint_group": {
"description": "endpoint group description #2",
"filters": {
"interface": "admin"
},
"id": "3de68c",
"name": "endpoint group name #2"
}
}
],
"links": {
"self": "https://url/identity/v3/OS-EP-FILTER/endpoint_groups",
}
}
FAKE_PROJECT_INFO = {
"project": {
"domain_id": "1789d1",
"id": "263fd9",
"links": {
"self": "http://example.com/identity/v3/projects/263fd9"
},
"name": "project name #1",
"description": "project description #1"
}
}
def setUp(self):
super(TestEndPointsFilterClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = endpoint_filter_client.EndPointsFilterClient(
fake_auth, 'identity', 'regionOne')
def _test_add_endpoint_to_project(self, bytes_body=False):
self.check_service_client_function(
self.client.add_endpoint_to_project,
'tempest.lib.common.rest_client.RestClient.put',
{},
bytes_body,
status=204,
project_id=3,
endpoint_id=4)
def _test_check_endpoint_in_project(self, bytes_body=False):
self.check_service_client_function(
self.client.check_endpoint_in_project,
'tempest.lib.common.rest_client.RestClient.head',
{},
bytes_body,
status=204,
project_id=3,
endpoint_id=4)
def _test_list_projects_for_endpoint(self, bytes_body=False):
self.check_service_client_function(
self.client.list_projects_for_endpoint,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_PROJECTS_FOR_ENDPOINTS,
bytes_body,
status=200,
endpoint_id=3)
def _test_list_endpoints_in_project(self, bytes_body=False):
self.check_service_client_function(
self.client.list_endpoints_in_project,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_ENDPOINTS_FOR_PROJECTS,
bytes_body,
status=200,
project_id=4)
def _test_delete_endpoint_from_project(self, bytes_body=False):
self.check_service_client_function(
self.client.delete_endpoint_from_project,
'tempest.lib.common.rest_client.RestClient.delete',
{},
bytes_body,
status=204,
project_id=3,
endpoint_id=4)
def _test_list_endpoint_groups_for_project(self, bytes_body=False):
self.check_service_client_function(
self.client.list_endpoint_groups_for_project,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_ENDPOINT_GROUPS_FOR_PROJECT,
bytes_body,
status=200,
project_id=3)
def _test_list_projects_for_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.list_projects_for_endpoint_group,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_PROJECTS_FOR_ENDPOINTS,
bytes_body,
status=200,
endpoint_group_id=5)
def _test_list_endpoints_for_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.list_endpoints_for_endpoint_group,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_ENDPOINTS_FOR_PROJECTS,
bytes_body,
status=200,
endpoint_group_id=5)
def _test_add_endpoint_group_to_project(self, bytes_body=False):
self.check_service_client_function(
self.client.add_endpoint_group_to_project,
'tempest.lib.common.rest_client.RestClient.put',
{},
bytes_body,
status=204,
endpoint_group_id=5,
project_id=6)
def _test_show_endpoint_group_for_project(self, bytes_body=False):
self.check_service_client_function(
self.client.show_endpoint_group_for_project,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_PROJECT_INFO,
bytes_body,
endpoint_group_id=5,
project_id=6)
def test_add_endpoint_to_project_with_str_body(self):
self._test_add_endpoint_to_project()
def test_add_endpoint_to_project_with_bytes_body(self):
self._test_add_endpoint_to_project(bytes_body=True)
def test_check_endpoint_in_project_with_str_body(self):
self._test_check_endpoint_in_project()
def test_check_endpoint_in_project_with_bytes_body(self):
self._test_check_endpoint_in_project(bytes_body=True)
def test_list_projects_for_endpoint_with_str_body(self):
self._test_list_projects_for_endpoint()
def test_list_projects_for_endpoint_with_bytes_body(self):
self._test_list_projects_for_endpoint(bytes_body=True)
def test_list_endpoints_in_project_with_str_body(self):
self._test_list_endpoints_in_project()
def test_list_endpoints_in_project_with_bytes_body(self):
self._test_list_endpoints_in_project(bytes_body=True)
def test_delete_endpoint_from_project(self):
self._test_delete_endpoint_from_project()
def test_list_endpoint_groups_for_project_with_str_body(self):
self._test_list_endpoint_groups_for_project()
def test_list_endpoint_groups_for_project_with_bytes_body(self):
self._test_list_endpoint_groups_for_project(bytes_body=True)
def test_list_projects_for_endpoint_group_with_str_body(self):
self._test_list_projects_for_endpoint_group()
def test_list_projects_for_endpoint_group_with_bytes_body(self):
self._test_list_projects_for_endpoint_group(bytes_body=True)
def test_list_endpoints_for_endpoint_group_with_str_body(self):
self._test_list_endpoints_for_endpoint_group()
def test_list_endpoints_for_endpoint_group_with_bytes_body(self):
self._test_list_endpoints_for_endpoint_group(bytes_body=True)
def test_add_endpoint_group_to_project_with_str_body(self):
self._test_add_endpoint_group_to_project()
def test_add_endpoint_group_to_project_with_bytes_body(self):
self._test_add_endpoint_group_to_project(bytes_body=True)
def test_show_endpoint_group_for_project_with_str_body(self):
self._test_show_endpoint_group_for_project()
def test_show_endpoint_group_for_project_with_bytes_body(self):
self._test_show_endpoint_group_for_project(bytes_body=True)
def test_delete_endpoint_group_from_project(self):
self.check_service_client_function(
self.client.delete_endpoint_group_from_project,
'tempest.lib.common.rest_client.RestClient.delete',
{},
False,
status=204,
endpoint_group_id=5,
project_id=5)
| openstack/tempest | tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py | Python | apache-2.0 | 10,199 |
import argparse
import sys
from os import listdir, system
from os.path import isfile, join
from timeit import default_timer as timer
def list_only_files(src_dir): return filter(lambda f: f[0] != ".", [f for f in listdir(src_dir) if isfile(join(src_dir, f))])
def remove_txt_files(file_list): return filter(lambda f: f[-4:] != ".txt", file_list)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='batch process dataset')
parser.add_argument('--root_dir', help='root dir', required=True)
parser.add_argument('--pole_masks_dir', help='directory containing pole masks', required=False)
parser.add_argument('--verbose', dest='verbose', action='store_true')
args = vars(parser.parse_args())
root_dir = args["root_dir"]
pole_masks_dir = args["pole_masks_dir"]
verbose = args["verbose"]
src_dir = root_dir + "/raw"
dest_dir = root_dir + "/vid"
start_time = timer()
system("mkdir " + root_dir + "/vid")
system("mkdir -p " + root_dir + "/logs")
system("mkdir " + root_dir + "/eqr_frames")
system("mkdir " + root_dir + "/cube_frames")
system("mkdir " + root_dir + "/pole_masks")
system("mkdir " + root_dir + "/single_cam")
if pole_masks_dir:
# Copy pole masks over
system("cp " + pole_masks_dir + "/cam15.png " + root_dir + "/pole_masks/")
system("cp " + pole_masks_dir + "/cam16.png " + root_dir + "/pole_masks/")
file_list = remove_txt_files(list_only_files(src_dir))
unique_frames = set()
frame_idx_str_prev = ""
for f in file_list:
frame_idx_str = f.replace("-", "_").split("_")[1]
if verbose:
print f
sys.stdout.flush()
if frame_idx_str != frame_idx_str_prev:
print "----------- gathering frame:", frame_idx_str
sys.stdout.flush()
unique_frames.add(frame_idx_str)
frame_idx_str_prev = frame_idx_str
# Create directories for each frame
for frame_idx_str in sorted(unique_frames):
frame_idx = int(frame_idx_str)
zero_pad_idx = format(frame_idx, '06d')
system("mkdir " + dest_dir + "/" + zero_pad_idx)
system("mkdir " + dest_dir + "/" + zero_pad_idx + "/raw")
system("mkdir " + dest_dir + "/" + zero_pad_idx + "/projections")
system("mkdir " + dest_dir + "/" + zero_pad_idx + "/isp_out")
system("mkdir " + dest_dir + "/" + zero_pad_idx + "/flow")
system("mkdir " + dest_dir + "/" + zero_pad_idx + "/flow_images")
# Copy the raw files over to the /raw subdir for each frame
for f in file_list:
frame_idx_str = f.replace("-", "_").split("_")[1]
frame_idx = int(frame_idx_str)
zero_pad_idx = format(frame_idx, '06d')
system("mv " + src_dir + "/" + f + " " + dest_dir + "/" + zero_pad_idx + "/raw/" + f)
# Rename all the frames in /raw folders to sorted numerical names
for frame_idx_str in sorted(unique_frames):
frame_idx = int(frame_idx_str)
print "----------- arranging frame:", frame_idx_str
sys.stdout.flush()
zero_pad_idx = format(frame_idx, "06d")
raw_dir = dest_dir + "/" + zero_pad_idx + "/raw"
sorted_images = sorted(list_only_files(raw_dir))
for i in range(0, len(sorted_images)):
prev_name = raw_dir + "/" + sorted_images[i]
new_name = raw_dir + "/cam" + str(i) + ".bmp"
system("mv " + prev_name + " " + new_name)
end_time = timer()
if verbose:
print "Arrange total runtime:", (end_time - start_time), "sec"
sys.stdout.flush()
| mindcont/OpenSource | Facebook/Surround360/surround360_render/scripts/arrange_dataset.py | Python | cc0-1.0 | 3,410 |
from amqpstorm import Channel
from amqpstorm import exception
from amqpstorm.queue import Queue
from amqpstorm.tests.utility import FakeConnection
from amqpstorm.tests.utility import TestFramework
class QueueExceptionTests(TestFramework):
def test_queue_declare_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
queue = Queue(channel)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'queue should be a string',
queue.declare, None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'passive should be a boolean',
queue.declare, 'travis-ci', None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'durable should be a boolean',
queue.declare, 'travis-ci', True, None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'exclusive should be a boolean',
queue.declare, 'travis-ci', True, True, None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'auto_delete should be a boolean',
queue.declare, 'travis-ci', True, True, True, None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'arguments should be a dict or None',
queue.declare, 'travis-ci', True, True, True, True, []
)
def test_queue_delete_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
queue = Queue(channel)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'queue should be a string',
queue.delete, None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'if_unused should be a boolean',
queue.delete, '', None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'if_empty should be a boolean',
queue.delete, '', True, None
)
def test_queue_purge_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
queue = Queue(channel)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'queue should be a string',
queue.purge, None
)
def test_queue_bind_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
queue = Queue(channel)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'queue should be a string',
queue.bind, None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'exchange should be a string',
queue.bind, '', None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'routing_key should be a string',
queue.bind, '', '', None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'arguments should be a dict or None',
queue.bind, '', '', '', []
)
def test_queue_unbind_invalid_parameter(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
queue = Queue(channel)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'queue should be a string',
queue.unbind, None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'exchange should be a string',
queue.unbind, '', None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'routing_key should be a string',
queue.unbind, '', '', None
)
self.assertRaisesRegex(
exception.AMQPInvalidArgument,
'arguments should be a dict or None',
queue.unbind, '', '', '', []
)
| eandersson/amqpstorm | amqpstorm/tests/unit/queue/test_queue_exception.py | Python | mit | 4,192 |
from cactusbot.packets import MessagePacket
from cactusbot.services.beam.parser import BeamParser
def test_parse_message():
assert BeamParser.parse_message({
'channel': 2151,
'id': '7f43cca0-a9c5-11e6-9c8f-6bd6b629c2eb',
'message': {
'message': [
{'data': 'Hello, world!',
'text': 'Hello, world!',
'type': 'text'}
],
'meta': {}
},
'user_id': 2547,
'user_name': '2Cubed',
'user_roles': ['Owner']
}).json == {
"message": [{
"type": "text",
"data": "Hello, world!",
"text": "Hello, world!"
}],
"user": "2Cubed",
"role": 5,
"action": False,
"target": None
}
assert BeamParser.parse_message({
'channel': 2151,
'id': '8ef6a160-a9c8-11e6-9c8f-6bd6b629c2eb',
'message': {
'message': [
{'data': 'waves ',
'text': 'waves ',
'type': 'text'},
{'coords': {'height': 24, 'width': 24, 'x': 72, 'y': 0},
'pack': 'default',
'source': 'builtin',
'text': ':D',
'type': 'emoticon'}],
'meta': {'me': True}},
'user_id': 95845,
'user_name': 'Stanley',
'user_roles': ['User']
}).json == {
"message": [{
"type": "text",
"data": "waves ",
"text": "waves "
}, {
"type": "emoji",
"data": "😃",
"text": ":D"
}],
"user": "Stanley",
"role": 1,
"action": True,
"target": None
}
def test_parse_follow():
assert BeamParser.parse_follow({
'following': True,
'user': {
'avatarUrl': 'https://uploads.beam.pro/avatar/l0icubxz-95845.jpg',
'bio': None,
'channel': {'audience': 'teen',
'badgeId': None,
'coverId': None,
'createdAt': '2016-03-05T20:41:21.000Z',
'deletedAt': None,
'description': None,
'featured': False,
'ftl': 0,
'hasTranscodes': True,
'hasVod': False,
'hosteeId': None,
'id': 68762,
'interactive': False,
'interactiveGameId': None,
'languageId': None,
'name': "Stanley's Channel",
'numFollowers': 0,
'online': False,
'partnered': False,
'suspended': False,
'thumbnailId': None,
'token': 'Stanley',
'transcodingProfileId': None,
'typeId': None,
'updatedAt': '2016-08-16T02:53:01.000Z',
'userId': 95845,
'viewersCurrent': 0,
'viewersTotal': 0,
'vodsEnabled': True},
'createdAt': '2016-03-05T20:41:21.000Z',
'deletedAt': None,
'experience': 401,
'frontendVersion': None,
'id': 95845,
'level': 13,
'primaryTeam': None,
'social': {'verified': []},
'sparks': 2236,
'updatedAt': '2016-08-20T04:35:25.000Z',
'username': 'Stanley',
'verified': True
}
}).json == {
"user": "Stanley",
"event": "follow",
"success": True,
"streak": 1
}
assert BeamParser.parse_follow({
'following': False,
'user': {
'avatarUrl': 'https://uploads.beam.pro/avatar/l0icubxz-95845.jpg',
'bio': None,
'channel': {'audience': 'teen',
'badgeId': None,
'coverId': None,
'createdAt': '2016-03-05T20:41:21.000Z',
'deletedAt': None,
'description': None,
'featured': False,
'ftl': 0,
'hasTranscodes': True,
'hasVod': False,
'hosteeId': None,
'id': 68762,
'interactive': False,
'interactiveGameId': None,
'languageId': None,
'name': "Stanley's Channel",
'numFollowers': 0,
'online': False,
'partnered': False,
'suspended': False,
'thumbnailId': None,
'token': 'Stanley',
'transcodingProfileId': None,
'typeId': None,
'updatedAt': '2016-08-16T02:53:01.000Z',
'userId': 95845,
'viewersCurrent': 0,
'viewersTotal': 0,
'vodsEnabled': True},
'createdAt': '2016-03-05T20:41:21.000Z',
'deletedAt': None,
'experience': 401,
'frontendVersion': None,
'id': 95845,
'level': 13,
'primaryTeam': None,
'social': {'verified': []},
'sparks': 2236,
'updatedAt': '2016-08-20T04:35:25.000Z',
'username': 'Stanley',
'verified': True
}
}).json == {
"user": "Stanley",
"event": "follow",
"success": False,
"streak": 1
}
def test_parse_subscribe():
assert BeamParser.parse_subscribe({
'user': {
'avatarUrl': 'https://uploads.beam.pro/avatar/20621.jpg',
'bio': 'Broadcasting Daily at 10 AM PST. Join in on fun with mostly Minecraft.',
'createdAt': '2015-05-06T05:13:52.000Z',
'deletedAt': None,
'experience': 97980,
'frontendVersion': None,
'id': 20621,
'level': 88,
'primaryTeam': 89,
'social': {
'player': 'https://player.me/innectic',
'twitter': 'https://twitter.com/Innectic',
'verified': []
},
'sparks': 174519,
'updatedAt': '2016-08-27T02:11:24.000Z',
'username': 'Innectic',
'verified': True
}
}).json == {
"user": "Innectic",
"event": "subscribe",
"success": True,
"streak": 1
}
def test_parse_resubscribe():
assert BeamParser.parse_resubscribe({
"totalMonths": 3,
"user": {
"level": 88,
"social": {
"player": "https://player.me/innectic",
"twitter": "https://twitter.com/Innectic",
"verified": []
},
"id": 20621,
"username": 'Innectic',
"verified": True,
"experience": 97980,
"sparks": 174519,
"avatarUrl": 'https://uploads.beam.pro/avatar/20621.jpg',
"bio": 'Broadcasting Daily at 10 AM PST. Join in on fun with mostly Minecraft.',
"primaryTeam": 89,
"createdAt": '2016-08-27T02:11:24.000Z',
'updatedAt': '2016-08-27T02:11:24.000Z',
"deletedAt": None
},
"since": '2016-11-12T20:01:55.000Z',
"until": '2017-03-13T21:02:25.000Z'
}).json == {
"user": "Innectic",
"event": "subscribe",
"success": True,
"streak": 3
}
def test_parse_host():
assert BeamParser.parse_host({
'hoster': {
'audience': 'teen',
'badgeId': None,
'coverId': None,
'createdAt': '2016-03-05T20:41:21.000Z',
'deletedAt': None,
'description': None,
'featured': False,
'ftl': 0,
'hasTranscodes': True,
'hasVod': False,
'hosteeId': 3016,
'id': 68762,
'interactive': False,
'interactiveGameId': None,
'languageId': None,
'name': "Stanley's Channel",
'numFollowers': 0,
'online': False,
'partnered': False,
'suspended': False,
'thumbnailId': None,
'token': 'Stanley',
'transcodingProfileId': None,
'typeId': None,
'updatedAt': '2016-11-13T20:21:59.000Z',
'userId': 95845,
'viewersCurrent': 0,
'viewersTotal': 0,
'vodsEnabled': True},
'hosterId': 68762
}).json == {
"user": "Stanley",
"event": "host",
"success": True,
"streak": 1
}
def test_synthesize():
assert BeamParser.synthesize(MessagePacket(
"Hey, ",
("tag", "Stanley"),
"! ",
("emoji", "🌵"),
" Check out ",
("url", "https://cactusbot.rtfd.org", "cactusbot.rtfd.org"),
"!"
)) == (("Hey, @Stanley! :cactus Check out cactusbot.rtfd.org!",), {})
assert BeamParser.synthesize(MessagePacket(
"waves", action=True
)) == (("/me waves",), {})
assert BeamParser.synthesize(MessagePacket(
"Hello!", target="Stanley"
)) == (("Stanley", "Hello!",), {"method": "whisper"})
| CactusBot/CactusBot | tests/services/test_beam.py | Python | gpl-3.0 | 9,641 |
# -*- coding: utf-8 -*-
__author__ = 'David Gowers'
__email__ = '[email protected]'
__version__ = '0.1.0'
| 0ion9/tmsoup | tmsoup/__init__.py | Python | lgpl-3.0 | 109 |
#!/usr/bin/env python
"""
Helper functions for displaying a calendar on the user's terminal screen.
"""
from datetime import date
from datetime import timedelta
import shlex
import subprocess
class CommandStderrException(Exception):
pass
def get_output_from_command(command):
parsed_command = shlex.split(command)
process = subprocess.Popen(parsed_command, stdout=subprocess.PIPE)
out, err = process.communicate()
if err:
raise CommandStderrException(err)
return out
def transpose_matrix(matrix):
height = len(matrix)
width = len(matrix[0])
lens = sum([ len(x) for x in matrix ])
if lens != height * width:
print '%(height)s x %(width)s = %(lens)s' % locals()
raise Exception("argument 'matrix' must be in a shape of square.")
return [[matrix[h][w] for h in range(height)] for w in range(width)]
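# Illustrative example (hypothetical input):
#   transpose_matrix([[1, 2, 3], [4, 5, 6]]) -> [[1, 4], [2, 5], [3, 6]]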
class GitCalendar(object):
def __init__(self, commit_number_log):
"""
commit_number_log should be in form of:
{
'2012-01-02': 0,
'2012-01-03': 1,
'2012-01-05': 2
}
"""
        # build the symbol_map up front so it can be customized later via new_symbol_map()
self.symbol_map = {
'unknown': '-',
'more': '+',
'empty': '0',
}
one_to_ten = [str(x) for x in range(0, 10)]
number_dict = zip(one_to_ten, one_to_ten)
self.symbol_map.update(number_dict)
self.commit_log = commit_number_log
def gen_matrix(self, end_date, length=366):
"""
        Build a 7-row matrix of dates (one row per weekday, Sunday first) covering
        roughly the last year, padded at both ends with 'unknown' placeholders.
"""
one_day_delta = timedelta(days=1)
        end_date = end_date - one_day_delta  # honor the passed end_date; the range ends the day before it
date_list = []
for i in xrange(365):
date_list.insert(0, end_date)
end_date = end_date - one_day_delta
# calculation
def to_sunday_first_index(monday_first_index):
return (monday_first_index + 1) % 7
first_day = to_sunday_first_index(date_list[0].weekday())
last_day = to_sunday_first_index(date_list[-1].weekday())
        # pad the head and tail with 'unknown' placeholders so the total length becomes a multiple of 7
unknown_substitute = 'unknown'
for i in range(first_day): date_list.insert(0, unknown_substitute)
for i in range(6-last_day): date_list.append(unknown_substitute)
        # chunk into weeks of 7 days and transpose into a 7-row (weekday) x ~53-column (week) matrix
matrix = transpose_matrix([date_list[cursor:cursor+7] for cursor in range(len(date_list))[::7]])
return matrix
def gen_rendered_matrix(self, end_date):
"""
        Yield the rendered calendar rows as lists of symbols,
        using self.gen_matrix to build the underlying date matrix.
"""
matrix = self.gen_matrix(end_date=end_date)
def to2(d):
d = str(d)
return d if len(d) == 2 else '0'+d
def stringify(day):
return "%s-%s-%s" % (day.year, to2(day.month), to2(day.day)) if isinstance(day, date) else day
def bind(m):
return self.get_commits_by_date(stringify(m))
string_list = (map(bind , row) for row in matrix)
return string_list
def get_commits_by_date(self, d):
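        # Map a date (or placeholder string) to its display symbol: known
        # placeholders pass through unchanged, logged days use their commit
        # count (capped at 'more' above 9) and unlogged days fall back to 'empty'.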
if str(d) in self.symbol_map:
return self.symbol_map[d]
else:
try:
rtn = self.commit_log[str(d)]
except KeyError:
rtn = 'empty'
else:
if rtn > 9: rtn = 'more'
return self.symbol_map[str(rtn)]
def new_symbol_map(self, new_map):
self.symbol_map.update(new_map)
def render_calendar(self, end_date = date.today()):
matrix_to_print = self.gen_rendered_matrix(end_date=end_date)
return '\n'.join([''.join(row) for row in matrix_to_print])
| littleq0903/git-calendar | git_calendar/utils.py | Python | mit | 3,829 |
from functools import wraps
def lazy(func):
cache = {}
@wraps(func)
def wrap(*args, **kwargs):
key = (args, tuple(kwargs.items()))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return wrap
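# Illustrative use of @lazy (hypothetical function): repeated calls with the
# same arguments are served from the cache instead of being recomputed.
#
#   @lazy
#   def expensive(x):
#       return x * x
#
#   expensive(3)   # computed once
#   expensive(3)   # returned from the cache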
def lazy_property(func):
canary = object()
prop_name = '_prop_' + func.func_name
@property
@wraps(func)
def wrap(self, *args, **kwargs):
value = getattr(self, prop_name, canary)
if value is canary:
value = func(self, *args, **kwargs)
setattr(self, prop_name, value)
return value
return wrap
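# Illustrative use of @lazy_property (hypothetical class): the decorated
# method runs once per instance and its result is cached on the instance
# under the attribute '_prop_<method name>' for later accesses.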
| dacjames/croi | croi/decorators.py | Python | mit | 643 |
# -*- coding: utf-8 -*-
import pytest
import threading
from .test_client import service
def test_read_write(service, servicename, keyword_name):
"""Test a write method."""
from Cauldron.ktl.procedural import read, write
write(servicename, keyword_name, "10")
assert read(servicename, keyword_name) == "10"
def test_argument_checking(service, servicename, keyword_name):
"""Check argument type checking."""
from Cauldron.ktl.procedural import read, write
with pytest.raises(TypeError):
read(1, "hello")
with pytest.raises(TypeError):
read(servicename, 1)
def test_monitor(service, waittime, keyword_name, servicename):
"""Test .monitor() for asynchronous broadcast monitoring."""
from Cauldron.ktl.procedural import monitor, callback
def monitor_cb(keyword):
"""Monitor"""
keyword.service.log.log(5, "monitor callback received.")
monitor_cb.monitored.set()
monitor_cb.monitored = threading.Event()
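    # The Event lets the test wait (up to waittime) for the broadcast
    # callback to fire, so the assertions below check whether a broadcast
    # was actually delivered.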
callback(servicename, keyword_name, monitor_cb)
monitor(servicename, keyword_name, prime=False)
monitor_cb.monitored.wait(waittime)
assert not monitor_cb.monitored.is_set()
service[keyword_name].modify("SomeValue")
monitor_cb.monitored.wait(waittime)
assert monitor_cb.monitored.is_set()
callback(servicename, keyword_name, monitor_cb, remove=True)
monitor_cb.monitored.clear()
service[keyword_name].modify("OtherValue")
monitor_cb.monitored.wait(waittime)
assert not monitor_cb.monitored.is_set()
monitor_cb.monitored.clear()
callback(servicename, keyword_name, monitor_cb, preferred=True)
monitor(servicename, keyword_name, prime=True)
service[keyword_name].modify("SomeValue")
monitor_cb.monitored.wait(waittime)
assert monitor_cb.monitored.is_set()
monitor_cb.monitored.clear()
monitor(servicename, keyword_name, start=False)
service[keyword_name].modify("OtherValue")
monitor_cb.monitored.wait(waittime)
assert not monitor_cb.monitored.is_set()
| alexrudy/Cauldron | Cauldron/tests/test_client_procedural.py | Python | bsd-3-clause | 2,094 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 2 18:59:49 2017
@author: Cezar
"""
import csv
import pandas
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
#from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def RandomForest (targetcolumn,splitst,splitfn,d,file,trainsize,n_estimators):
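    # Train and evaluate a random forest on a CSV file under ../data/rf/.
    # `targetcolumn` is the index of the class column, columns [splitst:splitfn]
    # are used as features, `d` maps class names to their expected frequencies
    # (used for the report labels and the final comparison printout), and
    # `trainsize` is the fraction of rows used for training.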
with open("../data/rf/" + file) as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
x = list(spamreader)
chaves = list(d.keys())
chaves.sort()
dfx2 = pandas.DataFrame(x)
le = LabelEncoder()
le.fit(dfx2[targetcolumn])
X_traind, X_testd = train_test_split(dfx2, train_size=trainsize)
X_train = X_traind.apply(LabelEncoder().fit_transform)
X_test = X_testd.apply(LabelEncoder().fit_transform)
features = X_train.columns[splitst:splitfn]
target = X_train.columns[targetcolumn]
rfc = RandomForestClassifier(n_estimators=n_estimators)
rfc.fit(X_train[features],X_train[target])
#prob_pos = rfc.predict_proba(X_test[features])
preds = rfc.predict(X_test[features])
print("\nTestSize: ",X_test.shape)
print("\nTrainSize: ",X_train.shape)
print("\nAccuracy Score: ",accuracy_score(X_test[target],preds))
#print(confusion_matrix(X_test[target],preds))
print("\nConfusion Matrix:")
print(pandas.crosstab(X_testd[target], preds,
rownames=['actual'],
colnames=['preds']))
print("\nClassification Report:")
print(classification_report(X_test[target], preds, target_names=chaves))
countdf = pandas.Series(le.inverse_transform(preds))
print("\nDistribuição Classificação:")
print(countdf.value_counts()/countdf.size)
print("\nDistribuição Real:")
print(pandas.Series(d).sort_values(ascending=False))
return
targetcolumn = 6
splitst = 0
splitfn = 5
d = {'unacc':0.70023,'acc':0.22222,'good':0.03993,'v-good':0.03762}
file = "car.csv"
trainsize = 0.75
n_estimators = 5
print("\n-"+file+"----------------------------------------------------")
RandomForest (targetcolumn,splitst,splitfn,d,file,trainsize,n_estimators)
targetcolumn = 0
splitst = 1
splitfn = 22
d = {'edible':0.518,'poisonous':0.482}
file = "mushroom.csv"
trainsize = 0.75
n_estimators = 5
print("\n-"+file+"----------------------------------------------------")
RandomForest (targetcolumn,splitst,splitfn,d,file,trainsize,n_estimators) | darknerd/SIN5017-Data-Mining | methods/RF.py | Python | mit | 2,829 |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 02 18:04:05 2016
@author: Viktor
"""
import numpy as np
from skimage.io import imread
from skimage.io import imshow
from skimage.color import rgb2gray
from skimage.filters.rank import otsu
from skimage.morphology import opening, closing
import matplotlib.pyplot as plt
from scipy import ndimage
def my_rgb2gray(img_rgb):
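    # Note: with channel weights (0, 0, 1) this "grayscale" conversion simply
    # keeps the blue channel of the RGB image.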
    img_gray = np.ndarray((img_rgb.shape[0], img_rgb.shape[1])) # allocate memory for the image (no third dimension)
img_gray = 0*img_rgb[:, :, 0] + 0*img_rgb[:, :, 1] + 1*img_rgb[:, :, 2]
    img_gray = img_gray.astype('uint8') # the previous step multiplied by floats, so we must convert back to the [0,255] range
return img_gray
def processing(path):
img = imread(path)
gray = my_rgb2gray(img)
binary = 1 - (gray > 0.5)
binary = closing(binary)
binary = opening(binary)
labeled, nr_objects = ndimage.label(binary)
return nr_objects
image_path = []
result = []
with open('Test/out.txt') as f:
data = f.read()
lines = data.split('\n')
for i, line in enumerate(lines):
if(i>1):
cols = line.split('\t')
image_path.append(cols[0])
f.close()
for imgs in image_path:
if(imgs!=''):
result.append(processing('Test/'+imgs))
with open('Test/out.txt','w') as f:
f.write('RA 1/2013 Viktor Sanca\n')
f.write('file\tcircles\n')
for i in range(0,len(image_path)-1):
f.write(image_path[i]+'\t'+str(result[i])+'\n')
f.close()
incorrect = ['images/img-13.png',
'images/img-17.png',
'images/img-20.png',
'images/img-35.png',
'images/img-42.png',
'images/img-45.png',
'images/img-51.png',
'images/img-58.png',
'images/img-87.png',
'images/img-96.png']
path = 'Trening/'+incorrect[0]
img = imread(path)
imshow(img)
gray = my_rgb2gray(img)
imshow(gray)
binary = 1 - (gray > 0.5)
binary = closing(binary)
binary = opening(binary)
imshow(binary)
labeled, nr_objects = ndimage.label(binary)
print nr_objects | vsanca/soft-computing-sandbox | assignments/level 0/lvl0.py | Python | gpl-3.0 | 2,058 |
from droneapi import connect
from droneapi.lib import VehicleMode
from droneapi.tools import with_sitl
from pymavlink import mavutil
import time
import sys
import os
from nose.tools import assert_equals
def current_milli_time():
return int(round(time.time() * 1000))
@with_sitl
def test_timeout(connpath):
v = connect(connpath, await_params=True)
value = v.parameters['THR_MIN']
assert_equals(type(value), float)
start = current_milli_time()
v.parameters['THR_MIN'] = value + 10
end = current_milli_time()
newvalue = v.parameters['THR_MIN']
assert_equals(type(newvalue), float)
assert_equals(newvalue, value + 10)
# TODO once this issue is fixed
# assert end - start < 1000, 'time to set parameter was %s, over 1s' % (end - start,)
| trishhyles/dronekit-python | tests/sitl/test_12.py | Python | apache-2.0 | 786 |
import sys, os, yaml, glob
import subprocess
import argparse
def main(args):
workingDir = os.getcwd()
assemblers = sum(args.assemblers, [])
samples_data_dir = args.sample_data_dir
checkSupportedAssemblers(assemblers, args.global_config)
    # The specified assemblers are supported, or at least present in the global configuration (they might not be implemented, but this is unlikely)
for sample_dir_name in [dir for dir in os.listdir(samples_data_dir) if os.path.isdir(os.path.join(samples_data_dir, dir))]:
assembly_folder = os.path.join(os.getcwd(), sample_dir_name)
if not os.path.exists(assembly_folder):
os.makedirs(assembly_folder)
os.chdir(assembly_folder)
for assembler in assemblers:
            #assemble the data stored in sample_dir_name with the current assembler
sample_YAML_name = os.path.join(assembly_folder, "{}_{}.yaml".format(sample_dir_name, assembler))
sample_YAML = open(sample_YAML_name, 'w')
sample_YAML.write("pipeline:\n")
sample_YAML.write(" assemble\n")
sample_YAML.write("tools:\n")
sample_YAML.write(" [{}]\n".format(assembler))
sample_YAML.write("genomeSize: {}\n".format(args.genomeSize))
sample_YAML.write("kmer: {}\n".format(args.kmer))
sample_YAML.write("output: {}\n".format(sample_dir_name))
sample_YAML.write("libraries:\n")
sample_data_dir = os.path.join(samples_data_dir,sample_dir_name)
sample_files = [ f for f in os.listdir(sample_data_dir) if os.path.isfile(os.path.join(sample_data_dir,f))]
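            # Classify the raw files into read 1, read 2 and merged/single
            # reads based on their (assumed Illumina-style) file names.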
pair1_file = ""
pair2_file = ""
single = ""
sample_YAML.write(" lib1:\n")
for file in sample_files:
if "_R1_" in file or "_1.fastq.gz" in file:
if pair1_file:
sys.exit("Error: processing sample {} found more that one library/run for read 1".format(sample_dir_name))
pair1_file = os.path.join(sample_data_dir,file)
sample_YAML.write(" pair1: {}\n".format(pair1_file))
elif "_R2_" in file or "_2.fastq.gz" in file:
if pair2_file:
sys.exit("Error: processing sample {} found more that one library/run for read 2".format(sample_dir_name))
pair2_file = os.path.join(sample_data_dir,file)
sample_YAML.write(" pair2: {}\n".format(pair2_file))
elif "merged" in file or "single" in file:
single = os.path.join(sample_data_dir,file)
sample_YAML.write(" orientation: {}\n".format(args.orientation))
sample_YAML.write(" insert: {}\n".format(args.insert))
sample_YAML.write(" std: {}\n".format(args.std))
if single != "":
sample_YAML.write(" lib2:\n")
sample_YAML.write(" pair1: {}\n".format(single))
sample_YAML.write(" pair2:\n")
sample_YAML.write(" orientation: none\n")
sample_YAML.write(" insert: 0\n")
sample_YAML.write(" std: 0\n")
sample_YAML.close
if(args.global_config is not None):
submit_job(sample_YAML_name, args.global_config, sample_dir_name, args.project, assembler)
os.chdir(workingDir)
return
def checkSupportedAssemblers(assemblers, global_config):
with open(global_config) as in_handle:
global_config = yaml.load(in_handle)
for assembler in assemblers:
if assembler not in global_config["Tools"]:
print "assembler {} not supported. Supported assemblers specified in the global configuration are:".format(assembler)
for supported_assembler, options in global_config["assemble"].items():
print supported_assembler
sys.exit("Error: assembler {} not supported".format(assembler))
def submit_job(sample_config, global_config, sample_name, project, assembler):
workingDir = os.getcwd()
slurm_file = os.path.join(workingDir, "{}_{}.slurm".format(sample_name, assembler))
slurm_handle = open(slurm_file, "w")
slurm_handle.write("#! /bin/bash -l\n")
slurm_handle.write("set -e\n")
slurm_handle.write("#SBATCH -A {}\n".format(project))
slurm_handle.write("#SBATCH -o {}_{}.out\n".format(sample_name, assembler))
slurm_handle.write("#SBATCH -e {}_{}.err\n".format(sample_name, assembler))
slurm_handle.write("#SBATCH -J {}_{}.job\n".format(sample_name, assembler))
slurm_handle.write("#SBATCH -p node -n 8\n")
slurm_handle.write("#SBATCH -t 05:00:00\n")
slurm_handle.write("#SBATCH --mail-user [email protected]\n")
slurm_handle.write("#SBATCH --mail-type=ALL\n")
slurm_handle.write("\n\n");
slurm_handle.write("module load abyss/1.3.5\n");
slurm_handle.write("python ~/DE_NOVO_PIPELINE/de_novo_scilife/script/deNovo_pipeline.py --global-config {} --sample-config {}\n\n".format(global_config,sample_config))
slurm_handle.close()
command=("sbatch", slurm_file)
subprocess.call(command)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--sample-data-dir', help="full path to directory containing one folder per sample. Each sample contaains only one library (i.e., one PE lib)", type=str)
parser.add_argument('--genomeSize', help="genome size", type=str)
parser.add_argument('--orientation', help="I assume I am working only with PE (if not manual editing is needed)", type=str)
parser.add_argument('--insert', help="I assume that all samples have the same insert (if not manual editing is needed)", type=str)
parser.add_argument('--std', help="I assume tha all sample have same std (if not manual editing is needed)", type=str)
parser.add_argument('--kmer', help="kmer to use", type=str)
parser.add_argument('--global-config', help='foo help')
parser.add_argument('--assemblers', action='append', nargs='+', help="List of assemblers to be employed on the datasets specified")
parser.add_argument('--project', help="UPPMAX project to use", default="b2010029")
args = parser.parse_args()
main(args) | senthil10/NouGAT | utils/prepare_de_novo_assembly.py | Python | mit | 6,340 |
import shutil
import tempfile
import numpy as np
import os
from os.path import getsize
import pytest
import yaml
from util import PATH_TO_TESTS, seed, dummy_predict_with_threshold
PATH_TO_ASSETS = os.path.join(PATH_TO_TESTS, 'assets')
PATH_TO_RETINA_DIR = os.path.join(PATH_TO_ASSETS, 'recordings', 'retina')
PATH_TO_RETINA_CONFIG_DIR = os.path.join(PATH_TO_RETINA_DIR, 'config')
@pytest.fixture(autouse=True)
def setup():
seed(0)
@pytest.fixture
def patch_triage_network(monkeypatch):
to_patch = 'yass.neuralnetwork.model.KerasModel.predict_with_threshold'
monkeypatch.setattr(to_patch, dummy_predict_with_threshold)
yield
def _path_to_config():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR, 'config.yaml')
def _data_info():
with open(_path_to_config()) as f:
d = yaml.load(f)
return d
@pytest.fixture()
def data_info():
return _data_info()
@pytest.fixture()
def data():
info = _data_info()['recordings']
path = os.path.join(PATH_TO_RETINA_DIR, 'data.bin')
d = np.fromfile(path, dtype=info['dtype'])
n_observations = int(getsize(path) / info['n_channels'] /
np.dtype(info['dtype']).itemsize)
d = d.reshape(n_observations, info['n_channels'])
return d
@pytest.fixture()
def path_to_tests():
return PATH_TO_TESTS
@pytest.fixture()
def path_to_performance():
return os.path.join(PATH_TO_TESTS, 'performance/')
@pytest.fixture
def make_tmp_folder():
temp = tempfile.mkdtemp()
yield temp
shutil.rmtree(temp)
@pytest.fixture()
def path_to_data():
return os.path.join(PATH_TO_RETINA_DIR, 'data.bin')
@pytest.fixture()
def path_to_geometry():
return os.path.join(PATH_TO_RETINA_DIR, 'geometry.npy')
@pytest.fixture()
def path_to_sample_pipeline_folder():
return os.path.join(PATH_TO_RETINA_DIR,
'sample_pipeline_output')
@pytest.fixture()
def path_to_standardized_data():
return os.path.join(PATH_TO_RETINA_DIR,
'sample_pipeline_output', 'preprocess',
'standardized.bin')
@pytest.fixture()
def path_to_output_reference():
return os.path.join(PATH_TO_ASSETS, 'output_reference')
@pytest.fixture
def path_to_config():
return _path_to_config()
@pytest.fixture
def path_to_config_threshold():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR, 'config_threshold.yaml')
@pytest.fixture
def path_to_config_with_wrong_channels():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR,
'wrong_channels.yaml')
@pytest.fixture
def path_to_txt_geometry():
return os.path.join(PATH_TO_ASSETS, 'test_files', 'geometry.txt')
@pytest.fixture
def path_to_npy_geometry():
return os.path.join(PATH_TO_ASSETS, 'test_files', 'geometry.npy')
| paninski-lab/yass | tests/conftest.py | Python | apache-2.0 | 2,794 |
#!/usr/bin/env python
########################################################################################################################
# Simple Logging System #
########################################################################################################################
# (C) Manuel Bernal Llinares <[email protected]>#
# Distributed under the Apache License 2.0 #
########################################################################################################################
import os
import logging
import settings # Get the settings singleton that should have already been initialized
_logger = None
def getLogger():
global _logger
if not _logger:
# print("Preparing logger")
# Instantiate the _logger
config = settings.getConfig()
# Create the logger
_logger = logging.getLogger(config["logger"]["namespace"])
# Set formatter
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] - %(name)s --- %(message)s')
# Set logger handler
handler = logging.FileHandler(config["logger"]["filePath"])
handler.setFormatter(formatter)
# Set logging level
if "level" in config["logger"]:
if "debug" in config["logger"]["level"]:
handler.setLevel(logging.DEBUG)
elif "critical" in config["logger"]["level"]:
handler.setLevel(logging.CRITICAL)
elif "error" in config["logger"]["level"]:
handler.setLevel(logging.ERROR)
elif "fatal" in config["logger"]["level"]:
handler.setLevel(logging.FATAL)
elif "warning" in config["logger"]["level"]:
handler.setLevel(logging.WARNING)
else:
handler.setLevel(logging.INFO)
else:
handler.setLevel(logging.INFO)
_logger.setLevel(handler.level)
# Add handler to logger
_logger.addHandler(handler)
return _logger # logging.getLogger(settings.getConfig()["logger"]["namespace"])
def main():
print("This module is not designed to play alone. Sorry.")
if __name__ == "__main__":
main()
| sahmri/simpleBackup | logger.py | Python | apache-2.0 | 2,246 |
"""\
This module is deprecated. Please update your code to use
``authkit.authorize.pylons_adaptors`` instead of this module.
"""
import warnings
warnings.warn(
"""This module is deprecated. Please update your code to use ``authkit.authorize.pylons_adaptors`` instead of this module.""",
DeprecationWarning,
2
)
from authkit.authorize.pylons_adaptors import *
| santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/AuthKit-0.4.0-py2.5.egg/authkit/pylons_adaptors.py | Python | bsd-3-clause | 374 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Group nginx access log entries by three keys (IP, URL and response status) and print the ten most frequently occurring records.
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
'''
def ReadFile(FileName):
Lines=[]
f=open(FileName,'r')
for line in f.readlines():
Lines.append(line.rstrip('\n'))
f.close()
return Lines
def FilterNginx(Lines):
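    # Count occurrences of each (IP, URL, status) triple, then print the ten
    # most frequent ones in descending order.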
FilterDict={}
for i in range(0,len(Lines)):
LineList=Lines[i].split()
IP,Url,Status=LineList[0],LineList[6].rstrip('"'),LineList[8]
Key=(IP,Url,Status)
FilterDict[Key]=FilterDict.get(Key,0)+1
FilterList=sorted(FilterDict.items(),key=lambda x:x[1],reverse=True)
for i in range(0,10):
        print 'Visits: %s\tIP: %-15s\tURL: %-50s\tStatus: %s' %(FilterList[i][1],FilterList[i][0][0],FilterList[i][0][1],FilterList[i][0][2])
if __name__ == "__main__":
Lines=ReadFile('www_access_20140823.log')
FilterNginx(Lines)
'''
Functionality OK, keep it up
'''
| 51reboot/actual_09_homework | 03/huxianglin/filter_nginx.py | Python | mit | 1,385 |
# -*- coding: utf-8 -*-
#
# Sideboard documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 16 09:17:02 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sideboard'
copyright = u'2013, Sideboard team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '1.0'
# The full version, including alpha/beta/rc tags.
#release = '1.0.1'
__here__ = os.path.abspath(os.path.dirname(__file__)) # there should be a PEP for this
exec(open(os.path.join(__here__, '..', '..', 'sideboard', '_version.py')).read())
release = version = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sideboarddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Sideboard.tex', u'Sideboard Documentation',
u'Sideboard team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sideboard', u'Sideboard Documentation',
[u'Sideboard team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Sideboard', u'Sideboard Documentation',
u'Sideboard team', 'Sideboard', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Sideboard'
epub_author = u'Sideboard team'
epub_publisher = u'Sideboard team'
epub_copyright = u'2013, Sideboard team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| binary1230/sideboard | docs/source/conf.py | Python | bsd-3-clause | 9,856 |
from django.conf import settings
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('')
# The following is used to serve up local media files like images, css, js
# Use __file__ to find the absolute path to this file. This can be used to
# determine the path to the static directory which contains all the files
# we are trying to expose
static_root = '%s/static' % __file__[:__file__.rfind('/')]
base_url_regex = r'^muddle_static/(?P<path>.*)$'
urlpatterns += patterns('',
(base_url_regex, 'django.views.static.serve',
{'document_root': static_root})
)
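# A request such as /muddle_static/css/site.css is served from the package's
# static/ directory (the document_root computed above).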
| dannyman/ganeti_webmgr | ganeti_webmgr/muddle/urls.py | Python | gpl-2.0 | 656 |
# Time: O(n^2)
# Space: O(1)
# Given an array consists of non-negative integers,
# your task is to count the number of triplets chosen
# from the array that can make triangles
# if we take them as side lengths of a triangle.
#
# Example 1:
# Input: [2,2,3,4]
# Output: 3
# Explanation:
# Valid combinations are:
# 2,3,4 (using the first 2)
# 2,3,4 (using the second 2)
# 2,2,3
# Note:
# The length of the given array won't exceed 1000.
# The integers in the given array are in the range of [0, 1000].
class Solution(object):
def triangleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
nums.sort()
for i in xrange(len(nums)-2):
if nums[i] == 0:
continue
k = i+2
for j in xrange(i+1, len(nums)-1):
while k < len(nums) and nums[i] + nums[j] > nums[k]:
k += 1
result += k-j-1
return result
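# Minimal self-check (not part of the original solution); it reuses the example
# given in the problem statement above.
if __name__ == "__main__":
    assert Solution().triangleNumber([2, 2, 3, 4]) == 3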
| yiwen-luo/LeetCode | Python/valid-triangle-number.py | Python | mit | 987 |
"""Allows to serve Sphinx generated docs from django."""
__version__ = '0.1.2'
| svfat/django-docs | docs/__init__.py | Python | bsd-3-clause | 80 |
import string
import random
def random_string(length, upper=True, lower=True, digit=True, symbol=True):
chars = ''
if upper:
chars += string.ascii_uppercase
if lower:
chars += string.ascii_lowercase
if digit:
chars += string.digits
if symbol:
chars += string.punctuation
    if len(chars) == 0:
        raise ValueError("You must enable at least one kind of character")
    return ''.join(random.SystemRandom().choice(chars) for _ in range(length))
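# Illustrative usage (only runs when the module is executed directly):
if __name__ == "__main__":
    print(random_string(16))                # letters, digits and punctuation
    print(random_string(8, symbol=False))   # alphanumeric only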
| James1345/white-noise | whitenoise/random/random_string.py | Python | mit | 509 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
How many Sundays fell on the first of the month
during the twentieth century?
"""
from datetime import date
def pe19(n=2000):
"""
>>> pe19()
171
"""
s = 0
for y in range(1901, n + 1):
for m in range(1, 13):
d = date(y, m, 1)
if d.weekday() == 6:
s += 1
return s
if __name__ == "__main__":
import doctest
doctest.testmod()
try:
while True:
s = input('> ')
n = int(s)
print(pe19(n))
except (SyntaxError, EOFError, KeyboardInterrupt, NameError):
pass
| kittttttan/pe | py/pe/pe19.py | Python | mit | 646 |
import os
import threading
from System.Core.Global import *
from System.Core.Colors import *
from System.Core.Modbus import *
from System.Lib import ipcalc
class Module:
info = {
'Name': 'Read Coils Exception Function',
'Author': ['@enddo'],
'Description': ("Fuzzing Read Coils Exception Function"),
}
options = {
'RHOSTS' :['' ,True ,'The target address range or CIDR identifier'],
'RPORT' :[502 ,False ,'The port number for modbus protocol'],
'UID' :['' ,True ,'Modbus Slave UID.'],
'Threads' :[1 ,False ,'The number of concurrent threads'],
'Output' :[True ,False ,'The stdout save in output directory']
}
output = ''
def exploit(self):
moduleName = self.info['Name']
print bcolors.OKBLUE + '[+]' + bcolors.ENDC + ' Module ' + moduleName + ' Start'
ips = list()
for ip in ipcalc.Network(self.options['RHOSTS'][0]):
ips.append(str(ip))
while ips:
for i in range(int(self.options['Threads'][0])):
if(len(ips) > 0):
thread = threading.Thread(target=self.do,args=(ips.pop(0),))
thread.start()
THREADS.append(thread)
else:
break
for thread in THREADS:
thread.join()
if(self.options['Output'][0]):
open(mainPath + '/Output/' + moduleName + '_' + self.options['RHOSTS'][0].replace('/','_') + '.txt','a').write('='*30 + '\n' + self.output + '\n\n')
self.output = ''
def printLine(self,str,color):
self.output += str + '\n'
if(str.find('[+]') != -1):
print str.replace('[+]',color + '[+]' + bcolors.ENDC)
elif(str.find('[-]') != -1):
			print str.replace('[-]',color + '[-]' + bcolors.ENDC)
else:
print str
def do(self,ip):
c = connectToTarget(ip,self.options['RPORT'][0])
if(c == None):
self.printLine('[-] Modbus is not running on : ' + ip,bcolors.WARNING)
return None
self.printLine('[+] Connecting to ' + ip,bcolors.OKGREEN)
ans = c.sr1(ModbusADU(transId=getTransId(),unitId=int(self.options['UID'][0]))/ModbusPDU01_Read_Coils_Exception(),timeout=timeout, verbose=0)
ans = ModbusADU_Answer(str(ans))
self.printLine('[+] Response is :',bcolors.OKGREEN)
ans.show()
| enddo/smod | Application/modules/modbus/function/readCoilsException.py | Python | gpl-2.0 | 2,116 |
import numpy as np
tansig = lambda n: 2 / (1 + np.exp(-2 * n)) - 1
sigmoid = lambda n: 1 / (1 + np.exp(-n))
hardlim = lambda n: 1 if n >= 0 else 0
purelin = lambda n: n
relu = lambda n: np.fmax(0, n)
square_error = lambda x, y: np.sum(0.5 * (x - y)**2)
sig_prime = lambda z: sigmoid(z) * (1 - sigmoid(z))
relu_prime = lambda z: np.where(z > 0, 1.0, 0.0)  # derivative of ReLU: 1 for z > 0, else 0
softmax = lambda n: np.exp(n)/np.sum(np.exp(n))
softmax_prime = lambda n: softmax(n) * (1 - softmax(n))
cross_entropy = lambda x, y: -np.dot(x, np.log(y))
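# Quick numerical sanity check (illustrative, not part of the original module):
if __name__ == "__main__":
    logits = np.array([1.0, 2.0, 3.0])
    probs = softmax(logits)
    print(probs.sum())                                       # ~1.0
    print(cross_entropy(np.array([0.0, 0.0, 1.0]), probs))   # == -log(probs[2])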
| alexvlis/shape | nnmath.py | Python | gpl-3.0 | 518 |
from apps.userfeeds.models import UserFeed, UserFeedEntry
from apps.userfeeds.services import FeedWriteService, FeedInformationService
from celery.canvas import group
from djangofeeds.tasks import refresh_feed
from logging import getLogger
from feedreader.celery import app
logger = getLogger(__name__)
def update_feed_status(item, status):
update_feed_status_task.delay(item, status)
def inc_entry_status(item):
"""
sets the status to seen for new or unread items
"""
logger.info('Status of %s == %s', item.pk, item.status)
if item.status in (UserFeedEntry.ENTRY_NEW_VAL, UserFeedEntry.ENTRY_UNREAD_VAL):
status = UserFeedEntry.ENTRY_SEEN_VAL
update_feed_status_task.delay(item, status)
@app.task
def update_feed_status_task(item, status):
item.status = status
item.save()
logger.info('Status of %s set to %s', item, status)
def add_feeds(user, feedurls, fuzzy=True, dry_run=False):
write_task = write_feed_task.s(user=user, dry_run=dry_run)
info_task = get_feed_info_from_url_task.s(fuzzy=fuzzy)
group((info_task | write_task)(feedurl) for feedurl in feedurls).delay()
def add_feed(user, feedurl, fuzzy=True, logger=logger, dry_run=False):
write_task = write_feed_task.s(user=user, dry_run=dry_run)
info_task = get_feed_info_from_url_task.s(fuzzy=fuzzy)
(info_task | write_task).delay(feedurl)
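# In both helpers above, `info_task | write_task` is a Celery chain: the feed URL
# is first parsed by get_feed_info_from_url_task and its result is then handed to
# write_feed_task for saving.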
@app.task
def write_feed_task(feed_info, user, dry_run, logger=logger):
    feed_writer_service = FeedWriteService(user, logger, dry_run)
    return feed_writer_service.rsave(feed_info)
@app.task
def get_feed_info_from_url_task(feed_url, fuzzy=False):
info_service = FeedInformationService(feed_url, accept_fuzzy=fuzzy)
return info_service.parse()
def load_feeds(user=None):
"""
updates all feeds (if a user is given, only the feeds of this user are updatet
:param user: user which feeds should be updated, all user if not set
"""
user_feeds = UserFeed.objects.all()
if user:
user_feeds = user_feeds.filter(user=user)
feeds = set(user_feed.feed for user_feed in user_feeds)
logger.debug('found %s feeds to update for user %s.', len(feeds), user)
group(load_feed_task.s(feed) for feed in feeds).delay()
@app.task
def load_feed_task(feed):
"""
Wrapper to update a feed.
Wrapps the ``refresh_feed`` task from djangofeeds.
:param feed: feed to update
"""
# small hacke needet to get djangofees task working without the
# task decorator
refresh_feed.get_logger = lambda **kwargs: logger
refresh_feed(feed_url=feed.feed_url)
def save_user_feed_entries(feed_entry):
"""
delegates the new feeds to all users which have this as userfeed
:param feed_entry: which was updated
"""
save_user_feed_entries_task.delay(feed_entry)
@app.task
def save_user_feed_entries_task(feed_entry):
"""
stores the given feed entry for all users which have this feed as userfeed
"""
base_feed = feed_entry.feed
user_feeds = UserFeed.objects.filter(feed=base_feed)
for item in user_feeds:
save_user_feed_item_task.delay(user_feed=item, base_feed_entry=feed_entry)
@app.task
def save_user_feed_item_task(user_feed, base_feed_entry):
user_feed_entry, created = UserFeedEntry.objects.get_or_create(feed=user_feed, entry=base_feed_entry)
if created:
logger.debug('created new FeedEntry of "%s" for user "%s"', base_feed_entry, user_feed.user)
| jo-soft/jadfr | jadfr/apps/userfeeds/tasks.py | Python | gpl-3.0 | 3,462 |
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
if "random_state" in estimator_params:
estimator.set_params(random_state=args["random_seed"])
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| ankurankan/scikit-learn | benchmarks/bench_covertype.py | Python | bsd-3-clause | 7,233 |
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
#from _weakref import (
# getweakrefcount,
# getweakrefs,
# ref,
# proxy,
# CallableProxyType,
# ProxyType,
# ReferenceType)
from '__go__/grumpy' import WeakRefType as ReferenceType
ref = ReferenceType
import _weakrefset
WeakSet = _weakrefset.WeakSet
_IterationGuard = _weakrefset._IterationGuard
import exceptions
ReferenceError = exceptions.ReferenceError
#ProxyTypes = (ProxyType, CallableProxyType)
#__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
# "WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType",
# "CallableProxyType", "ProxyTypes", "WeakValueDictionary", 'WeakSet']
class WeakValueDictionary(UserDict.UserDict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(*args, **kw):
if not args:
raise TypeError("descriptor '__init__' of 'WeakValueDictionary' "
"object needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
UserDict.UserDict.__init__(self, *args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError, key
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def has_key(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
import copy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[copy.deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen if the referent has been
                # collected but its callback has not removed the entry yet
return default
else:
return o
def items(self):
L = []
for key, wr in self.data.items():
o = wr()
if o is not None:
L.append((key, o))
return L
def iteritems(self):
with _IterationGuard(self):
for wr in self.data.itervalues():
value = wr()
if value is not None:
yield wr.key, value
def iterkeys(self):
with _IterationGuard(self):
for k in self.data.iterkeys():
yield k
__iter__ = iterkeys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.itervalues():
yield wr
def itervalues(self):
with _IterationGuard(self):
for wr in self.data.itervalues():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while 1:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError, key
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(*args, **kwargs):
if not args:
raise TypeError("descriptor 'update' of 'WeakValueDictionary' "
"object needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
dict = args[0] if args else None
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.values()
def values(self):
L = []
for wr in self.data.values():
o = wr()
if o is not None:
L.append(o)
return L
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
import copy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = copy.deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def has_key(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def items(self):
L = []
for key, value in self.data.items():
o = key()
if o is not None:
L.append((o, value))
return L
def iteritems(self):
with _IterationGuard(self):
for wr, value in self.data.iteritems():
key = wr()
if key is not None:
yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.iterkeys():
yield wr
def iterkeys(self):
with _IterationGuard(self):
for wr in self.data.iterkeys():
obj = wr()
if obj is not None:
yield obj
__iter__ = iterkeys
def itervalues(self):
with _IterationGuard(self):
for value in self.data.itervalues():
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.keys()
def keys(self):
L = []
for wr in self.data.keys():
o = wr()
if o is not None:
L.append(o)
return L
def popitem(self):
while 1:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
| google/grumpy | third_party/stdlib/weakref.py | Python | apache-2.0 | 13,741 |
import requests
import sys
def chat_post_request(token, channel, message):
"""Post a message to a Slack channel"""
# See https://api.slack.com/methods/chat.postMessage
chat_api_url = 'https://slack.com/api/chat.postMessage'
data = {
'token': token,
'channel': channel,
'as_user': True,
'parse': 'full',
'text': message
}
resp = requests.post(chat_api_url, data=data)
if resp.ok and resp.json()['ok']:
return resp.json()
print("[chat.postMessage] %s: %s" % (resp.status_code, resp.text), file=sys.stderr)
return None # TODO: raise error instead of handling None case?
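# Illustrative call of chat_post_request (token and channel are placeholders):
#   chat_post_request('xoxb-your-bot-token', '#general', 'File cleanup finished')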
def markdown_post_request(token, channels, title, content, filetype='post'):
"""Create a ‘Post’ in a Slack channel using Markdown formatting."""
# See https://api.slack.com/methods/files.upload
file_upload_api_url = 'https://slack.com/api/files.upload'
data = {
'token': token,
'channels': channels,
'content': content,
'title': title,
'filetype': filetype
}
resp = requests.post(file_upload_api_url, data=data)
if resp.ok and resp.json()['ok']:
return resp.json()
print("[files.upload] %s: %s" % (resp.status_code, resp.text), file=sys.stderr)
    return None  # TODO: raise error instead of handling None case?
| otherjoel/SlackFileCleanup | slack_utils.py | Python | gpl-3.0 | 1,385 |
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_servicebusqueue
version_added: "2.8"
short_description: Manage Azure Service Bus queue.
description:
- Create, update or delete an Azure Service Bus queue.
options:
resource_group:
description:
- name of resource group.
required: true
name:
description:
- name of the queue.
required: true
namespace:
description:
- Servicebus namespace name.
- A namespace is a scoping container for all messaging components.
- Multiple queues and topics can reside within a single namespace, and namespaces often serve as application containers.
required: true
state:
description:
- Assert the state of the queue. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
auto_delete_on_idle_in_seconds:
description:
- Time idle interval after which a queue is automatically deleted.
- The minimum duration is 5 minutes.
type: int
dead_lettering_on_message_expiration:
description:
- A value that indicates whether a queue has dead letter support when a message expires.
type: bool
default_message_time_to_live_seconds:
description:
            - Default message time-to-live value.
- This is the duration after which the message expires, starting from when the message is sent to Service Bus.
- This is the default value used when TimeToLive is not set on a message itself.
type: int
enable_batched_operations:
description:
- Value that indicates whether server-side batched operations are enabled.
type: bool
enable_express:
description:
- Value that indicates whether Express Entities are enabled.
- An express topic or queue holds a message in memory temporarily before writing it to persistent storage.
type: bool
enable_partitioning:
description:
- A value that indicates whether the topic or queue is to be partitioned across multiple message brokers.
type: bool
forward_dead_lettered_messages_to:
description:
- Queue or topic name to forward the Dead Letter message for a queue.
forward_to:
description:
- Queue or topic name to forward the messages for a queue.
lock_duration_in_seconds:
description:
- Timespan duration of a peek-lock.
- The amount of time that the message is locked for other receivers.
- The maximum value for LockDuration is 5 minutes.
type: int
max_delivery_count:
description:
            - The maximum delivery count.
- A message is automatically deadlettered after this number of deliveries.
type: int
max_size_in_mb:
description:
- The maximum size of the queue in megabytes, which is the size of memory allocated for the queue.
type: int
requires_duplicate_detection:
description:
- A value indicating if this queue or topic requires duplicate detection.
type: bool
duplicate_detection_time_in_seconds:
description:
- TimeSpan structure that defines the duration of the duplicate detection history.
type: int
requires_session:
description:
- A value that indicates whether the queue supports the concept of sessions.
type: bool
status:
description:
- Status of the entity.
choices:
- active
- disabled
- send_disabled
- receive_disabled
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Create a queue
azure_rm_servicebusqueue:
name: subqueue
resource_group: foo
namespace: bar
duplicate_detection_time_in_seconds: 600
'''
RETURN = '''
id:
description: Current state of the queue.
returned: success
type: str
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
from ansible.module_utils._text import to_native
from datetime import datetime, timedelta
duration_spec_map = dict(
default_message_time_to_live='default_message_time_to_live_seconds',
duplicate_detection_history_time_window='duplicate_detection_time_in_seconds',
auto_delete_on_idle='auto_delete_on_idle_in_seconds',
lock_duration='lock_duration_in_seconds'
)
sas_policy_spec = dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
name=dict(type='str', required=True),
regenerate_key=dict(type='bool'),
rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send'])
)
class AzureRMServiceBusQueue(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
namespace=dict(type='str', required=True),
auto_delete_on_idle_in_seconds=dict(type='int'),
dead_lettering_on_message_expiration=dict(type='bool'),
default_message_time_to_live_seconds=dict(type='int'),
duplicate_detection_time_in_seconds=dict(type='int'),
enable_batched_operations=dict(type='bool'),
enable_express=dict(type='bool'),
enable_partitioning=dict(type='bool'),
forward_dead_lettered_messages_to=dict(type='str'),
forward_to=dict(type='str'),
lock_duration_in_seconds=dict(type='int'),
max_delivery_count=dict(type='int'),
max_size_in_mb=dict(type='int'),
requires_duplicate_detection=dict(type='bool'),
requires_session=dict(type='bool'),
status=dict(type='str',
choices=['active', 'disabled', 'send_disabled', 'receive_disabled'])
)
self.resource_group = None
self.name = None
self.state = None
self.namespace = None
self.location = None
self.type = None
self.subscription_topic_name = None
self.auto_delete_on_idle_in_seconds = None
self.dead_lettering_on_message_expiration = None
self.default_message_time_to_live_seconds = None
self.enable_batched_operations = None
self.enable_express = None
self.enable_partitioning = None
self.forward_dead_lettered_messages_to = None
self.forward_to = None
self.lock_duration_in_seconds = None
self.max_delivery_count = None
self.max_size_in_mb = None
self.requires_duplicate_detection = None
self.status = None
self.results = dict(
changed=False,
id=None
)
super(AzureRMServiceBusQueue, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
changed = False
original = self.get()
if self.state == 'present':
# Create the resource instance
params = dict(
dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,
enable_batched_operations=self.enable_batched_operations,
enable_express=self.enable_express,
enable_partitioning=self.enable_partitioning,
forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
forward_to=self.forward_to,
max_delivery_count=self.max_delivery_count,
max_size_in_megabytes=self.max_size_in_mb
)
if self.status:
params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status)))
for k, v in duration_spec_map.items():
seconds = getattr(self, v)
if seconds:
params[k] = timedelta(seconds=seconds)
instance = self.servicebus_models.SBQueue(**params)
result = original
if not original:
changed = True
result = instance
else:
result = original
attribute_map = set(self.servicebus_models.SBQueue._attribute_map.keys()) - set(self.servicebus_models.SBQueue._validation.keys())
for attribute in attribute_map:
value = getattr(instance, attribute)
if value and value != getattr(original, attribute):
changed = True
if changed and not self.check_mode:
result = self.create_or_update(instance)
self.results = self.to_dict(result)
elif original:
changed = True
if not self.check_mode:
self.delete()
self.results['deleted'] = True
self.results['changed'] = changed
return self.results
def create_or_update(self, param):
try:
client = self._get_client()
return client.create_or_update(self.resource_group, self.namespace, self.name, param)
except Exception as exc:
self.fail('Error creating or updating queue {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc)))
def delete(self):
try:
client = self._get_client()
client.delete(self.resource_group, self.namespace, self.name)
return True
except Exception as exc:
self.fail("Error deleting queue {0} - {1}".format(self.name, str(exc)))
def _get_client(self):
return self.servicebus_client.queues
def get(self):
try:
client = self._get_client()
return client.get(self.resource_group, self.namespace, self.name)
except Exception:
return None
def to_dict(self, instance):
result = dict()
attribute_map = self.servicebus_models.SBQueue._attribute_map
for attribute in attribute_map.keys():
value = getattr(instance, attribute)
if not value:
continue
if attribute_map[attribute]['type'] == 'duration':
if is_valid_timedelta(value):
key = duration_spec_map.get(attribute) or attribute
result[key] = int(value.total_seconds())
elif attribute == 'status':
result['status'] = _camel_to_snake(value)
elif isinstance(value, self.servicebus_models.MessageCountDetails):
result[attribute] = value.as_dict()
elif isinstance(value, self.servicebus_models.SBSku):
result[attribute] = value.name.lower()
elif isinstance(value, datetime):
result[attribute] = str(value)
elif isinstance(value, str):
result[attribute] = to_native(value)
elif attribute == 'max_size_in_megabytes':
result['max_size_in_mb'] = value
else:
result[attribute] = value
return result
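# timedelta(10675199, 10085, 477581) corresponds to .NET TimeSpan.MaxValue, which
# the Service Bus API uses as a "never set" sentinel; such values are treated as unset.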
def is_valid_timedelta(value):
if value == timedelta(10675199, 10085, 477581):
return None
return value
def main():
AzureRMServiceBusQueue()
if __name__ == '__main__':
main()
| dagwieers/ansible | lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py | Python | gpl-3.0 | 12,388 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
import shuup.apps
class AppConfig(shuup.apps.AppConfig):
name = __name__
verbose_name = _("Shuup Default Tax")
label = "default_tax"
provides = {
"tax_module": ["shuup.default_tax.module:DefaultTaxModule"],
"admin_module": ["shuup.default_tax.admin_module:TaxRulesAdminModule"],
}
default_app_config = __name__ + ".AppConfig"
| suutari-ai/shoop | shuup/default_tax/__init__.py | Python | agpl-3.0 | 695 |
#!/usr/bin/env python3
"""
Encode MP3 audio using ffmpeg (libmp3lame).
"""
import argparse
import glob
import logging
import os
import re
import shutil
import signal
import sys
from typing import Generator, List, Tuple
import command_mod
import logging_mod
import subtask_mod
logger = logging.getLogger(__name__)
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging_mod.ColoredFormatter())
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_audio_codec(self) -> str:
"""
Return audio codec.
"""
return self._audio_codec
def get_audio_quality(self) -> str:
"""
Return audio quality.
"""
return self._args.audioQuality[0]
def get_audio_volume(self) -> str:
"""
Return audio volume.
"""
return self._args.audioVolume[0]
def get_files(self) -> List[str]:
"""
Return list of files.
"""
return self._files
def get_file_new(self) -> str:
"""
Return new file location.
"""
return self._file_new
def get_flags(self) -> List[str]:
"""
Return extra flags
"""
return self._args.flags
def get_noskip_flag(self) -> bool:
"""
Return noskip flag.
"""
return self._args.noskip_flag
def get_run_time(self) -> str:
"""
Return run time.
"""
return self._args.runTime[0]
def get_start_time(self) -> str:
"""
Return start time.
"""
return self._args.startTime[0]
def get_threads(self) -> str:
"""
Return threads.
"""
return self._args.threads[0]
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
description="Encode MP3 audio using ffmpeg (libmp3lame).",
)
parser.add_argument(
'-noskip',
dest='noskip_flag',
action='store_true',
help="Disable skipping of encoding when codecs same.",
)
parser.add_argument(
'-aq',
nargs=1,
dest='audioQuality',
default=[None],
help="Select audio bitrate in kbps (128 default).",
)
parser.add_argument(
'-avol',
nargs=1,
dest='audioVolume',
default=[None],
help='Select audio volume adjustment in dB (ie "-5", "5").',
)
parser.add_argument(
'-start',
nargs=1,
dest='startTime',
default=[None],
help="Start encoding at time n seconds.",
)
parser.add_argument(
'-time',
nargs=1,
dest='runTime',
default=[None],
help="Stop encoding after n seconds.",
)
parser.add_argument(
'-threads',
nargs=1,
default=['2'],
help="Threads are faster but decrease quality. Default is 2.",
)
parser.add_argument(
'-flags',
nargs=1,
default=[],
help="Supply additional flags to ffmpeg.",
)
parser.add_argument(
'files',
nargs='+',
metavar='file',
help='Multimedia file. A target ".mp3" file '
'can be given as the first file.'
)
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
if self._args.files[0].endswith('.mp3'):
self._file_new = self._args.files[0]
self._files = self._args.files[1:]
if not self._files or self._file_new in self._files:
raise SystemExit(
f"{sys.argv[0]}: The input and output files "
"must be different.",
)
else:
self._file_new = ''
self._files = self._args.files
self._audio_codec = 'libmp3lame'
class Media:
"""
Media class
"""
def __init__(self, file: str) -> None:
self._file = file
self._length = '0'
self._stream = {}
self._type = 'Unknown'
ffprobe = command_mod.Command('ffprobe', args=[file], errors='stop')
task = subtask_mod.Batch(ffprobe.get_cmdline())
task.run(error2output=True)
number = 0
isjunk = re.compile('^ *Stream #[^ ]*: ')
try:
for line in task.get_output():
if line.strip().startswith('Duration:'):
self._length = line.replace(',', '').split()[1]
elif line.strip().startswith('Stream #0'):
self._stream[number] = isjunk.sub('', line)
number += 1
elif line.strip().startswith('Input #'):
self._type = line.replace(', from', '').split()[2]
except IndexError as exception:
raise SystemExit(
f'{sys.argv[0]}: Invalid "{file}" media file.',
) from exception
def get_stream(self) -> Generator[Tuple[int, str], None, None]:
"""
Return stream
"""
for key, value in sorted(self._stream.items()):
yield (key, value)
def get_stream_audio(self) -> Generator[Tuple[int, str], None, None]:
"""
Return audio stream
"""
for key, value in sorted(self._stream.items()):
if value.startswith('Audio: '):
yield (key, value)
def get_type(self) -> str:
"""
Return media type
"""
return self._type
def has_audio(self) -> bool:
"""
Return True if audio found
"""
for value in self._stream.values():
if value.startswith('Audio: '):
return True
return False
def has_audio_codec(self, codec: str) -> bool:
"""
Return True if audio codec found
"""
for value in self._stream.values():
if value.startswith(f'Audio: {codec}'):
return True
return False
def has_video(self) -> bool:
"""
Return True if video found
"""
for value in self._stream.values():
if value.startswith('Video: '):
return True
return False
def has_video_codec(self, codec: str) -> bool:
"""
Return True if video codec found
"""
for value in self._stream.values():
if value.startswith(f'Video: {codec}'):
return True
return False
def is_valid(self) -> bool:
"""
Return True if valid media
"""
return self._type != 'Unknown'
def show(self) -> None:
"""
Show information
"""
if self.is_valid():
logger.info(
"%s = Type: %s (%s), %d bytes",
self._file,
self._type,
self._length,
os.path.getsize(self._file),
)
for stream, information in self.get_stream():
logger.info("%s[%d] = %s", self._file, stream, information)
class Encoder:
"""
Encoder class
"""
def __init__(self, options: Options) -> None:
self.config(options)
def _config_audio(self, media: Media) -> None:
        if media.has_audio():
changing = (
self._options.get_audio_quality() or
self._options.get_audio_volume()
)
if (not media.has_audio_codec('mp3') or
self._options.get_noskip_flag() or
changing or
len(self._options.get_files()) > 1):
self._ffmpeg.extend_args([
'-c:a',
self._options.get_audio_codec(),
])
if self._options.get_audio_quality():
self._ffmpeg.extend_args([
'-b:a',
f'{self._options.get_audio_quality()}K',
])
else:
self._ffmpeg.extend_args(['-b:a', '128K'])
if self._options.get_audio_volume():
self._ffmpeg.extend_args([
'-af',
f'volume={self._options.get_audio_volume()}dB'
])
else:
self._ffmpeg.extend_args(['-c:a', 'copy'])
def _config(self, file: str) -> Media:
media = Media(file)
self._ffmpeg.set_args(['-i', file])
self._config_audio(media)
if self._options.get_start_time():
self._ffmpeg.extend_args(['-ss', self._options.get_start_time()])
if self._options.get_run_time():
self._ffmpeg.extend_args(['-t', self._options.get_run_time()])
self._ffmpeg.extend_args([
'-vn',
'-threads',
self._options.get_threads()
] + self._options.get_flags())
return media
def _run(self) -> None:
child = subtask_mod.Child(
self._ffmpeg.get_cmdline()).run(error2output=True)
line = ''
ispattern = re.compile(
'^$| version |^ *(built |configuration:|lib|Metadata:|Duration:|'
'compatible_brands:|Stream|concat:|Program|service|lastkeyframe)|'
'^(In|Out)put | : |^Press|^Truncating|bitstream (filter|'
'malformed)|Buffer queue|buffer underflow|message repeated|'
r'^\[|p11-kit:|^Codec AVOption threads|COMPATIBLE_BRANDS:|'
'concat ->'
)
while True:
byte = child.stdout.read(1)
line += byte.decode('utf-8', 'replace')
if not byte:
break
if byte in (b'\n', b'\r'):
if not ispattern.search(line):
sys.stdout.write(line)
sys.stdout.flush()
line = ''
elif byte == b'\r':
sys.stdout.write(line)
line = ''
if not ispattern.search(line):
logger.info(line)
exitcode = child.wait()
if exitcode:
sys.exit(exitcode)
def _single(self) -> None:
output_file = self._options.get_file_new()
self._config(self._options.get_files()[0])
if len(self._options.get_files()) > 1:
args = []
maps = ''
number = 0
for file in self._options.get_files():
media = Media(file)
args.extend(['-i', file])
for stream, _ in media.get_stream_audio():
maps += f'[{number}:{stream}] '
number += 1
self._ffmpeg.set_args(args + [
'-filter_complex',
f'{maps}concat=n={number}:v=0:a=1 [out]',
'-map',
'[out]'
] + self._ffmpeg.get_args()[2:])
self._ffmpeg.extend_args(['-f', 'mp3', '-y', output_file+'.part'])
self._run()
shutil.move(output_file+'.part', output_file)
Media(self._options.get_file_new()).show()
def _multi(self) -> None:
for file in self._options.get_files():
if not file.endswith('.mp3'):
self._config(file)
file_new = file.rsplit('.', 1)[0] + '.mp3'
self._ffmpeg.extend_args(['-f', 'mp3', '-y', file_new+'.part'])
self._run()
shutil.move(file_new+'.part', file_new)
Media(file_new).show()
def config(self, options: Options) -> None:
"""
Configure encoder
"""
self._options = options
self._ffmpeg = command_mod.Command(
'ffmpeg',
args=options.get_flags(),
errors='stop'
)
def run(self) -> None:
"""
Run encoder
"""
if self._options.get_file_new():
self._single()
else:
self._multi()
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
@staticmethod
def run() -> int:
"""
Start program
"""
options = Options()
Encoder(options).run()
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
| drtuxwang/system-config | bin/mp3.py | Python | gpl-2.0 | 13,459 |
from basic_models_behaviors.models import PublishableModel, SoftDeletableModel, TimestampableModel
class PublishableMock(PublishableModel):
pass
class SoftDeletableMock(SoftDeletableModel):
pass
class TimestampableMock(TimestampableModel):
pass
| wo0dyn/django-basic-models-behaviors | tests/models.py | Python | bsd-3-clause | 263 |
# Copyright 2015-2017 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import models, api, fields, _
from odoo.exceptions import UserError
class AccountInvoice(models.Model):
_inherit = "account.invoice"
@api.multi
def action_move_create(self):
previously_validated = self.filtered(lambda inv: inv.move_name)
res = super(AccountInvoice, self).action_move_create()
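        # For journals with chronology checking enabled, block validation when an
        # older draft invoice still exists, or (for newly validated invoices) when
        # an already validated invoice carries a later date.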
for inv in self:
if inv.journal_id.check_chronology:
invoices = \
self.search([('state', 'not in',
['open', 'paid', 'cancel', 'proforma',
'proforma2']),
('date_invoice', '!=', False),
('date_invoice', '<', inv.date_invoice),
('journal_id', '=', inv.journal_id.id)],
limit=1)
if invoices:
date_invoice_format = fields.Date.\
from_string(inv.date_invoice)
date_invoice_tz = fields\
.Date.context_today(self, date_invoice_format)
raise UserError(_("Chronology Error. "
"Please confirm older draft "
"invoices before %s and try again.")
% date_invoice_tz)
if inv not in previously_validated:
invoices = self.search([('state', 'in', ['open', 'paid']),
('date_invoice', '>',
inv.date_invoice),
('journal_id', '=',
inv.journal_id.id)],
limit=1)
if invoices:
date_invoice_format = fields.Date.\
from_string(inv.date_invoice)
date_invoice_tz = fields\
.Date.context_today(self, date_invoice_format)
raise UserError(_("Chronology Error. "
"There exist at least one invoice "
"with a later date to %s.") %
date_invoice_tz)
return res
| ddico/account-financial-tools | account_invoice_constraint_chronology/model/account_invoice.py | Python | agpl-3.0 | 2,514 |
import urllib
import urllib2
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import simplejson as json
from django.utils.safestring import mark_safe
DEFAULT_API_SSL_SERVER = "https://www.google.com/recaptcha/api"
DEFAULT_API_SERVER = "http://www.google.com/recaptcha/api"
DEFAULT_VERIFY_SERVER = "www.google.com"
DEFAULT_WIDGET_TEMPLATE = 'captcha/widget.html'
API_SSL_SERVER = getattr(settings, "CAPTCHA_API_SSL_SERVER", \
DEFAULT_API_SSL_SERVER)
API_SERVER = getattr(settings, "CAPTCHA_API_SERVER", DEFAULT_API_SERVER)
VERIFY_SERVER = getattr(settings, "CAPTCHA_VERIFY_SERVER", \
DEFAULT_VERIFY_SERVER)
WIDGET_TEMPLATE = getattr(settings, "CAPTCHA_WIDGET_TEMPLATE", \
DEFAULT_WIDGET_TEMPLATE)
RECAPTCHA_SUPPORTED_LANUAGES = ('en', 'nl', 'fr', 'de', 'pt', 'ru', 'es', 'tr')
class RecaptchaResponse(object):
def __init__(self, is_valid, error_code=None):
self.is_valid = is_valid
self.error_code = error_code
def displayhtml(public_key,
attrs,
use_ssl=False,
error=None):
"""Gets the HTML to display for reCAPTCHA
public_key -- The public api key
use_ssl -- Should the request be sent over ssl?
error -- An error message to display (from RecaptchaResponse.error_code)"""
error_param = ''
if error:
error_param = '&error=%s' % error
if use_ssl:
server = API_SSL_SERVER
else:
server = API_SERVER
if not 'lang' in attrs:
attrs['lang'] = settings.LANGUAGE_CODE[:2]
return render_to_string(WIDGET_TEMPLATE,
{'api_server': server,
'public_key': public_key,
'error_param': error_param,
'options': mark_safe(json.dumps(attrs, indent=2))
})
def submit(recaptcha_challenge_field,
recaptcha_response_field,
private_key,
remoteip,
use_ssl=False):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field
from the form
recaptcha_response_field -- The value of recaptcha_response_field
from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and recaptcha_challenge_field and
len(recaptcha_response_field) and len(recaptcha_challenge_field)):
return RecaptchaResponse(
is_valid=False,
error_code='incorrect-captcha-sol'
)
def encode_if_necessary(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
params = urllib.urlencode({
'privatekey': encode_if_necessary(private_key),
'remoteip': encode_if_necessary(remoteip),
'challenge': encode_if_necessary(recaptcha_challenge_field),
'response': encode_if_necessary(recaptcha_response_field),
})
if use_ssl:
verify_url = 'https://%s/recaptcha/api/verify' % VERIFY_SERVER
else:
verify_url = 'http://%s/recaptcha/api/verify' % VERIFY_SERVER
request = urllib2.Request(
url=verify_url,
data=params,
headers={
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "reCAPTCHA Python"
}
)
httpresp = urllib2.urlopen(request)
return_values = httpresp.read().splitlines()
httpresp.close()
return_code = return_values[0]
if (return_code == "true"):
return RecaptchaResponse(is_valid=True)
else:
return RecaptchaResponse(is_valid=False, error_code=return_values[1])
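# Illustrative call (added comment, not part of the original module; the POST
# field names follow the reCAPTCHA form convention and the private-key
# setting is a placeholder):
#
#   check = submit(request.POST['recaptcha_challenge_field'],
#                  request.POST['recaptcha_response_field'],
#                  settings.RECAPTCHA_PRIVATE_KEY,
#                  request.META['REMOTE_ADDR'])
#   if not check.is_valid:
#       print(check.error_code)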
| ckprice/bedrock | vendor-local/src/django-recaptcha/captcha/client.py | Python | mpl-2.0 | 3,705 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Module created by Andrea Cometa ([email protected])
# Compatible with OpenERP release 6.1.X
# Copyright (C) 2012 Andrea Cometa. All Rights Reserved.
# Email: [email protected]
# Web site: http://www.andreacometa.it
#
##############################################################################
import time
from report import report_sxw
import inspect, os
from datetime import datetime
class account_due_list_webkit(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_due_list_webkit, self).__init__(cr, uid, name, context=context)
file_path = os.path.dirname(inspect.getfile(inspect.currentframe()))
self.localcontext.update({
'datetime': datetime,
'time': time,
'cr':cr,
'uid': uid,
'file_path':file_path,
})
report_sxw.report_sxw('report.account_due_list.scadenzario',
'account.move.line',
'account_due_list_extended/reports/scadenzario.mako',
parser=account_due_list_webkit)
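# Added note (not in the original source): the call above registers this
# parser for the webkit/mako report 'account_due_list.scadenzario', rendered
# from scadenzario.mako against 'account.move.line' records.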
| odoousers2014/LibrERP | account_due_list/reports/parser.py | Python | agpl-3.0 | 1,214 |
#------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2013, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lextab='cffi._pycparser.lextab',
yacc_optimize=True,
yacctab='cffi._pycparser.yacctab',
yacc_debug=False):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
"""
self.clex = CLexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
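    # Usage sketch (added comment, not part of the original file):
    #
    #   parser = CParser()
    #   ast = parser.parse('int x = 1;', filename='<stdin>')
    #   ast.show()
    #
    # This mirrors the commented-out driver at the bottom of this module.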
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module
# c_ast) and the modifiers are FuncDecl, PtrDecl and
# ArrayDecl.
#
# The standard states that whenever a new modifier is parsed,
# it should be added to the end of the list of modifiers. For
# example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
#
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
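    # Added illustration (not in the original source): for the declaration
    # 'int *c[5];' discussed above, the declarator for 'c' ends up as
    # ArrayDecl -> PtrDecl -> TypeDecl('c'), i.e. each new modifier is
    # spliced in just before the trailing TypeDecl.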
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
def _fix_decl_name_type(self, decl, typename):
""" Fixes a declaration. Modifies decl.
"""
# Reach the underlying basic type
#
type = decl
while not isinstance(type, c_ast.TypeDecl):
type = type.type
decl.name = type.declname
type.quals = decl.quals
# The typename is a list of types. If any type in this
# list isn't an IdentifierType, it must be the only
# type in the list (it's illegal to declare "int enum ..")
# If all the types are basic, they're collected in the
# IdentifierType holder.
#
for tn in typename:
if not isinstance(tn, c_ast.IdentifierType):
if len(typename) > 1:
self._parse_error(
"Invalid multiple types specified", tn.coord)
else:
type.type = tn
return decl
if not typename:
# Functions default to returning int
#
if not isinstance(decl.type, c_ast.FuncDecl):
self._parse_error(
"Missing type in declaration", decl.coord)
type.type = c_ast.IdentifierType(
['int'],
coord=decl.coord)
else:
# At this point, we know that typename is a list of IdentifierType
# nodes. Concatenate all the names into a single list.
#
type.type = c_ast.IdentifierType(
[name for id in typename for name in id.names],
coord=typename[0].coord)
return decl
def _add_declaration_specifier(self, declspec, newspec, kind):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
spec[kind].insert(0, newspec)
return spec
def _build_declarations(self, spec, decls, typedef_namespace=False):
""" Builds a list of declarations all sharing the given specifiers.
If typedef_namespace is true, each declared name is added
to the "typedef namespace", which also includes objects,
functions, and enum constants.
"""
is_typedef = 'typedef' in spec['storage']
declarations = []
# Bit-fields are allowed to be unnamed.
#
if decls[0].get('bitsize') is not None:
pass
# When redeclaring typedef names as identifiers in inner scopes, a
# problem can occur where the identifier gets grouped into
# spec['type'], leaving decl as None. This can only occur for the
# first declarator.
#
elif decls[0]['decl'] is None:
if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
not self._is_type_in_scope(spec['type'][-1].names[0]):
coord = '?'
for t in spec['type']:
if hasattr(t, 'coord'):
coord = t.coord
break
self._parse_error('Invalid declaration', coord)
# Make this look as if it came from "direct_declarator:ID"
decls[0]['decl'] = c_ast.TypeDecl(
declname=spec['type'][-1].names[0],
type=None,
quals=None,
coord=spec['type'][-1].coord)
# Remove the "new" type's name from the end of spec['type']
del spec['type'][-1]
# A similar problem can occur where the declaration ends up looking
# like an abstract declarator. Give it a name if this is the case.
#
elif not isinstance(decls[0]['decl'],
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
decls_0_tail = decls[0]['decl']
while not isinstance(decls_0_tail, c_ast.TypeDecl):
decls_0_tail = decls_0_tail.type
if decls_0_tail.declname is None:
decls_0_tail.declname = spec['type'][-1].names[0]
del spec['type'][-1]
for decl in decls:
assert decl['decl'] is not None
if is_typedef:
declaration = c_ast.Typedef(
name=None,
quals=spec['qual'],
storage=spec['storage'],
type=decl['decl'],
coord=decl['decl'].coord)
else:
declaration = c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=decl['decl'],
init=decl.get('init'),
bitsize=decl.get('bitsize'),
coord=decl['decl'].coord)
if isinstance(declaration.type,
(c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
fixed_decl = declaration
else:
fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
# Add the type name defined by typedef to a
# symbol table (for usage in the lexer)
#
if typedef_namespace:
if is_typedef:
self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
else:
self._add_identifier(fixed_decl.name, fixed_decl.coord)
declarations.append(fixed_decl)
return declarations
def _build_function_definition(self, spec, decl, param_decls, body):
""" Builds a function definition.
"""
assert 'typedef' not in spec['storage']
declaration = self._build_declarations(
spec=spec,
decls=[dict(decl=decl, init=None)],
typedef_namespace=True)[0]
return c_ast.FuncDef(
decl=declaration,
param_decls=param_decls,
body=body,
coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
def p_translation_unit_or_empty(self, p):
""" translation_unit_or_empty : translation_unit
| empty
"""
if p[1] is None:
p[0] = c_ast.FileAST([])
else:
p[0] = c_ast.FileAST(p[1])
def p_translation_unit_1(self, p):
""" translation_unit : external_declaration
"""
# Note: external_declaration is already a list
#
p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
if p[2] is not None:
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
    # external_declaration homogeneous.
#
def p_external_declaration_1(self, p):
""" external_declaration : function_definition
"""
p[0] = [p[1]]
def p_external_declaration_2(self, p):
""" external_declaration : declaration
"""
p[0] = p[1]
def p_external_declaration_3(self, p):
""" external_declaration : pp_directive
"""
p[0] = p[1]
def p_external_declaration_4(self, p):
""" external_declaration : SEMI
"""
p[0] = None
def p_pp_directive(self, p):
""" pp_directive : PPHASH
"""
self._parse_error('Directives not supported yet',
self._coord(p.lineno(1)))
# In function definitions, the declarator can be followed by
    # a declaration list, for old "K&R style" function definitions.
#
def p_function_definition_1(self, p):
""" function_definition : declarator declaration_list_opt compound_statement
"""
# no declaration specifiers - 'int' becomes the default type
spec = dict(
qual=[],
storage=[],
type=[c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))],
function=[])
p[0] = self._build_function_definition(
spec=spec,
decl=p[1],
param_decls=p[2],
body=p[3])
def p_function_definition_2(self, p):
""" function_definition : declaration_specifiers declarator declaration_list_opt compound_statement
"""
spec = p[1]
p[0] = self._build_function_definition(
spec=spec,
decl=p[2],
param_decls=p[3],
body=p[4])
def p_statement(self, p):
""" statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
"""
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
def p_decl_body(self, p):
""" decl_body : declaration_specifiers init_declarator_list_opt
"""
spec = p[1]
# p[2] (init_declarator_list_opt) is either a list or None
#
if p[2] is None:
# By the standard, you must have at least one declarator unless
# declaring a structure tag, a union tag, or the members of an
# enumeration.
#
ty = spec['type']
s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
decls = [c_ast.Decl(
name=None,
quals=spec['qual'],
storage=spec['storage'],
funcspec=spec['function'],
type=ty[0],
init=None,
bitsize=None,
coord=ty[0].coord)]
# However, this case can also occur on redeclared identifiers in
# an inner scope. The trouble is that the redeclared type's name
# gets grouped into declaration_specifiers; _build_declarations
# compensates for this.
#
else:
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)],
typedef_namespace=True)
else:
decls = self._build_declarations(
spec=spec,
decls=p[2],
typedef_namespace=True)
p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
def p_declaration(self, p):
""" declaration : decl_body SEMI
"""
p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_declaration_specifiers_1(self, p):
""" declaration_specifiers : type_qualifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_declaration_specifiers_2(self, p):
""" declaration_specifiers : type_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
def p_declaration_specifiers_3(self, p):
""" declaration_specifiers : storage_class_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
def p_declaration_specifiers_4(self, p):
""" declaration_specifiers : function_specifier declaration_specifiers_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
def p_storage_class_specifier(self, p):
""" storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
"""
p[0] = p[1]
def p_function_specifier(self, p):
""" function_specifier : INLINE
"""
p[0] = p[1]
def p_type_specifier_1(self, p):
""" type_specifier : VOID
| _BOOL
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| _COMPLEX
| SIGNED
| UNSIGNED
"""
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_type_specifier_2(self, p):
""" type_specifier : typedef_name
| enum_specifier
| struct_or_union_specifier
"""
p[0] = p[1]
def p_type_qualifier(self, p):
""" type_qualifier : CONST
| RESTRICT
| VOLATILE
"""
p[0] = p[1]
def p_init_declarator_list_1(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# If the code is declaring a variable that was declared a typedef in an
# outer scope, yacc will think the name is part of declaration_specifiers,
# not init_declarator, and will then get confused by EQUALS. Pass None
# up in place of declarator, and handle this at a higher level.
#
def p_init_declarator_list_2(self, p):
""" init_declarator_list : EQUALS initializer
"""
p[0] = [dict(decl=None, init=p[2])]
# Similarly, if the code contains duplicate typedefs of, for example,
# array types, the array portion will appear as an abstract declarator.
#
def p_init_declarator_list_3(self, p):
""" init_declarator_list : abstract_declarator
"""
p[0] = [dict(decl=p[1], init=None)]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_specifier_qualifier_list_1(self, p):
""" specifier_qualifier_list : type_qualifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
def p_specifier_qualifier_list_2(self, p):
""" specifier_qualifier_list : type_specifier specifier_qualifier_list_opt
"""
p[0] = self._add_declaration_specifier(p[2], p[1], 'type')
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
def p_struct_or_union_specifier_1(self, p):
""" struct_or_union_specifier : struct_or_union ID
| struct_or_union TYPEID
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=None,
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_2(self, p):
""" struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=None,
decls=p[3],
coord=self._coord(p.lineno(2)))
def p_struct_or_union_specifier_3(self, p):
""" struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
| struct_or_union TYPEID brace_open struct_declaration_list brace_close
"""
klass = self._select_struct_union_class(p[1])
p[0] = klass(
name=p[2],
decls=p[4],
coord=self._coord(p.lineno(2)))
def p_struct_or_union(self, p):
""" struct_or_union : STRUCT
| UNION
"""
p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
def p_struct_declaration_1(self, p):
""" struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
"""
spec = p[1]
assert 'typedef' not in spec['storage']
if p[2] is not None:
decls = self._build_declarations(
spec=spec,
decls=p[2])
elif len(spec['type']) == 1:
# Anonymous struct/union, gcc extension, C1x feature.
# Although the standard only allows structs/unions here, I see no
# reason to disallow other types since some compilers have typedefs
# here, and pycparser isn't about rejecting all invalid code.
#
node = spec['type'][0]
if isinstance(node, c_ast.Node):
decl_type = node
else:
decl_type = c_ast.IdentifierType(node)
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=decl_type)])
else:
# Structure/union members can have the same names as typedefs.
# The trouble is that the member's name gets grouped into
# specifier_qualifier_list; _build_declarations compensates.
#
decls = self._build_declarations(
spec=spec,
decls=[dict(decl=None, init=None)])
p[0] = decls
def p_struct_declaration_2(self, p):
""" struct_declaration : specifier_qualifier_list abstract_declarator SEMI
"""
# "Abstract declarator?!", you ask? Structure members can have the
# same names as typedefs. The trouble is that the member's name gets
# grouped into specifier_qualifier_list, leaving any remainder to
# appear as an abstract declarator, as in:
# typedef int Foo;
# struct { Foo Foo[3]; };
#
p[0] = self._build_declarations(
spec=p[1],
decls=[dict(decl=p[2], init=None)])
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
def p_enum_specifier_1(self, p):
""" enum_specifier : ENUM ID
| ENUM TYPEID
"""
p[0] = c_ast.Enum(p[2], None, self._coord(p.lineno(1)))
def p_enum_specifier_2(self, p):
""" enum_specifier : ENUM brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(None, p[3], self._coord(p.lineno(1)))
def p_enum_specifier_3(self, p):
""" enum_specifier : ENUM ID brace_open enumerator_list brace_close
| ENUM TYPEID brace_open enumerator_list brace_close
"""
p[0] = c_ast.Enum(p[2], p[4], self._coord(p.lineno(1)))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
def p_enumerator(self, p):
""" enumerator : ID
| ID EQUALS constant_expression
"""
if len(p) == 2:
enumerator = c_ast.Enumerator(
p[1], None,
self._coord(p.lineno(1)))
else:
enumerator = c_ast.Enumerator(
p[1], p[3],
self._coord(p.lineno(1)))
self._add_identifier(enumerator.name, enumerator.coord)
p[0] = enumerator
def p_declarator_1(self, p):
""" declarator : direct_declarator
"""
p[0] = p[1]
def p_declarator_2(self, p):
""" declarator : pointer direct_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
# Since it's impossible for a type to be specified after a pointer, assume
# it's intended to be the name for this declaration. _add_identifier will
# raise an error if this TYPEID can't be redeclared.
#
def p_declarator_3(self, p):
""" declarator : pointer TYPEID
"""
decl = c_ast.TypeDecl(
declname=p[2],
type=None,
quals=None,
coord=self._coord(p.lineno(2)))
p[0] = self._type_modify_decl(decl, p[1])
def p_direct_declarator_1(self, p):
""" direct_declarator : ID
"""
p[0] = c_ast.TypeDecl(
declname=p[1],
type=None,
quals=None,
coord=self._coord(p.lineno(1)))
def p_direct_declarator_2(self, p):
""" direct_declarator : LPAREN declarator RPAREN
"""
p[0] = p[2]
def p_direct_declarator_3(self, p):
""" direct_declarator : direct_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
def p_direct_declarator_4(self, p):
""" direct_declarator : direct_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_declarator_5(self, p):
""" direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN
| direct_declarator LPAREN identifier_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
# To see why _get_yacc_lookahead_token is needed, consider:
# typedef char TT;
# void foo(int TT) { TT = 10; }
# Outside the function, TT is a typedef, but inside (starting and
# ending with the braces) it's a parameter. The trouble begins with
# yacc's lookahead token. We don't know if we're declaring or
# defining a function until we see LBRACE, but if we wait for yacc to
# trigger a rule on that token, then TT will have already been read
# and incorrectly interpreted as TYPEID. We need to add the
# parameters to the scope the moment the lexer sees LBRACE.
#
if self._get_yacc_lookahead_token().type == "LBRACE":
if func.args is not None:
for param in func.args.params:
if isinstance(param, c_ast.EllipsisParam): break
self._add_identifier(param.name, param.coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._coord(p.lineno(1))
p[0] = c_ast.PtrDecl(
quals=p[2] or [],
type=p[3] if len(p) > 3 else None,
coord=coord)
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_parameter_type_list(self, p):
""" parameter_type_list : parameter_list
| parameter_list COMMA ELLIPSIS
"""
if len(p) > 2:
p[1].params.append(c_ast.EllipsisParam(self._coord(p.lineno(3))))
p[0] = p[1]
def p_parameter_list(self, p):
""" parameter_list : parameter_declaration
| parameter_list COMMA parameter_declaration
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_parameter_declaration_1(self, p):
""" parameter_declaration : declaration_specifiers declarator
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
p[0] = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2])])[0]
def p_parameter_declaration_2(self, p):
""" parameter_declaration : declaration_specifiers abstract_declarator_opt
"""
spec = p[1]
if not spec['type']:
spec['type'] = [c_ast.IdentifierType(['int'],
coord=self._coord(p.lineno(1)))]
# Parameters can have the same names as typedefs. The trouble is that
# the parameter's name gets grouped into declaration_specifiers, making
# it look like an old-style declaration; compensate.
#
if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
self._is_type_in_scope(spec['type'][-1].names[0]):
decl = self._build_declarations(
spec=spec,
decls=[dict(decl=p[2], init=None)])[0]
# This truly is an old-style parameter declaration
#
else:
decl = c_ast.Typename(
quals=spec['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
typename = spec['type']
decl = self._fix_decl_name_type(decl, typename)
p[0] = decl
def p_identifier_list(self, p):
""" identifier_list : identifier
| identifier_list COMMA identifier
"""
if len(p) == 2: # single parameter
p[0] = c_ast.ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
def p_initializer_1(self, p):
""" initializer : assignment_expression
"""
p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list brace_close
| brace_open initializer_list COMMA brace_close
"""
p[0] = p[2]
def p_initializer_list(self, p):
""" initializer_list : designation_opt initializer
| initializer_list COMMA designation_opt initializer
"""
if len(p) == 3: # single initializer
init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
p[0] = c_ast.InitList([init], p[2].coord)
else:
init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
p[1].exprs.append(init)
p[0] = p[1]
def p_designation(self, p):
""" designation : designator_list EQUALS
"""
p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
def p_designator(self, p):
""" designator : LBRACKET constant_expression RBRACKET
| PERIOD identifier
"""
p[0] = p[2]
def p_type_name(self, p):
""" type_name : specifier_qualifier_list abstract_declarator_opt
"""
#~ print '=========='
#~ print p[1]
#~ print p[2]
#~ print p[2].children()
#~ print '=========='
typename = c_ast.Typename(
quals=p[1]['qual'],
type=p[2] or c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(2)))
p[0] = self._fix_decl_name_type(typename, p[1]['type'])
def p_abstract_declarator_1(self, p):
""" abstract_declarator : pointer
"""
dummytype = c_ast.TypeDecl(None, None, None)
p[0] = self._type_modify_decl(
decl=dummytype,
modifier=p[1])
def p_abstract_declarator_2(self, p):
""" abstract_declarator : pointer direct_abstract_declarator
"""
p[0] = self._type_modify_decl(p[2], p[1])
def p_abstract_declarator_3(self, p):
""" abstract_declarator : direct_abstract_declarator
"""
p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
def p_direct_abstract_declarator_1(self, p):
""" direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
p[0] = p[2]
def p_direct_abstract_declarator_2(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=p[3],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_3(self, p):
""" direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=p[2],
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_4(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
coord=self._coord(p.lineno(1)))
def p_direct_abstract_declarator_6(self, p):
""" direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
"""
func = c_ast.FuncDecl(
args=p[3],
type=None,
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_direct_abstract_declarator_7(self, p):
""" direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
"""
p[0] = c_ast.FuncDecl(
args=p[2],
type=c_ast.TypeDecl(None, None, None),
coord=self._coord(p.lineno(1)))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
def p_compound_statement_1(self, p):
""" compound_statement : brace_open block_item_list_opt brace_close """
p[0] = c_ast.Compound(
block_items=p[2],
coord=self._coord(p.lineno(1)))
def p_labeled_statement_1(self, p):
""" labeled_statement : ID COLON statement """
p[0] = c_ast.Label(p[1], p[3], self._coord(p.lineno(1)))
def p_labeled_statement_2(self, p):
""" labeled_statement : CASE constant_expression COLON statement """
p[0] = c_ast.Case(p[2], [p[4]], self._coord(p.lineno(1)))
def p_labeled_statement_3(self, p):
""" labeled_statement : DEFAULT COLON statement """
p[0] = c_ast.Default([p[3]], self._coord(p.lineno(1)))
def p_selection_statement_1(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement """
p[0] = c_ast.If(p[3], p[5], None, self._coord(p.lineno(1)))
def p_selection_statement_2(self, p):
""" selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._coord(p.lineno(1)))
def p_selection_statement_3(self, p):
""" selection_statement : SWITCH LPAREN expression RPAREN statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._coord(p.lineno(1))))
def p_iteration_statement_1(self, p):
""" iteration_statement : WHILE LPAREN expression RPAREN statement """
p[0] = c_ast.While(p[3], p[5], self._coord(p.lineno(1)))
def p_iteration_statement_2(self, p):
""" iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._coord(p.lineno(1)))
def p_iteration_statement_3(self, p):
""" iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._coord(p.lineno(1)))
def p_iteration_statement_4(self, p):
""" iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
p[0] = c_ast.For(c_ast.DeclList(p[3]), p[4], p[6], p[8], self._coord(p.lineno(1)))
def p_jump_statement_1(self, p):
""" jump_statement : GOTO ID SEMI """
p[0] = c_ast.Goto(p[2], self._coord(p.lineno(1)))
def p_jump_statement_2(self, p):
""" jump_statement : BREAK SEMI """
p[0] = c_ast.Break(self._coord(p.lineno(1)))
def p_jump_statement_3(self, p):
""" jump_statement : CONTINUE SEMI """
p[0] = c_ast.Continue(self._coord(p.lineno(1)))
def p_jump_statement_4(self, p):
""" jump_statement : RETURN expression SEMI
| RETURN SEMI
"""
p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._coord(p.lineno(1)))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._coord(p.lineno(1)))
else:
p[0] = p[1]
def p_expression(self, p):
""" expression : assignment_expression
| expression COMMA assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
if not isinstance(p[1], c_ast.ExprList):
p[1] = c_ast.ExprList([p[1]], p[1].coord)
p[1].exprs.append(p[3])
p[0] = p[1]
def p_typedef_name(self, p):
""" typedef_name : TYPEID """
p[0] = c_ast.IdentifierType([p[1]], coord=self._coord(p.lineno(1)))
def p_assignment_expression(self, p):
""" assignment_expression : conditional_expression
| unary_expression assignment_operator assignment_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
    # precedence and associativity. Why work hard? I'll just use
    # the built-in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
def p_assignment_operator(self, p):
""" assignment_operator : EQUALS
| XOREQUAL
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
"""
p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._coord(p.lineno(1)))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._coord(p.lineno(1)))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._coord(p.lineno(3)))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._coord(p.lineno(1)))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
"""
p[0] = c_ast.Constant(
'int', p[1], self._coord(p.lineno(1)))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
p[0] = c_ast.Constant(
'float', p[1], self._coord(p.lineno(1)))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._coord(p.lineno(1)))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._coord(p.lineno(1)))
else:
            p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', '')
#------------------------------------------------------------------------------
if __name__ == "__main__":
import pprint
import time, sys
#t1 = time.time()
#parser = CParser(lex_optimize=True, yacc_debug=True, yacc_optimize=False)
#sys.write(time.time() - t1)
#buf = '''
#int (*k)(int);
#'''
## set debuglevel to 2 for debugging
#t = parser.parse(buf, 'x.c', debuglevel=0)
#t.show(showcoord=True)
| BartoszCichecki/onlinepython | onlinepython/pypy-2.4.0-win32/lib_pypy/cffi/_pycparser/c_parser.py | Python | gpl-2.0 | 59,384 |
def extractThatbadtranslatorWordpressCom(item):
'''
Parser for 'thatbadtranslator.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractThatbadtranslatorWordpressCom.py | Python | bsd-3-clause | 574 |
#!/usr/bin/env python
# external
import numpy as np
from sympy import symbols
from numpy.testing import assert_allclose
# local
from ..shapes import *
def test_shape():
shape = Shape(name='shape', color='blue')
assert shape.name == 'shape'
assert shape.__str__() == 'Shape shape color:blue'
assert shape.__repr__() == 'Shape'
assert shape.color == 'blue'
shape.name = 'shape1'
assert shape.name == 'shape1'
shape.color = 'red'
assert shape.color == 'red'
assert shape.generate_dict() == {"color": "red",
"type": "Shape",
"name": "shape1"}
assert isinstance(shape, Shape)
#testing unnamed
shape_ = Shape(color='blue')
assert shape_.name == 'unnamed'
assert shape_.__str__() == 'Shape unnamed color:blue'
assert shape_.__repr__() == 'Shape'
def test_shape_geometry_with_expressions():
shape = Shape()
shape.length = symbols('l')
shape.geometry_attrs.append('length')
expected = {"color": "grey",
"type": "Shape",
"name": "unnamed",
"length": 16.0}
actual = shape.generate_dict(constant_map={symbols('l'): 16.0})
assert actual == expected
shape = Shape()
shape.length = symbols('l1') + symbols('l2') ** 2
shape.geometry_attrs.append('length')
expected = {"color": "grey",
"type": "Shape",
"name": "unnamed",
"length": 20.0}
actual = shape.generate_dict(constant_map={symbols('l1'): 4.0,
symbols('l2'): 4.0})
assert actual == expected
def test_cube():
cube = Cube(10.0, name='cube', color='blue')
assert cube.name == 'cube'
assert cube.__str__() == 'Cube cube color:blue length:10.0'
assert cube.__repr__() == 'Cube'
assert cube.length == 10.0
assert cube.color == 'blue'
cube.name = 'cube1'
assert cube.name == 'cube1'
cube.length = 16.0
assert cube.length == 16.0
cube.color = 'red'
assert cube.color == 'red'
assert cube.generate_dict() == {"color": "red",
"type": "Cube",
"name": "cube1",
"length": 16.0}
assert isinstance(cube, Shape)
#testing unnamed
cube = Cube(10.0, color='blue')
assert cube.name == 'unnamed'
assert cube.__str__() == 'Cube unnamed color:blue length:10.0'
assert cube.__repr__() == 'Cube'
cube = Cube(symbols('V') ** (1.0 / 3.0))
actual = cube.generate_dict(constant_map={symbols('V'): 27.0})
assert actual == {"color": "grey",
"type": "Cube",
"name": "unnamed",
"length": 3.0}
def test_cylinder():
cylinder = Cylinder(10.0, 5.0, name='cylinder', color='blue')
assert cylinder.name == 'cylinder'
assert cylinder.__str__() == \
'Cylinder cylinder color:blue length:10.0 radius:5.0'
assert cylinder.__repr__() == 'Cylinder'
assert cylinder.length == 10.0
assert cylinder.radius == 5.0
assert cylinder.color == 'blue'
cylinder.name = 'cylinder1'
assert cylinder.name == 'cylinder1'
cylinder.length = 14.0
assert cylinder.length == 14.0
cylinder.radius = 7.0
assert cylinder.radius == 7.0
cylinder.color = 'cyan'
assert cylinder.color == 'cyan'
assert cylinder.generate_dict() == {"color": "cyan",
"type": "Cylinder",
"name": "cylinder1",
"length": 14.0,
"radius": 7.0}
assert isinstance(cylinder, Shape)
cylinder_ = Cylinder(10.0, 5.0, color='blue')
assert cylinder_.name == 'unnamed'
assert cylinder_.__str__() == \
'Cylinder unnamed color:blue length:10.0 radius:5.0'
assert cylinder_.__repr__() == 'Cylinder'
def test_cone():
cone = Cone(10.0, 5.0, name='cone', color='darkblue')
assert cone.name == 'cone'
assert cone.__str__() == \
'Cone cone color:darkblue length:10.0 radius:5.0'
assert cone.__repr__() == 'Cone'
assert cone.length == 10.0
assert cone.radius == 5.0
assert cone.color == 'darkblue'
cone.name = 'cone1'
assert cone.name == 'cone1'
cone.length = 16.0
assert cone.length == 16.0
cone.radius = 3.0
assert cone.radius == 3.0
cone.color = 'darkcyan'
assert cone.color == 'darkcyan'
assert cone.generate_dict() == {"color": "darkcyan",
"type": "Cone",
"name": "cone1",
"length": 16.0,
"radius": 3.0}
assert isinstance(cone, Shape)
cone_ = Cone(10.0, 5.0, color='darkblue')
assert cone_.name == 'unnamed'
assert cone_.__str__() == \
'Cone unnamed color:darkblue length:10.0 radius:5.0'
assert cone_.__repr__() == 'Cone'
def test_sphere():
sphere = Sphere(10.0, name='sphere', color='azure')
assert sphere.name == 'sphere'
assert sphere.__str__() == 'Sphere sphere color:azure radius:10.0'
assert sphere.__repr__() == 'Sphere'
assert sphere.radius == 10.0
assert sphere.color == 'azure'
sphere.name = 'sphere1'
assert sphere.name == 'sphere1'
sphere.radius = 14.0
assert sphere.radius == 14.0
sphere.color = 'aqua'
assert sphere.color == 'aqua'
assert sphere.generate_dict() == {"color": "aqua",
"type": "Sphere",
"name": "sphere1",
"radius": 14.0}
assert isinstance(sphere, Shape)
sphere_ = Sphere(10.0, color='azure')
assert sphere_.name == 'unnamed'
assert sphere_.__str__() == 'Sphere unnamed color:azure radius:10.0'
assert sphere_.__repr__() == 'Sphere'
def test_circle():
circle = Circle(10.0, name='circle', color='gold')
assert circle.name == 'circle'
assert circle.__str__() == 'Circle circle color:gold radius:10.0'
assert circle.__repr__() == 'Circle'
assert circle.radius == 10.0
assert circle.color == 'gold'
circle.name = 'circle1'
assert circle.name == 'circle1'
circle.radius = 12.0
assert circle.radius == 12.0
circle.color = 'black'
assert circle.color == 'black'
assert circle.generate_dict() == {"color": "black",
"type": "Circle",
"name": "circle1",
"radius": 12.0}
assert isinstance(circle, Shape)
circle = Circle(10.0, color='gold')
assert circle.name == 'unnamed'
assert circle.__str__() == 'Circle unnamed color:gold radius:10.0'
assert circle.__repr__() == 'Circle'
def test_mesh():
point_list = [[2., 3., 1.], [4., 6., 2.],
[5., 3., 1.], [5., 3., 6.],
[2., 8., 4.], [7., 4., 1.]]
mesh_shape = Mesh(point_list, name='mesh', color='green')
assert mesh_shape.name == 'mesh'
assert mesh_shape.__str__() == \
'Mesh mesh color:green points:[[ 2. 3. 1.]\n [ 4. 6. 2.]\n [ 5. 3. 1.]\n [ 5. 3. 6.]\n [ 2. 8. 4.]\n [ 7. 4. 1.]]'
assert mesh_shape.__repr__() == 'Mesh'
assert_allclose(mesh_shape.points, point_list)
assert mesh_shape.color == 'green'
mesh_shape.name = 'mesh1'
assert mesh_shape.name == 'mesh1'
new_point_list = [[3., 4., 12.],
[2., 4., 4.],
[3., 2., 41.],
[2., 5., 4.]]
mesh_shape.points = new_point_list
assert_allclose(mesh_shape.points, new_point_list)
mesh_shape.color = 'pink'
assert mesh_shape.color == 'pink'
actual = mesh_shape.generate_dict()
expected = {"color": "pink",
"type": "Mesh",
"name": "mesh1",
"points": np.asarray(new_point_list)}
for key in ['color', 'type', 'name']:
actual[key] == expected[key]
assert_allclose(actual['points'], expected['points'])
assert isinstance(mesh_shape, Shape)
mesh_shape_ = Mesh(points=point_list, color='green')
assert mesh_shape_.name == 'unnamed'
assert mesh_shape_.__str__() == \
'Mesh unnamed color:green points:[[ 2. 3. 1.]\n [ 4. 6. 2.]\n [ 5. 3. 1.]\n [ 5. 3. 6.]\n [ 2. 8. 4.]\n [ 7. 4. 1.]]'
assert mesh_shape_.__repr__() == 'Mesh'
def test_plane():
plane = Plane(10.0, 20.0, name='plane', color='indigo')
assert plane.name == 'plane'
assert plane.__str__() == \
'Plane plane color:indigo length:10.0 width:20.0'
assert plane.__repr__() == 'Plane'
assert plane.length == 10.0
assert plane.width == 20.0
assert plane.color == 'indigo'
plane.name = 'plane1'
assert plane.name == 'plane1'
plane.length = 30.0
assert plane.length == 30.0
plane.width = 10.0
assert plane.width == 10.0
plane.color = 'lavender'
assert plane.color == 'lavender'
assert plane.generate_dict() == {"color": "lavender",
"type": "Plane",
"name": "plane1",
"width": 10.0,
"length": 30.0}
assert isinstance(plane, Shape)
plane_ = Plane(10.0, 20.0, color='indigo')
assert plane_.name == 'unnamed'
assert plane_.__str__() == \
'Plane unnamed color:indigo length:10.0 width:20.0'
assert plane_.__repr__() == 'Plane'
def test_tetrahedron():
    # Tetrahedron, Octahedron and Icosahedron geometry is defined by the
    # radius of the circumscribed sphere; this is mentioned explicitly in
    # the docstrings.
tetrahedron = Tetrahedron(5.0, name='tetrahedron', color='maroon')
assert tetrahedron.name == 'tetrahedron'
assert tetrahedron.__str__() == \
'Tetrahedron tetrahedron color:maroon radius:5.0'
assert tetrahedron.__repr__() == 'Tetrahedron'
assert tetrahedron.radius == 5.0
assert tetrahedron.color == 'maroon'
tetrahedron.name = 'tetrahedron1'
assert tetrahedron.name == 'tetrahedron1'
tetrahedron.radius = 7.0
assert tetrahedron.radius == 7.0
tetrahedron.color = 'orange'
assert tetrahedron.color == 'orange'
assert tetrahedron.generate_dict() == {"color": "orange",
"type": "Tetrahedron",
"name": "tetrahedron1",
"radius": 7.0}
assert isinstance(tetrahedron, Shape)
tetrahedron_ = Tetrahedron(5.0, color='maroon')
assert tetrahedron_.name == 'unnamed'
assert tetrahedron_.__str__() == \
'Tetrahedron unnamed color:maroon radius:5.0'
assert tetrahedron_.__repr__() == 'Tetrahedron'
def test_octahedron():
octahedron = Octahedron(12.0, name='octahedron', color='purple')
assert octahedron.name == 'octahedron'
assert octahedron.__str__() == \
'Octahedron octahedron color:purple radius:12.0'
assert octahedron.__repr__() == 'Octahedron'
assert octahedron.radius == 12.0
assert octahedron.color == 'purple'
octahedron.name = 'octahedron1'
assert octahedron.name == 'octahedron1'
octahedron.radius = 2.0
assert octahedron.radius == 2.0
octahedron.color = 'red'
assert octahedron.color == 'red'
assert octahedron.generate_dict() == {"color": "red",
"type": "Octahedron",
"name": "octahedron1",
"radius": 2.0}
assert isinstance(octahedron, Shape)
octahedron_ = Octahedron(12.0, color='purple')
assert octahedron_.name == 'unnamed'
assert octahedron_.__str__() == \
'Octahedron unnamed color:purple radius:12.0'
assert octahedron_.__repr__() == 'Octahedron'
def test_icosahedron():
icosahedron = Icosahedron(11.0, name='icosahedron', color='blue')
assert icosahedron.name == 'icosahedron'
assert icosahedron.__str__() == \
'Icosahedron icosahedron color:blue radius:11.0'
assert icosahedron.__repr__() == 'Icosahedron'
assert icosahedron.radius == 11.0
assert icosahedron.color == 'blue'
icosahedron.name = 'icosahedron1'
assert icosahedron.name == 'icosahedron1'
icosahedron.radius = 3.0
assert icosahedron.radius == 3.0
icosahedron.color = 'blue'
assert icosahedron.color == 'blue'
assert icosahedron.generate_dict() == {"color": "blue",
"type": "Icosahedron",
"name": "icosahedron1",
"radius": 3.0}
assert isinstance(icosahedron, Shape)
icosahedron_ = Icosahedron(11.0, color='blue')
assert icosahedron_.name == 'unnamed'
assert icosahedron_.__str__() == \
'Icosahedron unnamed color:blue radius:11.0'
assert icosahedron_.__repr__() == 'Icosahedron'
def test_torus():
torus = Torus(10.0, 2.0, name='torus', color='red')
assert torus.name == 'torus'
assert torus.__str__() == \
'Torus torus color:red radius:10.0 tube_radius:2.0'
assert torus.__repr__() == 'Torus'
assert torus.radius == 10.0
assert torus.tube_radius == 2.0
assert torus.color == 'red'
torus.name = 'torus1'
assert torus.name == 'torus1'
torus.radius = 15.0
assert torus.radius == 15.0
torus.tube_radius = 4.0
assert torus.tube_radius == 4.0
torus.color = '#FFFFFF'
assert torus.color == '#FFFFFF'
assert torus.generate_dict() == {"color": "#FFFFFF",
"type": "Torus",
"name": "torus1",
"radius": 15.0,
"tube_radius": 4.0}
assert isinstance(torus, Shape)
torus_ = Torus(10.0, 2.0, color='red')
assert torus_.name == 'unnamed'
assert torus_.__str__() == \
'Torus unnamed color:red radius:10.0 tube_radius:2.0'
assert torus_.__repr__() == 'Torus'
def test_tube():
point_list = [[2., 4., 5.], [2., 6., 4.], [1., 5., 8.]]
tube = Tube(10.0, point_list, name='tube', color='red')
assert tube.name == 'tube'
assert tube.__str__() == 'Tube tube color:red points:[[ 2. 4. 5.]\n [ 2. 6. 4.]\n [ 1. 5. 8.]] radius:10.0'
assert tube.__repr__() == 'Tube'
assert tube.radius == 10.0
assert_allclose(tube.points, point_list)
assert tube.color == 'red'
tube.name = 'tube1'
assert tube.name == 'tube1'
tube.radius = 15.0
assert tube.radius == 15.0
new_point_list = [[3., 4., 5.], [1, 6., 8.], [2., 7., 3.]]
tube.points = new_point_list
assert_allclose(tube.points, new_point_list)
tube.color = 'pink'
assert tube.color == 'pink'
actual = tube.generate_dict()
expected = {"color": "pink",
"type": "Tube",
"name": "tube1",
"radius": 15.0,
"points": np.asarray(new_point_list)}
for key in ['color', 'type', 'name', 'radius']:
        assert actual[key] == expected[key]
assert_allclose(actual['points'], expected['points'])
assert isinstance(tube, Shape)
tube_ = Tube(10.0, point_list, color='red')
assert tube_.name == 'unnamed'
assert tube_.__str__() == 'Tube unnamed color:red points:[[ 2. 4. 5.]\n [ 2. 6. 4.]\n [ 1. 5. 8.]] radius:10.0'
assert tube_.__repr__() == 'Tube'
def test_torus_knot():
torus_knot = TorusKnot(10.0, 2.0, name='torus_knot', color='red')
assert torus_knot.name == 'torus_knot'
assert torus_knot.__str__() == \
'TorusKnot torus_knot color:red radius:10.0 tube_radius:2.0'
assert torus_knot.__repr__() == 'TorusKnot'
assert torus_knot.radius == 10.0
assert torus_knot.tube_radius == 2.0
assert torus_knot.color == 'red'
torus_knot.name = 'torus_knot1'
assert torus_knot.name == 'torus_knot1'
torus_knot.radius = 12.0
assert torus_knot.radius == 12.0
torus_knot.tube_radius = 1.0
assert torus_knot.tube_radius == 1.0
torus_knot.color = 'blue'
assert torus_knot.color == 'blue'
assert torus_knot.generate_dict() == {"color": "blue",
"type": "TorusKnot",
"name": "torus_knot1",
"radius": 12.0,
"tube_radius": 1}
assert isinstance(torus_knot, Shape)
torus_knot_ = TorusKnot(10.0, 2.0, color='red')
assert torus_knot_.name == 'unnamed'
assert torus_knot_.__str__() == \
'TorusKnot unnamed color:red radius:10.0 tube_radius:2.0'
assert torus_knot_.__repr__() == 'TorusKnot'
| nouiz/pydy | pydy/viz/tests/test_shapes.py | Python | bsd-3-clause | 17,023 |
# coding=utf-8
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import shutil
import hashlib
from copy import deepcopy
try:
# noinspection PyCompatibility
from StringIO import StringIO # python2
except ImportError:
from io import StringIO # python3
import subprocess as sp
try:
# noinspection PyUnresolvedReferences
from subprocess import TimeoutExpired # python 3 only
except ImportError:
pass
#from . import logger as module_logger
from astwro.config.logger import logger as module_logger
from astwro.config import get_config
from astwro.utils import tmpdir, TmpDir
from .output_processors import StreamKeeper, OutputProvider
class Runner(object):
"""
Base class for specific runners.
Maintains underlying process lifetime,
communication with process (streams, output processors chain), runner dir etc...
"""
class RunnerException(Exception):
"""Exceptions raised by `Runner` and subclasses"""
def __init__(self, message, runner):
self.commandline = str(runner.executable) + ' ' + ' '.join(runner.arguments)
self.stdin = runner.input
self.stdout = runner.output
self.stderr = runner.stderr
super(Runner.RunnerException, self).__init__(message)
def __str__(self):
return (super(Runner.RunnerException,self).__str__()
+ '\n>> Commandline:\n{}\n>> Stdin:\n{}\n>> Stdout:\n{}\n>> Stderr:\n{}\n'.format(
self.commandline, self.stdin, self.stdout, self.stderr
))
class ExitError(RunnerException):
"""Exceptions raised when underlying process returns error code on exit"""
def __init__(self, message, runner, code):
super(Runner.ExitError, self).__init__(message, runner)
self.code = code
def __str__(self):
return (super(Runner.ExitError,self).__str__()
+ '\n>> Process exit code: {}'.format(self.code))
class NoFileError(RunnerException):
def __init__(self, message, runner, filename):
super(Runner.NoFileError, self).__init__(message, runner)
self.filename = filename
class RunnerValueError(ValueError, RunnerException):
pass
class RunnerTypeError(TypeError, RunnerException):
pass
raise_on_nonzero_exitcode = True
preserve_process = False # not implemented
def __init__(self, dir=None, batch=False):
"""
        :param dir: path name or TmpDir object; if not provided, a new temp dir will be used
        :param bool batch: whether Daophot has to work in batch mode.
"""
self.logger = module_logger.getChild(type(self).__name__)
self.executable = None
self.arguments = []
self.batch_mode = batch
self.__stream_keeper = None
self._prepare_dir(dir)
self._reset()
def _reset(self):
"""Resets runner without cleaning/changing runner dir
allows execution of new sequence in same dir and files"""
self.input = None
self.output = None
self.stderr = None
self.returncode = None
self.__process = None
self.__commands = ''
self.ext_output_files = set()
if self.__stream_keeper is not None:
self.__stream_keeper.stream = None # new chain containing only old StreamKeeper
else:
self.__stream_keeper = StreamKeeper(runner=self) # new chain containing new StreamKeeper
self.__processors_chain_last = self.__stream_keeper
self.__processors_chain_first = None
def __deepcopy__(self, memo):
cls = self.__class__
new = cls.__new__(cls)
memo[id(self)] = new
new.__stream_keeper = None
new._reset()
new.logger = self.logger
new.executable = self.executable
# new.output = self.output
# new.stderr = self.stderr
# new.returncode = self.returncode
new.batch_mode = self.batch_mode
new.arguments = self.arguments
# new.__process = None
# new.__commands = self.__commands
# new.ext_output_files = set()
# new.__processors_chain_last = deepcopy(self.__processors_chain_last, memo) # copy chain
# new.__processors_chain_first = memo[id(self.__processors_chain_first)] # find StreamKeeper in copied chain
# new.__stream_keeper = memo[id(self.__stream_keeper)] # find StreamKeeper in copied chain
new.dir = deepcopy(self.dir, memo)
return new
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def clone(self):
"""Clones runner
        If the *runner directory* was provided in the constructor, the clone will share the same dir.
        Otherwise, if the *runner directory* is a temp dir created implicitly by the runner, the clone
        creates its own one, and the content of the *runner directory* is copied from source to clone."""
return deepcopy(self)
def close(self):
"""Cleans things up."""
self._on_exit()
self.dir = None
@property
def mode(self):
"""Either "normal" or "batch". In batch mode, commands are not executed but collected
on execution queue, then run together, in single process, one by one,
triggered by :py:meth:`run()` method"""
return 'batch' if self.batch_mode else 'normal'
@mode.setter
def mode(self, m):
        if m.lower() == 'batch':
            self.batch_mode = True
        elif m.lower() == 'normal':
            self.batch_mode = False
        else:
            raise Runner.RunnerValueError('mode has to be either "normal" or "batch"', self)
def _init_workdir_files(self, dir):
pass
def _update_executable(self, exe):
"""Find exe key in configuration and set as Runner executable"""
if self.executable is None:
self.executable = os.path.expanduser(get_config().get('executables', exe))
def _prepare_dir(self, dir=None, init_files=True):
if dir is None:
dir = tmpdir(prefix='pydaophot_tmp')
elif isinstance(dir, str):
dir = tmpdir(use_existing=dir)
elif not isinstance(dir, TmpDir):
raise Runner.RunnerTypeError('dir must be either: TmpDir object, str, None', self)
self.dir = dir
if init_files:
self._init_workdir_files(dir)
def copy_to_runner_dir(self, source, filename=None):
"""Copies source file to runner dir under name filename or the same
as original if filename is None. Overwrites existing file."""
dst = self.dir.path if filename is None else os.path.join(self.dir.path, filename)
shutil.copy(source, dst)
def link_to_runner_dir(self, source, link_filename=None):
# type: (str, str) -> None
"""Creates symlink in runner dir under name filename or the same
as original if filename is None. Overwrites existing link.
:param source: file patch
:param link_filename: worker dir link name, default: same as filename part of source"""
source = self.expand_path(source)
# if not os.path.isfile(source):
# raise IOError('Source file {} not found'.format(source))
if link_filename is None:
link_filename = os.path.basename(source)
dest = os.path.join(self.dir.path, link_filename)
try:
os.remove(dest)
except OSError:
pass
os.symlink(source, dest)
def copy_from_runner_dir(self, filename, dest='./'):
"""Copies file: filename from runner dir. Overwrites existing file."""
shutil.copy(os.path.join(self.dir.path, filename), dest)
def link_from_runner_dir(self, filename, dest='./'):
"""Creates symlink in dest of file from runner dir.
dest can be either file path for new symlink or directory.
In second case name of symlink will be filename. Overwrites existing file."""
if os.path.basename(dest) == '':
dest = os.path.join(dest, filename)
try:
os.remove(dest)
except OSError:
pass
os.symlink(os.path.join(self.dir.path, filename), dest)
def file_from_runner_dir(self, filename):
"""Simply adds runner dir path into filename"""
return os.path.join(self.dir.path, filename)
def exists_in_runner_dir(self, filename):
"""Checks for filename existence in runner dir"""
return os.path.exists(os.path.join(self.dir.path, filename))
def rm_from_runner_dir(self, filename):
"""Removes (if exists) file filename from runner dir"""
try:
os.remove(os.path.join(self.dir.path, filename))
except OSError:
pass
@staticmethod
def expand_path(path):
"""Expand user ~ directory and finds absolute path."""
if path is None:
path = ''
else:
path = os.path.abspath(os.path.expanduser(path))
return path
def absolute_path(self, path):
"""Returns absolute path for filepath parameter, if :arg:path contain filename only, runner dir is added"""
if os.path.basename(path) != path: # not in runner directory
absolute = self.expand_path(path)
else:
absolute = os.path.join(self.dir.path, path)
return absolute
@staticmethod
def _runner_dir_file_name(filepath='', prefix='', suffix='', signature=None, maxlen=30):
# type: (str, str, str, str, int) -> str
"""Generates name used in Runner local dir for filepath
Files processed by underlying process are always in it's working directory
(runner directory). For files from other location in filesystem, copies
or links in runner directory are maintained. Names of that files are prefixed
with hash (shortened) of filepath to avoid collisions.
"""
if signature is None:
signature = filepath
basename, ext = os.path.splitext(os.path.basename(filepath))
maxbasename = maxlen - 7 - len(suffix) - len(prefix) - len(ext)
if maxbasename < 0:
            raise ValueError('Cannot fit working dir filename in maxlen={} characters with prefix={} and suffix={}'
.format(maxlen, prefix, suffix))
basename = basename[: maxbasename] # cut basename to fit
return prefix + str(hashlib.md5(str(signature).encode()).hexdigest())[:6] + '_' + basename + suffix + ext
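    # Illustrative example (hypothetical path, not from the original code): a call
    # like Runner._runner_dir_file_name('/data/frames/NGC7662.fits', suffix='.off')
    # yields a name of the form '<6 hex md5 chars>_NGC7662.off.fits', so the same
    # external path always maps to the same local name while different paths are
    # very unlikely to collide.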
def _prepare_output_file(self, data, preservefilename=False):
# type: (str, bool) -> (str, str)
return self._prepare_io_file(data, output=True, preservefilename=preservefilename)
def _prepare_input_file(self, path, preservefilename=False):
# type: (str, bool) -> (str, str)
return self._prepare_io_file(path, output=False, preservefilename=preservefilename)
def _prepare_io_file(self, path, output, preservefilename=False):
# type: (str, bool) -> (str, str)
""" make link for non-local input files in runner dir, gen runner dir filename """
if not path:
return '',''
if os.path.dirname(os.path.abspath(path)) == self.dir.path: # path to runner dir provided, cut it
path = os.path.basename(path)
if os.path.basename(path) != path: # not in runner directory
absolute = self.expand_path(path)
local = os.path.basename(path) if preservefilename else self._runner_dir_file_name(absolute)
if output:
# add to list of files to update after run
self.ext_output_files.add(absolute)
elif absolute not in self.ext_output_files:
self.link_to_runner_dir(absolute, local)
self.logger.debug("Linking input file into runner directory: {} <- {}".format(local, absolute))
else:
absolute = os.path.join(self.dir.path, path)
local = path
if output:
# remove runner dir file if exist
self.rm_from_runner_dir(local)
return local, absolute
def _pre_run(self, wait):
pass
def run(self, wait=True):
"""
        Execute the commands queue.
        In the "normal" :meth:`mode <mode>` there is no need to call :meth:`run`, because all commands are
        executed immediately. In "batch" :meth:`mode <mode>`, command execution is queued and postponed
        until :meth:`run` is called.
        :param bool wait:
            If False, :meth:`run` returns without waiting for the commands to finish (asynchronous processing).
Call :meth:`wait_for_results` before accessing results.
:return: None
"""
self._pre_run(wait)
try:
self.__process = sp.Popen([self.executable] + self.arguments,
stdin=sp.PIPE,
stdout=sp.PIPE,
stderr=sp.PIPE,
cwd=self.dir.path)
except OSError as e:
self.logger.error(
                'Executable %s is expected in PATH; configure the executable name/path in e.g. ~/pydaophot.cfg',
self.executable)
raise e
self.logger.debug('STDIN:\n' + self.__commands)
if wait:
self.__communicate(self.__commands)
else:
if sys.version_info[0] > 2: # python 3 has timeout in communicate
try:
self.__communicate(self.__commands, timeout=0.01)
except TimeoutExpired:
pass
            else:  # python 2 - write directly to stdin and close it to flush and generate EOF
self.__process.stdin.write(self.__commands)
self.__process.stdin.close()
self.__process.stdin = None
def is_ready_to_run(self):
"""
        Returns True if there are commands waiting to run but the process has not been started yet
:return: bool
"""
return self.__commands and self.__process is None
@property
def running(self):
"""
        Whether the runner is running
        ``True`` if the executable was started in async mode :meth:`run(wait=False) <run>` and no output has been collected yet.
        .. Note::
            Even if the executable has finished, output will not be collected and :meth:`running <running>` will
            return ``True`` until the user asks for results or calls :meth:`wait_for_results()`
:return: bool
"""
return self.__process is not None and self.output is None
def has_finished_run(self):
"""
Returns True if process has finished and output is available
:return: bool
"""
return self.output is not None
def wait_for_results(self):
"""In the "batch" mode, waits for commands completion if :meth:`run(wait=False) <run>` was called """
if self.running:
self.__communicate()
if self.is_ready_to_run():
self.run(wait=True)
def __communicate(self, inpt=None, timeout=None):
i = inpt.encode(encoding='ascii') if inpt else None
self.input = i
o, e = self.__process.communicate(i, timeout=timeout) if timeout else self.__process.communicate(i)
self.output = o.decode('ascii')
self.stderr = e.decode('ascii')
self.logger.debug('STDOUT:\n' + self.output)
self.__stream_keeper.stream = StringIO(self.output)
self.returncode = self.__process.returncode
if self.returncode != 0:
self.logger.warning('{} process finished with error code {}'.format(self.executable, self.returncode))
if self.raise_on_nonzero_exitcode:
raise Runner.ExitError('Execution failed, exit code {}'.format(self.returncode), self, self.returncode)
# copy results - output files from runners directory to user specified path
for f in self.ext_output_files:
try:
self.copy_from_runner_dir(self._runner_dir_file_name(f), f)
except FileNotFoundError as e:
msg = '{} process does not produce expected output file {}<--{}'.format(self.executable, f,
self._runner_dir_file_name(f))
self.logger.error(msg)
if self.raise_on_nonzero_exitcode:
raise Runner.NoFileError(msg, self, self.returncode)
# fill chained processors buffers
self.__processors_chain_last.get_output_stream()
def _get_ready_for_commands(self):
if self.running:
self.wait_for_results() # if running wait
if self.has_finished_run(): # if was running reset and get ready for new process
self._reset()
def _insert_processing_step(self, std_in, output_processor=None, on_beginning=False):
if on_beginning:
self.__commands = std_in + self.__commands
else:
self.__commands += std_in
if output_processor is not None:
if not isinstance(output_processor, OutputProvider):
                raise Runner.RunnerTypeError('output_processor must be an OutputProvider subclass', self)
output_processor.logger = self.logger
# chain organisation:
# [stream_keeper]<-[processors_chain_first]<-[]<-[]<-[processors_chain_last]
if on_beginning:
output_processor._prev_in_chain = self.__stream_keeper
if self.__processors_chain_first is not None:
self.__processors_chain_first._prev_in_chain = output_processor
self.__processors_chain_first = output_processor
else:
output_processor._prev_in_chain = self.__processors_chain_last
self.__processors_chain_last = output_processor
if self.__processors_chain_first is None:
self.__processors_chain_first = output_processor
return output_processor
def _on_exit(self):
pass
| majkelx/astwro | astwro/exttools/Runner.py | Python | mit | 18,414 |
# Created By: Virgil Dupras
# Created On: 2011-04-19
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import sys
import traceback
# Taken from http://bzimmer.ziclix.com/2008/12/17/python-thread-dumps/
def stacktraces():
code = []
for thread_id, stack in sys._current_frames().items():
code.append("\n# ThreadID: %s" % thread_id)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
return "\n".join(code)
| arsenetar/dupeguru | hscommon/debug.py | Python | gpl-3.0 | 834 |
#!/usr/bin/python
#
# ==-- process-stats-dir - summarize one or more Swift -stats-output-dirs --==#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014-2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ==------------------------------------------------------------------------==#
#
# This file processes the contents of one or more directories generated by
# `swiftc -stats-output-dir` and emits summary data, traces etc. for analysis.
import argparse
import csv
import itertools
import json
import os
import platform
import re
import sys
import time
import urllib
import urllib2
from collections import namedtuple
from operator import attrgetter
from jobstats import load_stats_dir, merge_all_jobstats
MODULE_PAT = re.compile(r'^(\w+)\.')
def module_name_of_stat(name):
return re.match(MODULE_PAT, name).groups()[0]
def stat_name_minus_module(name):
return re.sub(MODULE_PAT, '', name)
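# For example (illustrative): module_name_of_stat('MyModule.NumIRInsts') returns
# 'MyModule', and stat_name_minus_module('MyModule.NumIRInsts') returns
# 'NumIRInsts'; a name with no 'Module.' prefix is returned unchanged by
# stat_name_minus_module.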
# Perform any custom processing of args here, in particular the
# select_stats_from_csv_baseline step, which is a bit subtle.
def vars_of_args(args):
vargs = vars(args)
if args.select_stats_from_csv_baseline is not None:
b = read_stats_dict_from_csv(args.select_stats_from_csv_baseline)
if args.group_by_module:
vargs['select_stat'] = set(stat_name_minus_module(k)
for k in b.keys())
else:
vargs['select_stat'] = b.keys()
return vargs
# Passed args with 2-element remainder ["old", "new"], return a list of tuples
# of the form [(name, (oldstats, newstats))] where each name is a common subdir
# of each of "old" and "new", and the stats are those found in the respective
# dirs.
def load_paired_stats_dirs(args):
assert(len(args.remainder) == 2)
paired_stats = []
(old, new) = args.remainder
vargs = vars_of_args(args)
for p in sorted(os.listdir(old)):
full_old = os.path.join(old, p)
full_new = os.path.join(new, p)
if not (os.path.exists(full_old) and os.path.isdir(full_old) and
os.path.exists(full_new) and os.path.isdir(full_new)):
continue
old_stats = load_stats_dir(full_old, **vargs)
new_stats = load_stats_dir(full_new, **vargs)
if len(old_stats) == 0 or len(new_stats) == 0:
continue
paired_stats.append((p, (old_stats, new_stats)))
return paired_stats
def write_catapult_trace(args):
allstats = []
vargs = vars_of_args(args)
for path in args.remainder:
allstats += load_stats_dir(path, **vargs)
json.dump([s.to_catapult_trace_obj() for s in allstats], args.output)
def write_lnt_values(args):
vargs = vars_of_args(args)
for d in args.remainder:
stats = load_stats_dir(d, **vargs)
merged = merge_all_jobstats(stats, **vargs)
j = merged.to_lnt_test_obj(args)
if args.lnt_submit is None:
json.dump(j, args.output, indent=4)
else:
url = args.lnt_submit
print "\nsubmitting to LNT server: " + url
json_report = {'input_data': json.dumps(j), 'commit': '1'}
data = urllib.urlencode(json_report)
response_str = urllib2.urlopen(urllib2.Request(url, data))
response = json.loads(response_str.read())
print "### response:"
print response
if 'success' in response:
print "server response:\tSuccess"
else:
print "server response:\tError"
print "error:\t", response['error']
sys.exit(1)
def show_paired_incrementality(args):
fieldnames = ["old_pct", "old_skip",
"new_pct", "new_skip",
"delta_pct", "delta_skip",
"name"]
out = csv.DictWriter(args.output, fieldnames, dialect='excel-tab')
out.writeheader()
vargs = vars_of_args(args)
for (name, (oldstats, newstats)) in load_paired_stats_dirs(args):
olddriver = merge_all_jobstats((x for x in oldstats
if x.is_driver_job()), **vargs)
newdriver = merge_all_jobstats((x for x in newstats
if x.is_driver_job()), **vargs)
if olddriver is None or newdriver is None:
continue
oldpct = olddriver.incrementality_percentage()
newpct = newdriver.incrementality_percentage()
deltapct = newpct - oldpct
oldskip = olddriver.driver_jobs_skipped()
newskip = newdriver.driver_jobs_skipped()
deltaskip = newskip - oldskip
out.writerow(dict(name=name,
old_pct=oldpct, old_skip=oldskip,
new_pct=newpct, new_skip=newskip,
delta_pct=deltapct, delta_skip=deltaskip))
def show_incrementality(args):
fieldnames = ["incrementality", "name"]
out = csv.DictWriter(args.output, fieldnames, dialect='excel-tab')
out.writeheader()
vargs = vars_of_args(args)
for path in args.remainder:
stats = load_stats_dir(path, **vargs)
for s in stats:
if s.is_driver_job():
pct = s.incrementality_percentage()
out.writerow(dict(name=os.path.basename(path),
incrementality=pct))
def diff_and_pct(old, new):
if old == 0:
if new == 0:
return (0, 0.0)
else:
return (new, 100.0)
delta = (new - old)
delta_pct = round((float(delta) / float(old)) * 100.0, 2)
return (delta, delta_pct)
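# Worked example (illustrative): diff_and_pct(200, 250) == (50, 25.0), since the
# delta is 50 and 50/200 is 25%; by the special cases above, diff_and_pct(0, 7)
# == (7, 100.0) and diff_and_pct(0, 0) == (0, 0.0).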
def update_epoch_value(d, name, epoch, value):
changed = 0
if name in d:
(existing_epoch, existing_value) = d[name]
if existing_epoch > epoch:
print("note: keeping newer value %d from epoch %d for %s"
% (existing_value, existing_epoch, name))
epoch = existing_epoch
value = existing_value
elif existing_value == value:
epoch = existing_epoch
else:
(_, delta_pct) = diff_and_pct(existing_value, value)
print ("note: changing value %d -> %d (%.2f%%) for %s" %
(existing_value, value, delta_pct, name))
changed = 1
d[name] = (epoch, value)
return (epoch, value, changed)
def read_stats_dict_from_csv(f, select_stat=''):
infieldnames = ["epoch", "name", "value"]
c = csv.DictReader(f, infieldnames,
dialect='excel-tab',
quoting=csv.QUOTE_NONNUMERIC)
d = {}
sre = re.compile('.*' if len(select_stat) == 0 else
'|'.join(select_stat))
for row in c:
epoch = int(row["epoch"])
name = row["name"]
if sre.search(name) is None:
continue
value = int(row["value"])
update_epoch_value(d, name, epoch, value)
return d
# The idea here is that a "baseline" is a (tab-separated) CSV file full of
# the counters you want to track, each prefixed by an epoch timestamp of
# the last time the value was reset.
#
# When you set a fresh baseline, all stats in the provided stats dir are
# written to the baseline. When you set against an _existing_ baseline,
# only the counters mentioned in the existing baseline are updated, and
# only if their values differ.
#
# Finally, since it's a line-oriented CSV file, you can put:
#
# mybaseline.csv merge=union
#
# in your .gitattributes file, and forget about merge conflicts. The reader
# function above will take the later epoch anytime it detects duplicates,
# so union-merging is harmless. Duplicates will be eliminated whenever the
# next baseline-set is done.
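#
# For illustration (counter names and values are made up), each baseline row is
# epoch <tab> "name" <tab> value, e.g.:
#
#   1514764800    "AST.NumSourceLines"    1200
#   1514764800    "IRModule.NumIRFunctions"    340
#
# Names are quoted because the writer below uses QUOTE_NONNUMERIC.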
def set_csv_baseline(args):
existing = None
vargs = vars_of_args(args)
if os.path.exists(args.set_csv_baseline):
with open(args.set_csv_baseline, "r") as f:
ss = vargs['select_stat']
existing = read_stats_dict_from_csv(f, select_stat=ss)
print ("updating %d baseline entries in %s" %
(len(existing), args.set_csv_baseline))
else:
print "making new baseline " + args.set_csv_baseline
fieldnames = ["epoch", "name", "value"]
with open(args.set_csv_baseline, "wb") as f:
out = csv.DictWriter(f, fieldnames, dialect='excel-tab',
quoting=csv.QUOTE_NONNUMERIC)
m = merge_all_jobstats((s for d in args.remainder
for s in load_stats_dir(d, **vargs)),
**vargs)
if m is None:
print "no stats found"
return 1
changed = 0
newepoch = int(time.time())
for name in sorted(m.stats.keys()):
epoch = newepoch
value = m.stats[name]
if existing is not None:
if name not in existing:
continue
(epoch, value, chg) = update_epoch_value(existing, name,
epoch, value)
changed += chg
out.writerow(dict(epoch=int(epoch),
name=name,
value=int(value)))
if existing is not None:
print "changed %d entries in baseline" % changed
return 0
OutputRow = namedtuple("OutputRow",
["name", "old", "new",
"delta", "delta_pct"])
def compare_stats(args, old_stats, new_stats):
for name in sorted(old_stats.keys()):
old = old_stats[name]
new = new_stats.get(name, 0)
(delta, delta_pct) = diff_and_pct(old, new)
yield OutputRow(name=name,
old=int(old), new=int(new),
delta=int(delta),
delta_pct=delta_pct)
IMPROVED = -1
UNCHANGED = 0
REGRESSED = 1
def row_state(row, args):
delta_pct_over_thresh = abs(row.delta_pct) > args.delta_pct_thresh
if (row.name.startswith("time.") or '.time.' in row.name):
# Timers are judged as changing if they exceed
# the percentage _and_ absolute-time thresholds
delta_usec_over_thresh = abs(row.delta) > args.delta_usec_thresh
if delta_pct_over_thresh and delta_usec_over_thresh:
return (REGRESSED if row.delta > 0 else IMPROVED)
elif delta_pct_over_thresh:
return (REGRESSED if row.delta > 0 else IMPROVED)
return UNCHANGED
def write_comparison(args, old_stats, new_stats):
rows = list(compare_stats(args, old_stats, new_stats))
sort_key = (attrgetter('delta_pct')
if args.sort_by_delta_pct
else attrgetter('name'))
regressed = [r for r in rows if row_state(r, args) == REGRESSED]
unchanged = [r for r in rows if row_state(r, args) == UNCHANGED]
improved = [r for r in rows if row_state(r, args) == IMPROVED]
regressions = len(regressed)
if args.markdown:
def format_field(field, row):
if field == 'name' and args.group_by_module:
return stat_name_minus_module(row.name)
elif field == 'delta_pct':
s = str(row.delta_pct) + "%"
if args.github_emoji:
if row_state(row, args) == REGRESSED:
s += " :no_entry:"
elif row_state(row, args) == IMPROVED:
s += " :white_check_mark:"
return s
else:
return str(vars(row)[field])
def format_table(elts):
out = args.output
out.write('\n')
out.write(' | '.join(OutputRow._fields))
out.write('\n')
out.write(' | '.join('---:' for _ in OutputRow._fields))
out.write('\n')
for e in elts:
out.write(' | '.join(format_field(f, e)
for f in OutputRow._fields))
out.write('\n')
def format_details(name, elts, is_closed):
out = args.output
details = '<details>\n' if is_closed else '<details open>\n'
out.write(details)
out.write('<summary>%s (%d)</summary>\n'
% (name, len(elts)))
if args.group_by_module:
def keyfunc(e):
return module_name_of_stat(e.name)
elts.sort(key=attrgetter('name'))
for mod, group in itertools.groupby(elts, keyfunc):
groupelts = list(group)
groupelts.sort(key=sort_key, reverse=args.sort_descending)
out.write(details)
out.write('<summary>%s in %s (%d)</summary>\n'
% (name, mod, len(groupelts)))
format_table(groupelts)
out.write('</details>\n')
else:
elts.sort(key=sort_key, reverse=args.sort_descending)
format_table(elts)
out.write('</details>\n')
format_details('Regressed', regressed, args.close_regressions)
format_details('Improved', improved, True)
format_details('Unchanged (abs(delta) < %s%% or %susec)' %
(args.delta_pct_thresh, args.delta_usec_thresh),
unchanged, True)
else:
rows.sort(key=sort_key, reverse=args.sort_descending)
out = csv.DictWriter(args.output, OutputRow._fields,
dialect='excel-tab')
out.writeheader()
for row in rows:
if row_state(row, args) != UNCHANGED:
out.writerow(row._asdict())
return regressions
def compare_to_csv_baseline(args):
vargs = vars_of_args(args)
old_stats = read_stats_dict_from_csv(args.compare_to_csv_baseline,
select_stat=vargs['select_stat'])
m = merge_all_jobstats((s for d in args.remainder
for s in load_stats_dir(d, **vargs)),
**vargs)
old_stats = dict((k, v) for (k, (_, v)) in old_stats.items())
new_stats = m.stats
return write_comparison(args, old_stats, new_stats)
# Summarize immediate difference between two stats-dirs, optionally applying
# the --select-* / --exclude-timers filters and the --merge-by policy.
def compare_stats_dirs(args):
if len(args.remainder) != 2:
raise ValueError("Expected exactly 2 stats-dirs")
vargs = vars_of_args(args)
(old, new) = args.remainder
old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)
return write_comparison(args, old_stats.stats, new_stats.stats)
# Evaluate a boolean expression in terms of the provided stats-dir; all stats
# are projected into python dicts (thus variables in the eval expr) named by
# the last identifier in the stat definition. This means you can evaluate
# things like 'NumIRInsts < 1000' or
# 'NumTypesValidated == NumTypesDeserialized'
def evaluate(args):
if len(args.remainder) != 1:
raise ValueError("Expected exactly 1 stats-dir to evaluate against")
d = args.remainder[0]
vargs = vars_of_args(args)
merged = merge_all_jobstats(load_stats_dir(d, **vargs), **vargs)
env = {}
    ident = re.compile(r'(\w+)$')
for (k, v) in merged.stats.items():
if k.startswith("time.") or '.time.' in k:
continue
m = re.search(ident, k)
if m:
i = m.groups()[0]
if args.verbose:
print("%s => %s" % (i, v))
env[i] = v
try:
if eval(args.evaluate, env):
return 0
else:
print("evaluate condition failed: '%s'" % args.evaluate)
return 1
except Exception as e:
print(e)
return 1
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true",
help="Report activity verbosely")
parser.add_argument("--output", default="-",
type=argparse.FileType('wb', 0),
help="Write output to file")
parser.add_argument("--paired", action="store_true",
help="Process two dirs-of-stats-dirs, pairwise")
parser.add_argument("--delta-pct-thresh", type=float, default=0.01,
help="Percentage change required to report")
parser.add_argument("--delta-usec-thresh", type=int, default=100000,
help="Absolute delta on times required to report")
parser.add_argument("--lnt-machine", type=str, default=platform.node(),
help="Machine name for LNT submission")
parser.add_argument("--lnt-run-info", action='append', default=[],
type=lambda kv: kv.split("="),
help="Extra key=value pairs for LNT run-info")
parser.add_argument("--lnt-machine-info", action='append', default=[],
type=lambda kv: kv.split("="),
help="Extra key=value pairs for LNT machine-info")
parser.add_argument("--lnt-order", type=str,
default=str(int(time.time())),
help="Order for LNT submission")
parser.add_argument("--lnt-tag", type=str, default="swift-compile",
help="Tag for LNT submission")
parser.add_argument("--lnt-submit", type=str, default=None,
help="URL to submit LNT data to (rather than print)")
parser.add_argument("--select-module",
default=[],
action="append",
help="Select specific modules")
parser.add_argument("--group-by-module",
default=False,
action="store_true",
help="Group stats by module")
parser.add_argument("--select-stat",
default=[],
action="append",
help="Select specific statistics")
parser.add_argument("--select-stats-from-csv-baseline",
type=argparse.FileType('rb', 0), default=None,
help="Select statistics present in a CSV baseline")
parser.add_argument("--exclude-timers",
default=False,
action="store_true",
help="only select counters, exclude timers")
parser.add_argument("--sort-by-delta-pct",
default=False,
action="store_true",
help="Sort comparison results by delta-%%, not stat")
parser.add_argument("--sort-descending",
default=False,
action="store_true",
help="Sort comparison results in descending order")
parser.add_argument("--merge-by",
default="sum",
type=str,
help="Merge identical metrics by (sum|min|max)")
parser.add_argument("--markdown",
default=False,
action="store_true",
help="Write output in markdown table format")
parser.add_argument("--include-unchanged",
default=False,
action="store_true",
help="Include unchanged stats values in comparison")
parser.add_argument("--close-regressions",
default=False,
action="store_true",
help="Close regression details in markdown")
parser.add_argument("--github-emoji",
default=False,
action="store_true",
help="Add github-emoji indicators to markdown")
modes = parser.add_mutually_exclusive_group(required=True)
modes.add_argument("--catapult", action="store_true",
help="emit a 'catapult'-compatible trace of events")
modes.add_argument("--incrementality", action="store_true",
help="summarize the 'incrementality' of a build")
modes.add_argument("--set-csv-baseline", type=str, default=None,
help="Merge stats from a stats-dir into a CSV baseline")
modes.add_argument("--compare-to-csv-baseline",
type=argparse.FileType('rb', 0), default=None,
metavar="BASELINE.csv",
help="Compare stats dir to named CSV baseline")
modes.add_argument("--compare-stats-dirs",
action="store_true",
help="Compare two stats dirs directly")
modes.add_argument("--lnt", action="store_true",
help="Emit an LNT-compatible test summary")
modes.add_argument("--evaluate", type=str, default=None,
help="evaluate an expression of stat-names")
parser.add_argument('remainder', nargs=argparse.REMAINDER,
help="stats-dirs to process")
args = parser.parse_args()
if len(args.remainder) == 0:
parser.print_help()
return 1
if args.catapult:
write_catapult_trace(args)
elif args.compare_stats_dirs:
return compare_stats_dirs(args)
elif args.set_csv_baseline is not None:
return set_csv_baseline(args)
elif args.compare_to_csv_baseline is not None:
return compare_to_csv_baseline(args)
elif args.incrementality:
if args.paired:
show_paired_incrementality(args)
else:
show_incrementality(args)
elif args.lnt:
write_lnt_values(args)
elif args.evaluate:
return evaluate(args)
return None
sys.exit(main())
| OscarSwanros/swift | utils/process-stats-dir.py | Python | apache-2.0 | 22,063 |
import sys
def luhn_checksum(card_number):
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(card_number)
odd_digits = digits[-1::-2]
even_digits = digits[-2::-2]
checksum = 0
checksum += sum(odd_digits)
for d in even_digits:
checksum += sum(digits_of(d*2))
return checksum % 10
def is_luhn_valid(card_number):
return luhn_checksum(card_number) == 0
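# Worked example (illustrative): for the classic test number 4111111111111111 the
# odd-position digits sum to 8 and the doubled even-position digits contribute
# 2*7 + 8 = 22, so the checksum is 30 % 10 == 0 and
# is_luhn_valid(4111111111111111) is True; any single-digit change breaks it.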
for arg in sys.argv[1:]:
print is_luhn_valid(int(arg))
| steffzahn/test | luhn.py | Python | apache-2.0 | 488 |
"""
Helper methods for testing cohorts.
"""
import factory
from factory import post_generation, Sequence
from factory.django import DjangoModelFactory
import json
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from ..cohorts import set_course_cohort_settings
from ..models import CourseUserGroup, CourseCohort, CourseCohortsSettings, CohortMembership
class CohortFactory(DjangoModelFactory):
"""
Factory for constructing mock cohorts.
"""
class Meta(object):
model = CourseUserGroup
name = Sequence("cohort{}".format)
course_id = SlashSeparatedCourseKey("dummy", "dummy", "dummy")
group_type = CourseUserGroup.COHORT
@post_generation
def users(self, create, extracted, **kwargs): # pylint: disable=unused-argument
"""
Returns the users associated with the cohort.
"""
if extracted:
self.users.add(*extracted)
class CourseCohortFactory(DjangoModelFactory):
"""
Factory for constructing mock course cohort.
"""
class Meta(object):
model = CourseCohort
@post_generation
def memberships(self, create, extracted, **kwargs): # pylint: disable=unused-argument
"""
Returns the memberships linking users to this cohort.
"""
for user in self.course_user_group.users.all(): # pylint: disable=E1101
membership = CohortMembership(user=user, course_user_group=self.course_user_group)
membership.save()
course_user_group = factory.SubFactory(CohortFactory)
assignment_type = 'manual'
class CourseCohortSettingsFactory(DjangoModelFactory):
"""
Factory for constructing mock course cohort settings.
"""
class Meta(object):
model = CourseCohortsSettings
is_cohorted = False
course_id = SlashSeparatedCourseKey("dummy", "dummy", "dummy")
cohorted_discussions = json.dumps([])
# pylint: disable=invalid-name
always_cohort_inline_discussions = True
def topic_name_to_id(course, name):
"""
Given a discussion topic name, return an id for that name (includes
course and url_name).
"""
return "{course}_{run}_{name}".format(
course=course.location.course,
run=course.url_name,
name=name
)
def config_course_cohorts_legacy(
course,
discussions,
cohorted,
cohorted_discussions=None,
auto_cohort_groups=None,
always_cohort_inline_discussions=None # pylint: disable=invalid-name
):
"""
Given a course with no discussion set up, add the discussions and set
the cohort config on the course descriptor.
Since cohort settings are now stored in models.CourseCohortSettings,
this is only used for testing data migration from the CourseDescriptor
to the table.
Arguments:
course: CourseDescriptor
discussions: list of topic names strings. Picks ids and sort_keys
automatically.
cohorted: bool.
cohorted_discussions: optional list of topic names. If specified,
converts them to use the same ids as topic names.
auto_cohort_groups: optional list of strings
(names of groups to put students into).
Returns:
Nothing -- modifies course in place.
"""
def to_id(name):
"""
Helper method to convert a discussion topic name to a database identifier
"""
return topic_name_to_id(course, name)
topics = dict((name, {"sort_key": "A",
"id": to_id(name)})
for name in discussions)
course.discussion_topics = topics
config = {"cohorted": cohorted}
if cohorted_discussions is not None:
config["cohorted_discussions"] = [to_id(name)
for name in cohorted_discussions]
if auto_cohort_groups is not None:
config["auto_cohort_groups"] = auto_cohort_groups
if always_cohort_inline_discussions is not None:
config["always_cohort_inline_discussions"] = always_cohort_inline_discussions
course.cohort_config = config
try:
# Not implemented for XMLModulestore, which is used by test_cohorts.
modulestore().update_item(course, ModuleStoreEnum.UserID.test)
except NotImplementedError:
pass
# pylint: disable=dangerous-default-value
def config_course_cohorts(
course,
is_cohorted,
auto_cohorts=[],
manual_cohorts=[],
discussion_topics=[],
cohorted_discussions=[],
always_cohort_inline_discussions=True # pylint: disable=invalid-name
):
"""
Set discussions and configure cohorts for a course.
Arguments:
course: CourseDescriptor
is_cohorted (bool): Is the course cohorted?
auto_cohorts (list): Names of auto cohorts to create.
manual_cohorts (list): Names of manual cohorts to create.
discussion_topics (list): Discussion topic names. Picks ids and
sort_keys automatically.
cohorted_discussions: Discussion topics to cohort. Converts the
list to use the same ids as discussion topic names.
always_cohort_inline_discussions (bool): Whether inline discussions
should be cohorted by default.
Returns:
Nothing -- modifies course in place.
"""
def to_id(name):
"""Convert name to id."""
return topic_name_to_id(course, name)
set_course_cohort_settings(
course.id,
is_cohorted=is_cohorted,
cohorted_discussions=[to_id(name) for name in cohorted_discussions],
always_cohort_inline_discussions=always_cohort_inline_discussions
)
for cohort_name in auto_cohorts:
cohort = CohortFactory(course_id=course.id, name=cohort_name)
CourseCohortFactory(course_user_group=cohort, assignment_type=CourseCohort.RANDOM)
for cohort_name in manual_cohorts:
cohort = CohortFactory(course_id=course.id, name=cohort_name)
CourseCohortFactory(course_user_group=cohort, assignment_type=CourseCohort.MANUAL)
course.discussion_topics = dict((name, {"sort_key": "A", "id": to_id(name)})
for name in discussion_topics)
try:
# Not implemented for XMLModulestore, which is used by test_cohorts.
modulestore().update_item(course, ModuleStoreEnum.UserID.test)
except NotImplementedError:
pass
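# Illustrative usage (hedged; the course object, cohort names and topic names
# below are examples, not fixtures shipped with this module):
#
#   config_course_cohorts(
#       course,
#       is_cohorted=True,
#       auto_cohorts=["AutoGroup1", "AutoGroup2"],
#       manual_cohorts=["ManualGroup"],
#       discussion_topics=["General", "Feedback"],
#       cohorted_discussions=["Feedback"],
#   )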
| ahmadiga/min_edx | openedx/core/djangoapps/course_groups/tests/helpers.py | Python | agpl-3.0 | 6,550 |