repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/distutils/tests/test_install_headers.py | 24 | 1238 | """Tests for distutils.command.install_headers."""
import os
import unittest
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_simple_run(self):
# we have two headers
header_list = self.mkdtemp()
header1 = os.path.join(header_list, 'header1')
header2 = os.path.join(header_list, 'header2')
self.write_file(header1)
self.write_file(header2)
headers = [header1, header2]
pkg_dir, dist = self.create_dist(headers=headers)
cmd = install_headers(dist)
self.assertEqual(cmd.get_inputs(), headers)
# let's run the command
cmd.install_dir = os.path.join(pkg_dir, 'inst')
cmd.ensure_finalized()
cmd.run()
# let's check the results
self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
return unittest.makeSuite(InstallHeadersTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-3.0 | 6,207,654,004,272,567,000 | 30.74359 | 61 | 0.624394 | false |
2014c2g2/teamwork | w2/static/Brython2.0.0-20140209-164925/Lib/_random.py | 115 | 2198 | import _os
from os import urandom as _urandom
class Random:
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
#random
#seed
#getstate
#setstate
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self._state=x
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1, the hash() of *a* is used instead.
If *a* is an int, all bits are used.
"""
self._state=a
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self._state
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
self._state=state
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return _os.random()
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates a long int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
| gpl-2.0 | 96,533,815,836,840,100 | 31.323529 | 79 | 0.60828 | false |
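The `Random` docstring above spells out the subclassing contract: override `random()`, `seed()`, `getstate()` and `setstate()`, and optionally `getrandbits()`. Below is a minimal sketch of that contract, written against the standard-library `random.Random` rather than the Brython stub shown here; the class name and LCG constants are illustrative assumptions, not part of the original module:

```python
import random

class LCG(random.Random):
    """Toy linear congruential generator -- illustration only, not for real use."""

    def seed(self, a=None, version=2):
        self._state = a if a is not None else 0   # a real generator would seed from the OS

    def random(self):
        # One LCG step (Numerical Recipes constants); returns a float in [0.0, 1.0)
        self._state = (1664525 * self._state + 1013904223) % (2 ** 32)
        return self._state / 2.0 ** 32

    def getstate(self):
        return self._state

    def setstate(self, state):
        self._state = state

rng = LCG(42)
print([round(rng.random(), 3) for _ in range(3)])
print(rng.randrange(10))   # inherited helpers such as randrange() build on random()
```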
nickgaya/python2 | python2/shared/codec.py | 1 | 12116 | """
Encoding and decoding classes for Python 2 client/server communication.
This module is used by both the Python 2 server and the Python 3 client, with
some modification to handle object references.
The encoding supports basic types and data structures. Everything else will
be encoded as an opaque object reference.
The basic encoding algorithm involves recursively iterating over the members of
each container type. For each object traversed, the encoder adds the object
to an internal cache. If an object is seen again, it is encoded as a pointer
to the previous occurrence. The decoder traverses the encoding in the same
order and maintains a similar cache which is used to translate cache pointers.
This simple algorithm runs into trouble when dealing with tuples containing
circular references. During decoding, a tuple's members must be decoded
before the tuple can be created, since the tuple is immutable after
instantiation. But this would create a problem if we encounter a cache pointer
to the tuple before the tuple had been instantiated.
To resolve this issue, we must modify the simple preorder traversal initially
described. When we encounter a mutable collection (list or dict)*, we
initially create an empty placeholder for the collection and come back to it
later once we have traversed all other reachable objects. This ensures that by
the time we encounter a cache pointer the cached object is guaranteed to be
initialized.
For example, suppose we have the following tuple `T`::
T = ([T], 1)
When encoding, we initially add `T` to the cache. Then we encounter the list
`[T]`. We create a placeholder in the encoding and remember it for later.
Next we encode `1`. Finally, we return to `[T]` and recur into the list.
Since `T` is in the cache, we encode the nested occurrence as a cache pointer.
When decoding, we begin decoding the elements of `T`. When we get to the
encoding of `[T]`, we create an empty list and continue, remembering our place
for later. Then we decode `1` and initialize `T` to `([], 1)`, storing it in
the cache. Next we return to the encoded list and update the list with its decoded
contents. When we get to the cache reference for `T`, we can look it up in the
cache with no problems since it has already been initialized.
*It is not necessary to do the placeholder procedure for sets, even though they
are mutable, because any circularly-referential data structure must contain a
mutable object, which makes it unhashable.
"""
# Possible improvements:
#
# - Is the complexity really worth it? How likely are circular references
# anyway?
#
# - Could detect and error out instead, or document as a limitation
#
# - Current algorithm requires encoding/decoding to occur in consistent order
# within session. Can we avoid this?
#
# - Is there a way to avoid incurring the costs of caching when not needed?
# maybe a two-pass algorithm that checks before encoding?
import base64
import collections
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
_long = long # noqa
_bytes = str
_unicode = unicode # noqa
_range = xrange # noqa
_items = dict.iteritems
elif PYTHON_VERSION == 3:
_long = int
_bytes = bytes
_unicode = str
_range = range
_items = dict.items
else:
raise Exception("Unsupported Python version: {}".format(PYTHON_VERSION))
_value_types = frozenset({
type(None), type(NotImplemented), type(Ellipsis), bool, int, _long, float,
complex, _bytes, _unicode, bytearray, _range
})
_container_types = frozenset({slice, list, tuple, set, frozenset, dict})
_supported_types = _value_types | _container_types
class EncodingDepth(object):
""" Common values for encoding depth. """
REF = 0 # Encode as a reference
SHALLOW = 1 # Convert top-level object to value
DEEP = -1 # Recursively convert object to value
class BaseEncodingSession(object):
""" Base encoder for Python 2 client and server. """
def __init__(self):
self.session = {}
self.deferred = collections.deque()
def encode(self, obj, depth=EncodingDepth.DEEP):
""" Encode an object. """
data = self._enc(obj, depth)
while self.deferred:
self.deferred.popleft()()
return data
def _enc(self, obj, depth):
t = type(obj)
if depth and any(t is s for s in _supported_types):
if t in _container_types:
# For container types, we include the depth in the cache key.
# This means that if encoding to a finite depth, a given
# container object will be encoded separately at each depth
# where it occurs.
key = id(obj), max(depth, -1)
else:
key = id(obj)
if key in self.session:
return dict(type='cached', index=self.session[key][0])
# Store cached objects to prevent garbage collection
# This ensures that ids uniquely map to objects over the life of
# the session. We can't use a WeakKeyDictionary to avoid this
# because most builtin types do not support weak references.
self.session[key] = len(self.session), obj
# Singleton objects
if obj is None:
return dict(type='None')
elif obj is NotImplemented:
return dict(type='NotImplemented')
elif obj is Ellipsis:
return dict(type='Ellipsis')
# Numerical types
elif t is bool:
return dict(type='bool', value=obj)
elif t is int or t is _long:
return dict(type='int', value=obj)
elif t is float:
return dict(type='float', value=obj)
elif t is complex:
return dict(type='complex', real=obj.real, imag=obj.imag)
# String types
elif t is _bytes:
return self._enc_bdata('bytes', obj)
elif t is _unicode:
return self._enc_bdata('unicode', obj.encode('utf8'))
elif t is bytearray:
return self._enc_bdata('bytearray', obj)
# Range and slice
elif t is _range:
return self._enc_range(obj)
elif t is slice:
return dict(type='slice',
start=self._enc(obj.start, depth-1),
stop=self._enc(obj.stop, depth-1),
step=self._enc(obj.step, depth-1))
# Container types
elif t is list:
d = dict(type='list', items=Placeholder)
self.deferred.append(
lambda: d.update(items=self._enc_items(obj, depth-1)))
return d
elif t is tuple:
return dict(type='tuple', items=self._enc_items(obj, depth-1))
elif t is set:
return dict(type='set', items=self._enc_items(obj, depth-1))
elif t is frozenset:
return dict(type='frozenset',
items=self._enc_items(obj, depth-1))
elif t is dict:
d = dict(type='dict', items=Placeholder)
self.deferred.append(
lambda: d.update(items=[self._enc_kv(key, value, depth-1)
for key, value in _items(obj)]))
return d
else:
# Should never happen
raise AssertionError("Unexpected type: {}".format(t.__name__))
# Encode as reference
return self._enc_ref(obj)
def _enc_bdata(self, type_, data):
""" Encode binary data. """
return dict(type=type_, data=base64.b64encode(data).decode('ascii'))
def _enc_items(self, itr, depth):
""" Encode a collection of items. """
return [self._enc(item, depth) for item in itr]
if PYTHON_VERSION == 2:
def _enc_range(self, range_):
""" Encode a range object. """
start, stop, step = range_.__reduce__()[1]
return dict(type='range', start=start, stop=stop, step=step)
else:
def _enc_range(self, range_):
""" Encode a range object. """
return dict(type='range', start=range_.start, stop=range_.stop,
step=range_.step)
def _enc_kv(self, key, value, depth):
""" Encode a dict key-value pair. """
return dict(key=self._enc(key, depth), value=self._enc(value, depth))
def _enc_ref(self, obj):
""" Encode an object as a reference. """
# Implemented by client/server subclasses
        raise NotImplementedError()
class BaseDecodingSession(object):
""" Base decoder for Python 2 client and server. """
def __init__(self):
self.session = []
self.deferred = collections.deque()
def decode(self, data):
obj = self._dec(data)
while self.deferred:
self.deferred.popleft()()
return obj
def _dec(self, data):
""" Decode an encoded object. """
dtype = data['type']
if dtype == 'ref':
return self._dec_ref(data)
if dtype == 'cached':
assert self.session[data['index']] is not Placeholder
return self.session[data['index']]
cache_index = len(self.session)
self.session.append(Placeholder)
def _cache(obj):
self.session[cache_index] = obj
return obj
# Singleton objects
if dtype == 'None':
return _cache(None)
elif dtype == 'NotImplemented':
return _cache(NotImplemented)
elif dtype == 'Ellipsis':
return _cache(Ellipsis)
# Numeric types
elif dtype in ('bool', 'int', 'float'):
return _cache(data['value'])
elif dtype == 'complex':
return _cache(complex(real=data['real'], imag=data['imag']))
# String types
elif dtype == 'bytes':
return _cache(self._dec_bdata(data))
elif dtype == 'unicode':
return _cache(self._dec_bdata(data).decode('utf8'))
elif dtype == 'bytearray':
return _cache(bytearray(self._dec_bdata(data)))
# Range and slice
elif dtype == 'range':
return _cache(_range(data['start'], data['stop'], data['step']))
elif dtype == 'slice':
return _cache(slice(self._dec(data['start']),
self._dec(data['stop']),
self._dec(data['step'])))
# Container types
elif dtype == 'list':
lst = _cache([])
self.deferred.append(lambda: lst.extend(self._dec_items(data)))
return lst
elif dtype == 'tuple':
return _cache(tuple(self._dec_items(data)))
elif dtype == 'set':
return _cache(set(self._dec_items(data)))
elif dtype == 'frozenset':
return _cache(frozenset(self._dec_items(data)))
elif dtype == 'dict':
dct = _cache({})
self.deferred.append(
lambda: dct.update(self._dec_dict_items(data)))
return dct
else:
            raise TypeError("Invalid data type: {}".format(dtype))
def _dec_bdata(self, data):
return base64.b64decode(data['data'].encode('ascii'))
def _dec_items(self, data):
return (self._dec(item) for item in data['items'])
def _dec_dict_items(self, data):
return ((self._dec(kv['key']), self._dec(kv['value']))
for kv in data['items'])
def _dec_ref(self, data):
""" Decode an object reference. """
# Implemented by client/server subclasses
        raise NotImplementedError()
class PlaceholderType(object):
"""
Type for a singleton object to be used as a placeholder.
"""
__slots__ = ()
__hash__ = None # Should not be used as a dict key or set member
Placeholder = PlaceholderType()
| mit | -4,245,268,213,380,566,000 | 35.384384 | 79 | 0.594503 | false |
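The `T = ([T], 1)` walk-through in the module docstring above can be exercised end to end. A hedged sketch: the `python2.shared.codec` import path and the `SimpleEncoder` subclass are assumptions for illustration (`_enc_ref` is abstract in the base class and is only reached for objects that cannot be encoded as values):

```python
from python2.shared.codec import BaseEncodingSession, BaseDecodingSession

class SimpleEncoder(BaseEncodingSession):
    def _enc_ref(self, obj):
        # This sketch never encodes opaque references; fail loudly if one is needed.
        raise TypeError("cannot encode %r as a reference" % (obj,))

lst = []
T = (lst, 1)
lst.append(T)                              # now T == ([T], 1), a self-referential tuple

data = SimpleEncoder().encode(T)           # the nested T is emitted as {'type': 'cached', ...}
T2 = BaseDecodingSession().decode(data)    # the list placeholder is filled in afterwards
assert T2[1] == 1 and T2[0][0] is T2       # the circular structure round-trips
```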
encukou/cython | Cython/Plex/Lexicons.py | 8 | 6915 | #=======================================================================
#
# Python Lexical Analyser
#
# Lexical Analyser Specification
#
#=======================================================================
from __future__ import absolute_import
import types
from . import Actions
from . import DFA
from . import Errors
from . import Machines
from . import Regexps
# debug_flags for Lexicon constructor
DUMP_NFA = 1
DUMP_DFA = 2
class State(object):
"""
This class is used as part of a Plex.Lexicon specification to
introduce a user-defined state.
Constructor:
State(name, token_specifications)
"""
name = None
tokens = None
def __init__(self, name, tokens):
self.name = name
self.tokens = tokens
class Lexicon(object):
"""
Lexicon(specification) builds a lexical analyser from the given
|specification|. The specification consists of a list of
specification items. Each specification item may be either:
1) A token definition, which is a tuple:
(pattern, action)
        The |pattern| is a regular expression built using the
constructors defined in the Plex module.
The |action| is the action to be performed when this pattern
is recognised (see below).
2) A state definition:
State(name, tokens)
where |name| is a character string naming the state,
and |tokens| is a list of token definitions as
above. The meaning and usage of states is described
below.
Actions
-------
    The |action| in a token specification may be one of three things:
1) A function, which is called as follows:
function(scanner, text)
where |scanner| is the relevant Scanner instance, and |text|
is the matched text. If the function returns anything
other than None, that value is returned as the value of the
token. If it returns None, scanning continues as if the IGNORE
action were specified (see below).
2) One of the following special actions:
IGNORE means that the recognised characters will be treated as
white space and ignored. Scanning will continue until
the next non-ignored token is recognised before returning.
TEXT causes the scanned text itself to be returned as the
value of the token.
3) Any other value, which is returned as the value of the token.
States
------
At any given time, the scanner is in one of a number of states.
Associated with each state is a set of possible tokens. When scanning,
only tokens associated with the current state are recognised.
There is a default state, whose name is the empty string. Token
definitions which are not inside any State definition belong to
the default state.
The initial state of the scanner is the default state. The state can
be changed in one of two ways:
1) Using Begin(state_name) as the action of a token.
2) Calling the begin(state_name) method of the Scanner.
To change back to the default state, use '' as the state name.
"""
machine = None # Machine
tables = None # StateTableMachine
def __init__(self, specifications, debug=None, debug_flags=7, timings=None):
if type(specifications) != types.ListType:
raise Errors.InvalidScanner("Scanner definition is not a list")
if timings:
from .Timing import time
total_time = 0.0
time1 = time()
nfa = Machines.Machine()
default_initial_state = nfa.new_initial_state('')
token_number = 1
for spec in specifications:
if isinstance(spec, State):
user_initial_state = nfa.new_initial_state(spec.name)
for token in spec.tokens:
self.add_token_to_machine(
nfa, user_initial_state, token, token_number)
token_number += 1
elif type(spec) == types.TupleType:
self.add_token_to_machine(
nfa, default_initial_state, spec, token_number)
token_number += 1
else:
raise Errors.InvalidToken(
token_number,
"Expected a token definition (tuple) or State instance")
if timings:
time2 = time()
total_time = total_time + (time2 - time1)
time3 = time()
if debug and (debug_flags & 1):
debug.write("\n============= NFA ===========\n")
nfa.dump(debug)
dfa = DFA.nfa_to_dfa(nfa, debug=(debug_flags & 3) == 3 and debug)
if timings:
time4 = time()
total_time = total_time + (time4 - time3)
if debug and (debug_flags & 2):
debug.write("\n============= DFA ===========\n")
dfa.dump(debug)
if timings:
timings.write("Constructing NFA : %5.2f\n" % (time2 - time1))
timings.write("Converting to DFA: %5.2f\n" % (time4 - time3))
timings.write("TOTAL : %5.2f\n" % total_time)
self.machine = dfa
def add_token_to_machine(self, machine, initial_state, token_spec, token_number):
try:
(re, action_spec) = self.parse_token_definition(token_spec)
# Disabled this -- matching empty strings can be useful
#if re.nullable:
# raise Errors.InvalidToken(
# token_number, "Pattern can match 0 input symbols")
if isinstance(action_spec, Actions.Action):
action = action_spec
else:
try:
action_spec.__call__
except AttributeError:
action = Actions.Return(action_spec)
else:
action = Actions.Call(action_spec)
final_state = machine.new_state()
re.build_machine(machine, initial_state, final_state,
match_bol=1, nocase=0)
final_state.set_action(action, priority=-token_number)
except Errors.PlexError, e:
raise e.__class__("Token number %d: %s" % (token_number, e))
def parse_token_definition(self, token_spec):
if type(token_spec) != types.TupleType:
raise Errors.InvalidToken("Token definition is not a tuple")
if len(token_spec) != 2:
raise Errors.InvalidToken("Wrong number of items in token definition")
pattern, action = token_spec
if not isinstance(pattern, Regexps.RE):
raise Errors.InvalidToken("Pattern is not an RE instance")
return (pattern, action)
def get_initial_state(self, name):
return self.machine.get_initial_state(name)
| apache-2.0 | -5,717,951,326,010,610,000 | 33.575 | 85 | 0.577007 | false |
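To make the specification format described in the `Lexicon` docstring concrete, here is a small illustrative lexicon. It assumes a Python 2 environment matching this module (it still uses `types.ListType`) and that the pattern constructors and actions (`Str`, `Any`, `AnyChar`, `Rep`, `Rep1`, `Range`, `Begin`, `TEXT`, `IGNORE`) are importable from the same Plex package; the token names are arbitrary:

```python
from Cython.Plex import (Lexicon, State, Str, Any, AnyChar, Rep, Rep1, Range,
                         Begin, TEXT, IGNORE)

letter = Range("azAZ")
digit = Range("09")
ident = letter + Rep(letter | digit)

lexicon = Lexicon([
    (ident,              'ident'),            # any other value: returned as the token value
    (Rep1(digit),        'number'),
    (Str("+", "-", "*"), TEXT),               # special action: return the matched text
    (Str("#"),           Begin('comment')),   # switch to a user-defined state
    (Rep1(Any(" \t\n")), IGNORE),
    State('comment', [                        # tokens recognised only in the 'comment' state
        (Str("\n"),      Begin('')),          # back to the default state
        (AnyChar,        IGNORE),
    ]),
])
```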
wmbutler/courtlistener | alert/userHandling/migrations/0002_auto.py | 2 | 13264 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field donation on 'UserProfile'
db.create_table('UserProfile_donation', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('userprofile', models.ForeignKey(orm['userHandling.userprofile'], null=False)),
('donation', models.ForeignKey(orm['donate.donation'], null=False))
))
db.create_unique('UserProfile_donation', ['userprofile_id', 'donation_id'])
def backwards(self, orm):
# Removing M2M table for field donation on 'UserProfile'
db.delete_table('UserProfile_donation')
models = {
'alerts.alert': {
'Meta': {'ordering': "['alertFrequency', 'alertText']", 'object_name': 'Alert', 'db_table': "'Alert'"},
'alertFrequency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'alertName': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'alertText': ('django.db.models.fields.CharField', [], {'max_length': '2500'}),
'alertUUID': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastHitDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sendNegativeAlert': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'donate.donation': {
'Meta': {'object_name': 'Donation'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_provider': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'referrer': ('django.db.models.fields.TextField', [], {}),
'renew_annually': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
'favorites.favorite': {
'Meta': {'object_name': 'Favorite', 'db_table': "'Favorite'"},
'doc_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '500', 'blank': 'True'})
},
'search.citation': {
'Meta': {'object_name': 'Citation', 'db_table': "'Citation'"},
'case_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'citationUUID': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'docket_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'lexis_cite': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'neutral_cite': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'west_cite': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'west_state_cite': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'search.court': {
'Meta': {'ordering': "['position']", 'object_name': 'Court', 'db_table': "'Court'"},
'URL': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'citation_string': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'courtUUID': ('django.db.models.fields.CharField', [], {'max_length': '15', 'primary_key': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}),
'in_use': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jurisdiction': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'position': ('django.db.models.fields.FloatField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'search.document': {
'Meta': {'object_name': 'Document', 'db_table': "'Document'"},
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'cases_cited': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'citing_cases'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['search.Citation']"}),
'citation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Citation']", 'null': 'True', 'blank': 'True'}),
'citation_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'court': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Court']"}),
'date_blocked': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_filed': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'documentUUID': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'download_URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'db_index': 'True'}),
'extracted_by_ocr': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'html_with_citations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'judges': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'local_path': ('django.db.models.fields.files.FileField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'}),
'nature_of_suit': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'plain_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'precedential_status': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sha1': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'time_retrieved': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'userHandling.barmembership': {
'Meta': {'ordering': "['barMembership']", 'object_name': 'BarMembership', 'db_table': "'BarMembership'"},
'barMembership': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2'}),
'barMembershipUUID': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'userHandling.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'UserProfile'"},
'activationKey': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'alert': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alerts.Alert']", 'null': 'True', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'barmembership': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['userHandling.BarMembership']", 'null': 'True', 'blank': 'True'}),
'donation': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['donate.Donation']", 'symmetrical': 'False'}),
'emailConfirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'employer': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'favorite': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'users'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['favorites.Favorite']"}),
'key_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'plaintextPreferred': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'userProfileUUID': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'wantsNewsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['userHandling'] | agpl-3.0 | 8,443,363,478,626,818,000 | 80.882716 | 208 | 0.555187 | false |
wardweistra/hipfrog | glassfrog/functions/apiCalls.py | 2 | 5598 | from flask import json, url_for
import requests
from .messageFunctions import createMessageDict
import glassfrog.strings as strings
class GlassfrogApiHandler(object):
def __init__(self):
pass
def glassfrogApiCall(self, apiEndpoint, glassfrogToken):
headers = {'X-Auth-Token': glassfrogToken}
apiUrl = 'https://glassfrog.holacracy.org/api/v3/'+apiEndpoint
apiResponse = requests.get(apiUrl, headers=headers)
code = apiResponse.status_code
responsebody = json.loads(apiResponse.text)
return code, responsebody
def getCircleForCircleId(self, circleId, glassfrogToken):
apiEndpoint = 'circles/{}'.format(circleId)
code, responsebody = self.glassfrogApiCall(apiEndpoint, glassfrogToken)
return code, responsebody
class HipchatApiHandler(object):
def __init__(self):
pass
def getCapabilitiesData(self, capabilitiesUrl):
return json.loads(requests.get(capabilitiesUrl).text)
def getTokenData(self, tokenUrl, client_auth, post_data):
return json.loads(requests.post(tokenUrl, auth=client_auth, data=post_data).text)
def sendMessage(self, color, message, installation):
messageUrl = '{}/room/{}/notification'.format(installation.hipchatApiProvider_url,
installation.roomId)
token_header = {"Authorization": "Bearer "+installation.access_token}
data = createMessageDict(color, message)
messageresponse = requests.post(messageUrl,
headers=token_header,
data=data)
def getRoomMembers(self, installation):
token_header = {"Authorization": "Bearer "+installation.access_token}
requestUrl = '{}/room/{}'.format(installation.hipchatApiProvider_url,
installation.roomId)
messageresponse = requests.get(requestUrl, headers=token_header)
if messageresponse.status_code != 200:
return messageresponse.status_code, json.loads(messageresponse.text)
privacy = json.loads(messageresponse.text)['privacy']
if privacy == 'public':
requestUrl = '{}/room/{}/participant'.format(installation.hipchatApiProvider_url,
installation.roomId)
elif privacy == 'private':
requestUrl = '{}/room/{}/member'.format(installation.hipchatApiProvider_url,
installation.roomId)
messageresponse = requests.get(requestUrl, headers=token_header)
return messageresponse.status_code, json.loads(messageresponse.text)
def getCapabilitiesDict(publicUrl):
capabilities_dict = \
{
"name": "HipFrog",
"description": "A Hipchat bot for accessing Glassfrog",
"key": "hipfrog",
"links": {
"homepage": publicUrl,
"self": publicUrl+"/capabilities.json"
},
"vendor": {
"name": "The Hyve",
"url": "https://www.thehyve.nl/"
},
"capabilities": {
"hipchatApiConsumer": {
"fromName": "HipFrog",
"scopes": [
"send_notification",
"view_room"
],
"avatar": {
"url": publicUrl+'/static/hipfrog.png',
"url@2x": publicUrl+'/static/hipfrog.png'
}
},
"installable": {
"allowGlobal": False,
"allowRoom": True,
"callbackUrl": publicUrl+"/installed"
},
"webhook": [
{
"event": "room_message",
"pattern": strings.regex_hipfrog,
"url": publicUrl+"/hipfrog",
"name": "Hipfrog webhook",
"authentication": "jwt"
},
{
"event": "room_message",
"pattern": strings.regex_at_role,
"url": publicUrl+"/atrole",
"name": "At Role webhook",
"authentication": "jwt"
},
{
"event": "room_message",
"pattern": strings.regex_at_circle,
"url": publicUrl+"/atcircle",
"name": "At Circle webhook",
"authentication": "jwt"
},
{
"event": "room_message",
"pattern": strings.regex_slash_role,
"url": publicUrl+"/slashrole",
"name": "Slash Role webhook",
"authentication": "jwt"
},
{
"event": "room_message",
"pattern": strings.regex_slash_circle,
"url": publicUrl+"/slashcircle",
"name": "Slash Circle webhook",
"authentication": "jwt"
}
],
"configurable": {
"url": publicUrl+"/configure.html"
}
}
}
return capabilities_dict
| lgpl-3.0 | -945,431,130,803,103,400 | 38.702128 | 93 | 0.483208 | false |
treycausey/scikit-learn | sklearn/externals/joblib/test/test_pool.py | 5 | 12733 | import os
import shutil
import tempfile
from nose import SkipTest
from nose.tools import with_setup
from nose.tools import assert_equal
from nose.tools import assert_raises
from nose.tools import assert_false
from nose.tools import assert_true
from .common import with_numpy, np
from .common import setup_autokill
from .common import teardown_autokill
from .._multiprocessing import mp
if mp is not None:
from ..pool import MemmapingPool
from ..pool import has_shareable_memory
from ..pool import ArrayMemmapReducer
from ..pool import reduce_memmap
TEMP_FOLDER = None
def setup_module():
setup_autokill(__name__, timeout=30)
def teardown_module():
teardown_autokill(__name__)
def check_multiprocessing():
if mp is None:
raise SkipTest('Need multiprocessing to run')
with_multiprocessing = with_setup(check_multiprocessing)
def setup_temp_folder():
global TEMP_FOLDER
TEMP_FOLDER = tempfile.mkdtemp(prefix='joblib_test_pool_')
def teardown_temp_folder():
global TEMP_FOLDER
if TEMP_FOLDER is not None:
shutil.rmtree(TEMP_FOLDER)
TEMP_FOLDER = None
with_temp_folder = with_setup(setup_temp_folder, teardown_temp_folder)
def double(input):
"""Dummy helper function to be executed in subprocesses"""
assert_array_equal = np.testing.assert_array_equal
data, position, expected = input
if expected is not None:
assert_equal(data[position], expected)
data[position] *= 2
if expected is not None:
assert_array_equal(data[position], 2 * expected)
@with_numpy
@with_multiprocessing
@with_temp_folder
def test_memmap_based_array_reducing():
"""Check that it is possible to reduce a memmap backed array"""
assert_array_equal = np.testing.assert_array_equal
filename = os.path.join(TEMP_FOLDER, 'test.mmap')
    # Create a file larger than what will be used by the memmaps below
buffer = np.memmap(filename, dtype=np.float64, shape=500, mode='w+')
    # Fill the original buffer with negative markers to detect overflow or
    # underflow in case of test failures
buffer[:] = - 1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype)
buffer.flush()
    # Memmap a 3D Fortran-ordered array onto an offset subsection of the
    # previous buffer
a = np.memmap(filename, dtype=np.float64, shape=(3, 5, 4),
mode='r+', order='F', offset=4)
a[:] = np.arange(60).reshape(a.shape)
# Build various views that share the buffer with the original memmap
    # b is a sliced memmap view on a memmap instance
b = a[1:-1, 2:-1, 2:4]
# c and d are array views
c = np.asarray(b)
d = c.T
# Array reducer with auto dumping disabled
reducer = ArrayMemmapReducer(None, TEMP_FOLDER, 'c')
def reconstruct_array(x):
cons, args = reducer(x)
return cons(*args)
def reconstruct_memmap(x):
cons, args = reduce_memmap(x)
return cons(*args)
# Reconstruct original memmap
a_reconstructed = reconstruct_memmap(a)
assert_true(has_shareable_memory(a_reconstructed))
assert_true(isinstance(a_reconstructed, np.memmap))
assert_array_equal(a_reconstructed, a)
# Reconstruct strided memmap view
b_reconstructed = reconstruct_memmap(b)
assert_true(has_shareable_memory(b_reconstructed))
assert_array_equal(b_reconstructed, b)
# Reconstruct arrays views on memmap base
c_reconstructed = reconstruct_array(c)
assert_false(isinstance(c_reconstructed, np.memmap))
assert_true(has_shareable_memory(c_reconstructed))
assert_array_equal(c_reconstructed, c)
d_reconstructed = reconstruct_array(d)
assert_false(isinstance(d_reconstructed, np.memmap))
assert_true(has_shareable_memory(d_reconstructed))
assert_array_equal(d_reconstructed, d)
# Test graceful degradation on fake memmap instances with in-memory
# buffers
a3 = a * 3
assert_false(has_shareable_memory(a3))
a3_reconstructed = reconstruct_memmap(a3)
assert_false(has_shareable_memory(a3_reconstructed))
assert_false(isinstance(a3_reconstructed, np.memmap))
assert_array_equal(a3_reconstructed, a * 3)
# Test graceful degradation on arrays derived from fake memmap instances
b3 = np.asarray(a3)
assert_false(has_shareable_memory(b3))
b3_reconstructed = reconstruct_array(b3)
assert_true(isinstance(b3_reconstructed, np.ndarray))
assert_false(has_shareable_memory(b3_reconstructed))
assert_array_equal(b3_reconstructed, b3)
@with_numpy
@with_multiprocessing
@with_temp_folder
def test_pool_with_memmap():
"""Check that subprocess can access and update shared memory memmap"""
assert_array_equal = np.testing.assert_array_equal
# Fork the subprocess before allocating the objects to be passed
pool_temp_folder = os.path.join(TEMP_FOLDER, 'pool')
os.makedirs(pool_temp_folder)
p = MemmapingPool(10, max_nbytes=2, temp_folder=pool_temp_folder)
try:
filename = os.path.join(TEMP_FOLDER, 'test.mmap')
a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
a.fill(1.0)
p.map(double, [(a, (i, j), 1.0)
for i in range(a.shape[0])
for j in range(a.shape[1])])
assert_array_equal(a, 2 * np.ones(a.shape))
# Open a copy-on-write view on the previous data
b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c')
p.map(double, [(b, (i, j), 2.0)
for i in range(b.shape[0])
for j in range(b.shape[1])])
# Passing memmap instances to the pool should not trigger the creation
# of new files on the FS
assert_equal(os.listdir(pool_temp_folder), [])
# the original data is untouched
assert_array_equal(a, 2 * np.ones(a.shape))
assert_array_equal(b, 2 * np.ones(b.shape))
# readonly maps can be read but not updated
c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r',
offset=5 * 4)
assert_raises(AssertionError, p.map, double,
[(c, i, 3.0) for i in range(c.shape[0])])
# depending on the version of numpy one can either get a RuntimeError
# or a ValueError
assert_raises((RuntimeError, ValueError), p.map, double,
[(c, i, 2.0) for i in range(c.shape[0])])
finally:
# Clean all filehandlers held by the pool
p.terminate()
del p
@with_numpy
@with_multiprocessing
@with_temp_folder
def test_pool_with_memmap_array_view():
"""Check that subprocess can access and update shared memory array"""
assert_array_equal = np.testing.assert_array_equal
# Fork the subprocess before allocating the objects to be passed
pool_temp_folder = os.path.join(TEMP_FOLDER, 'pool')
os.makedirs(pool_temp_folder)
p = MemmapingPool(10, max_nbytes=2, temp_folder=pool_temp_folder)
try:
filename = os.path.join(TEMP_FOLDER, 'test.mmap')
a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
a.fill(1.0)
# Create an ndarray view on the memmap instance
a_view = np.asarray(a)
assert_false(isinstance(a_view, np.memmap))
assert_true(has_shareable_memory(a_view))
p.map(double, [(a_view, (i, j), 1.0)
for i in range(a.shape[0])
for j in range(a.shape[1])])
# Both a and the a_view have been updated
assert_array_equal(a, 2 * np.ones(a.shape))
assert_array_equal(a_view, 2 * np.ones(a.shape))
# Passing memmap array view to the pool should not trigger the
# creation of new files on the FS
assert_equal(os.listdir(pool_temp_folder), [])
finally:
p.terminate()
del p
@with_numpy
@with_multiprocessing
@with_temp_folder
def test_memmaping_pool_for_large_arrays():
"""Check that large arrays are not copied in memory"""
assert_array_equal = np.testing.assert_array_equal
# Check that the tempfolder is empty
assert_equal(os.listdir(TEMP_FOLDER), [])
    # Build an array reducer that automatically dumps large array content
    # to filesystem-backed memmap instances to avoid memory explosion
p = MemmapingPool(3, max_nbytes=40, temp_folder=TEMP_FOLDER)
try:
        # The temporary folder for the pool is not provisioned in advance
assert_equal(os.listdir(TEMP_FOLDER), [])
assert_false(os.path.exists(p._temp_folder))
small = np.ones(5, dtype=np.float32)
assert_equal(small.nbytes, 20)
p.map(double, [(small, i, 1.0) for i in range(small.shape[0])])
# Memory has been copied, the pool filesystem folder is unused
assert_equal(os.listdir(TEMP_FOLDER), [])
# Try with a file larger than the memmap threshold of 40 bytes
large = np.ones(100, dtype=np.float64)
assert_equal(large.nbytes, 800)
p.map(double, [(large, i, 1.0) for i in range(large.shape[0])])
        # By default, the mmap_mode is copy-on-write, so each pool
        # process can modify its view individually as if it had
        # received its own copy of the original array. The original array
        # (which is not a shared memmap instance) is untouched.
assert_false(has_shareable_memory(large))
assert_array_equal(large, np.ones(100))
        # The data has been dumped in a temp folder for subprocesses to share it
# without per-child memory copies
assert_true(os.path.isdir(p._temp_folder))
dumped_filenames = os.listdir(p._temp_folder)
assert_equal(len(dumped_filenames), 2)
finally:
# check FS garbage upon pool termination
p.terminate()
assert_false(os.path.exists(p._temp_folder))
del p
@with_numpy
@with_multiprocessing
@with_temp_folder
def test_memmaping_pool_for_large_arrays_disabled():
"""Check that large arrays memmaping can be disabled"""
# Set max_nbytes to None to disable the auto memmaping feature
p = MemmapingPool(3, max_nbytes=None, temp_folder=TEMP_FOLDER)
try:
# Check that the tempfolder is empty
assert_equal(os.listdir(TEMP_FOLDER), [])
        # Try with an array larger than the memmap threshold of 40 bytes
large = np.ones(100, dtype=np.float64)
assert_equal(large.nbytes, 800)
p.map(double, [(large, i, 1.0) for i in range(large.shape[0])])
# Check that the tempfolder is still empty
assert_equal(os.listdir(TEMP_FOLDER), [])
finally:
# Cleanup open file descriptors
p.terminate()
del p
@with_numpy
@with_multiprocessing
@with_temp_folder
def test_memmaping_pool_for_large_arrays_in_return():
"""Check that large arrays are not copied in memory in return"""
assert_array_equal = np.testing.assert_array_equal
    # Build an array reducer that automatically dumps large array content,
    # but check that the returned data structures are regular arrays, to avoid
    # passing a memmap array pointing to a pool-controlled temp folder that
    # might be confusing to the user
# The MemmapingPool user can always return numpy.memmap object explicitly
# to avoid memory copy
p = MemmapingPool(3, max_nbytes=10, temp_folder=TEMP_FOLDER)
try:
res = p.apply_async(np.ones, args=(1000,))
large = res.get()
assert_false(has_shareable_memory(large))
assert_array_equal(large, np.ones(1000))
finally:
p.terminate()
del p
def _worker_multiply(a, n_times):
"""Multiplication function to be executed by subprocess"""
assert_true(has_shareable_memory(a))
return a * n_times
@with_numpy
@with_multiprocessing
@with_temp_folder
def test_workaround_against_bad_memmap_with_copied_buffers():
"""Check that memmaps with a bad buffer are returned as regular arrays
Unary operations and ufuncs on memmap instances return a new memmap
instance with an in-memory buffer (probably a numpy bug).
"""
assert_array_equal = np.testing.assert_array_equal
p = MemmapingPool(3, max_nbytes=10, temp_folder=TEMP_FOLDER)
try:
        # Send a complex, large-ish view on an array that will be converted to
# a memmap in the worker process
a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
order='F')[:, :1, :]
# Call a non-inplace multiply operation on the worker and memmap and
# send it back to the parent.
b = p.apply_async(_worker_multiply, args=(a, 3)).get()
assert_false(has_shareable_memory(b))
assert_array_equal(b, 3 * a)
finally:
p.terminate()
del p
| bsd-3-clause | 4,695,593,111,768,645,000 | 32.774536 | 78 | 0.657975 | false |
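The tests above exercise joblib's automatic memmapping of large arrays passed to worker processes. Below is a minimal usage sketch against the same old-style `joblib.pool` API that this vendored test module targets; the threshold, worker function and array size are illustrative assumptions:

```python
import numpy as np
from joblib.pool import MemmapingPool    # old vendored API, as used by this test module

def doubled_sum(a):
    return 2 * a.sum()

pool = MemmapingPool(2, max_nbytes=1e6)  # arrays above ~1 MB are dumped to a shared memmap
try:
    big = np.ones(int(2e6))              # ~16 MB, so it reaches the workers as a memmap
    print(pool.apply_async(doubled_sum, (big,)).get())
finally:
    pool.terminate()                     # also removes the pool's temporary memmap folder
```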
karulont/combopt | project4/schedule.py | 1 | 1052 | import json
from data import read_instance_from_file
from gurobipy import *
from sys import argv
print("Reading file...")
inst = read_instance_from_file(argv[1] if len(argv) > 1 else "rnd-6000.sch")
costs = inst[0]
precedence = inst[1]
n = len(costs)
print("Number of tasks: " + str(n))
m = Model('schedule')
# Start time variables
start = {}
for i in range(0,n):
start[i] = m.addVar(obj=1, name='start_%s' % i)
m.update()
# Precedence constraints
for p in precedence:
i1 = p[0]
i2 = p[1]
m.addConstr(start[i1] + costs[i1], GRB.LESS_EQUAL, start[i2],
'cap_%s_%s' % (i1,i2))
m.optimize()
# Print solution
if m.status == GRB.status.OPTIMAL:
solution = m.getAttr('x', start)
s = []
max_endtime = -1
for i in range(n):
s.append(solution[i])
if solution[i] + costs[i] > max_endtime:
max_endtime = solution[i] + costs[i]
print('max: %s' % max_endtime)
with open(argv[2] if len(argv) == 3 else "rnd-6000.sol", 'w') as f:
json.dump(s,f)
else:
print("No solution")
| mit | -7,187,988,382,677,800,000 | 22.377778 | 76 | 0.604563 | false |
Eagles2F/sync-engine | migrations/versions/078_events.py | 6 | 2006 | """events
Revision ID: 1c2253a0e997
Revises: 3c02d8204335
Create Date: 2014-08-07 00:12:40.148311
"""
# revision identifiers, used by Alembic.
revision = '1c2253a0e997'
down_revision = '3c74cbe7882e'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'event',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uid', sa.String(length=64), nullable=False),
sa.Column('provider_name', sa.String(length=64), nullable=False),
sa.Column('public_id', sa.BINARY(length=16), nullable=False),
sa.Column('raw_data', sa.Text(), nullable=False),
sa.Column('account_id', sa.Integer(), nullable=False),
sa.Column('subject', sa.String(length=255), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('location', sa.String(length=255), nullable=True),
sa.Column('busy', sa.Boolean(), nullable=False),
sa.Column('locked', sa.Boolean(), nullable=False),
sa.Column('reminders', sa.String(length=255), nullable=True),
sa.Column('recurrence', sa.String(length=255), nullable=True),
sa.Column('start', sa.DateTime(), nullable=False),
sa.Column('end', sa.DateTime(), nullable=True),
sa.Column('all_day', sa.Boolean(), nullable=False),
sa.Column('time_zone', sa.Integer(), nullable=False),
sa.Column('source', sa.Enum('remote', 'local'), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['account_id'], ['account.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
)
op.add_column('account', sa.Column('last_synced_events', sa.DateTime(),
nullable=True))
def downgrade():
op.drop_table('event')
op.drop_column('account', 'last_synced_events')
| agpl-3.0 | 6,082,821,760,659,040,000 | 38.333333 | 75 | 0.623629 | false |
azlanismail/prismgames | examples/games/car/networkx/algorithms/centrality/betweenness.py | 1 | 10983 | """
Betweenness centrality measures.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import heapq
import networkx as nx
import random
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['betweenness_centrality',
'edge_betweenness_centrality',
'edge_betweenness']
def betweenness_centrality(G, k=None, normalized=True, weight=None,
endpoints=False,
seed=None):
r"""Compute the shortest-path betweenness centrality for nodes.
Betweenness centrality of a node `v` is the sum of the
fraction of all-pairs shortest paths that pass through `v`:
.. math::
c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}
where `V` is the set of nodes, `\sigma(s, t)` is the number of
shortest `(s, t)`-paths, and `\sigma(s, t|v)` is the number of those
paths passing through some node `v` other than `s, t`.
If `s = t`, `\sigma(s, t) = 1`, and if `v \in {s, t}`,
`\sigma(s, t|v) = 0` [2]_.
Parameters
----------
G : graph
A NetworkX graph
k : int, optional (default=None)
If k is not None use k node samples to estimate betweenness.
The value of k <= n where n is the number of nodes in the graph.
Higher values give better approximation.
normalized : bool, optional
If True the betweenness values are normalized by `2/((n-1)(n-2))`
for graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
endpoints : bool, optional
If True include the endpoints in the shortest path counts.
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
edge_betweenness_centrality
load_centrality
Notes
-----
The algorithm is from Ulrik Brandes [1]_.
See [2]_ for details on algorithms for variations and related metrics.
For approximate betweenness calculations set k=#samples to use
k nodes ("pivots") to estimate the betweenness values. For an estimate
of the number of pivots needed see [3]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
References
----------
.. [1] A Faster Algorithm for Betweenness Centrality.
Ulrik Brandes,
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
.. [3] Ulrik Brandes and Christian Pich:
Centrality Estimation in Large Networks.
International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007.
http://www.inf.uni-konstanz.de/algo/publications/bp-celn-06.pdf
"""
betweenness=dict.fromkeys(G,0.0) # b[v]=0 for v in G
if k is None:
nodes = G
else:
random.seed(seed)
nodes = random.sample(G.nodes(), k)
for s in nodes:
# single source shortest paths
if weight is None: # use BFS
S,P,sigma=_single_source_shortest_path_basic(G,s)
else: # use Dijkstra's algorithm
S,P,sigma=_single_source_dijkstra_path_basic(G,s,weight)
# accumulation
if endpoints:
betweenness=_accumulate_endpoints(betweenness,S,P,sigma,s)
else:
betweenness=_accumulate_basic(betweenness,S,P,sigma,s)
# rescaling
betweenness=_rescale(betweenness, len(G),
normalized=normalized,
directed=G.is_directed(),
k=k)
return betweenness
def edge_betweenness_centrality(G,normalized=True,weight=None):
r"""Compute betweenness centrality for edges.
Betweenness centrality of an edge `e` is the sum of the
fraction of all-pairs shortest paths that pass through `e`:
.. math::
c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}
where `V` is the set of nodes,`\sigma(s, t)` is the number of
shortest `(s, t)`-paths, and `\sigma(s, t|e)` is the number of
those paths passing through edge `e` [2]_.
Parameters
----------
G : graph
A NetworkX graph
normalized : bool, optional
If True the betweenness values are normalized by `2/(n(n-1))`
for graphs, and `1/(n(n-1))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
edges : dictionary
Dictionary of edges with betweenness centrality as the value.
See Also
--------
betweenness_centrality
edge_load
Notes
-----
The algorithm is from Ulrik Brandes [1]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
References
----------
.. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes,
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
"""
betweenness=dict.fromkeys(G,0.0) # b[v]=0 for v in G
# b[e]=0 for e in G.edges()
betweenness.update(dict.fromkeys(G.edges(),0.0))
for s in G:
# single source shortest paths
if weight is None: # use BFS
S,P,sigma=_single_source_shortest_path_basic(G,s)
else: # use Dijkstra's algorithm
S,P,sigma=_single_source_dijkstra_path_basic(G,s,weight)
# accumulation
betweenness=_accumulate_edges(betweenness,S,P,sigma,s)
# rescaling
for n in G: # remove nodes to only return edges
del betweenness[n]
betweenness=_rescale_e(betweenness, len(G),
normalized=normalized,
directed=G.is_directed())
return betweenness
# obsolete name
def edge_betweenness(G,normalized=True,weight=None):
return edge_betweenness_centrality(G,normalized,weight)
# helpers for betweenness centrality
def _single_source_shortest_path_basic(G,s):
S=[]
P={}
for v in G:
P[v]=[]
sigma=dict.fromkeys(G,0.0) # sigma[v]=0 for v in G
D={}
sigma[s]=1.0
D[s]=0
Q=[s]
while Q: # use BFS to find shortest paths
v=Q.pop(0)
S.append(v)
Dv=D[v]
sigmav=sigma[v]
for w in G[v]:
if w not in D:
Q.append(w)
D[w]=Dv+1
if D[w]==Dv+1: # this is a shortest path, count paths
sigma[w] += sigmav
P[w].append(v) # predecessors
return S,P,sigma
def _single_source_dijkstra_path_basic(G,s,weight='weight'):
# modified from Eppstein
S=[]
P={}
for v in G:
P[v]=[]
sigma=dict.fromkeys(G,0.0) # sigma[v]=0 for v in G
D={}
sigma[s]=1.0
push=heapq.heappush
pop=heapq.heappop
seen = {s:0}
Q=[] # use Q as heap with (distance,node id) tuples
push(Q,(0,s,s))
while Q:
(dist,pred,v)=pop(Q)
if v in D:
continue # already searched this node.
sigma[v] += sigma[pred] # count paths
S.append(v)
D[v] = dist
for w,edgedata in G[v].items():
vw_dist = dist + edgedata.get(weight,1)
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
push(Q,(vw_dist,v,w))
sigma[w]=0.0
P[w]=[v]
elif vw_dist==seen[w]: # handle equal paths
sigma[w] += sigma[v]
P[w].append(v)
return S,P,sigma
def _accumulate_basic(betweenness,S,P,sigma,s):
delta=dict.fromkeys(S,0)
while S:
w=S.pop()
coeff=(1.0+delta[w])/sigma[w]
for v in P[w]:
delta[v] += sigma[v]*coeff
if w != s:
betweenness[w]+=delta[w]
return betweenness
def _accumulate_endpoints(betweenness,S,P,sigma,s):
betweenness[s]+=len(S)-1
delta=dict.fromkeys(S,0)
while S:
w=S.pop()
coeff=(1.0+delta[w])/sigma[w]
for v in P[w]:
delta[v] += sigma[v]*coeff
if w != s:
betweenness[w] += delta[w]+1
return betweenness
def _accumulate_edges(betweenness,S,P,sigma,s):
delta=dict.fromkeys(S,0)
while S:
w=S.pop()
coeff=(1.0+delta[w])/sigma[w]
for v in P[w]:
c=sigma[v]*coeff
if (v,w) not in betweenness:
betweenness[(w,v)]+=c
else:
betweenness[(v,w)]+=c
delta[v]+=c
if w != s:
betweenness[w]+=delta[w]
return betweenness
def _rescale(betweenness,n,normalized,directed=False,k=None):
if normalized is True:
if n <=2:
scale=None # no normalization b=0 for all nodes
else:
scale=1.0/((n-1)*(n-2))
else: # rescale by 2 for undirected graphs
if not directed:
scale=1.0/2.0
else:
scale=None
if scale is not None:
if k is not None:
scale=scale*n/k
for v in betweenness:
betweenness[v] *= scale
return betweenness
def _rescale_e(betweenness,n,normalized,directed=False):
if normalized is True:
if n <=1:
scale=None # no normalization b=0 for all nodes
else:
scale=1.0/(n*(n-1))
else: # rescale by 2 for undirected graphs
if not directed:
scale=1.0/2.0
else:
scale=None
if scale is not None:
for v in betweenness:
betweenness[v] *= scale
return betweenness
| gpl-2.0 | 6,443,564,717,491,557,000 | 30.883234 | 76 | 0.565783 | false |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/pylab_examples/fonts_demo.py | 12 | 2765 | #!/usr/bin/env python
"""
Show how to set custom font properties.
For interactive users, you can also use kwargs to the text command,
which requires less typing. See examples/fonts_demo_kw.py
"""
from matplotlib.font_manager import FontProperties
from pylab import *
subplot(111, axisbg='w')
font0 = FontProperties()
alignment = {'horizontalalignment':'center', 'verticalalignment':'baseline'}
### Show family options
family = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
font1 = font0.copy()
font1.set_size('large')
t = text(-0.8, 0.9, 'family', fontproperties=font1,
**alignment)
yp = [0.7, 0.5, 0.3, 0.1, -0.1, -0.3, -0.5]
for k in range(5):
font = font0.copy()
font.set_family(family[k])
if k == 2:
font.set_name('Script MT')
t = text(-0.8, yp[k], family[k], fontproperties=font,
**alignment)
### Show style options
style = ['normal', 'italic', 'oblique']
t = text(-0.4, 0.9, 'style', fontproperties=font1,
**alignment)
for k in range(3):
font = font0.copy()
font.set_family('sans-serif')
font.set_style(style[k])
t = text(-0.4, yp[k], style[k], fontproperties=font,
**alignment)
### Show variant options
variant= ['normal', 'small-caps']
t = text(0.0, 0.9, 'variant', fontproperties=font1,
**alignment)
for k in range(2):
font = font0.copy()
font.set_family('serif')
font.set_variant(variant[k])
t = text( 0.0, yp[k], variant[k], fontproperties=font,
**alignment)
### Show weight options
weight = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = text( 0.4, 0.9, 'weight', fontproperties=font1,
**alignment)
for k in range(7):
font = font0.copy()
font.set_weight(weight[k])
t = text( 0.4, yp[k], weight[k], fontproperties=font,
**alignment)
### Show size options
size = ['xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large']
t = text( 0.8, 0.9, 'size', fontproperties=font1,
**alignment)
for k in range(7):
font = font0.copy()
font.set_size(size[k])
t = text( 0.8, yp[k], size[k], fontproperties=font,
**alignment)
### Show bold italic
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-small')
t = text(0, 0.1, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('medium')
t = text(0, 0.2, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-large')
t = text(0, 0.3, 'bold italic', fontproperties=font,
**alignment)
axis([-1,1,0,1])
show()
| mit | -1,019,785,043,437,855,100 | 23.043478 | 76 | 0.609042 | false |
SOM-st/RPySOM | src/rlib/objectmodel.py | 1 | 1155 | import types
import sys
if sys.version_info.major > 2:
str_type = str
else:
str_type = (str, unicode)
try:
from rpython.rlib.objectmodel import we_are_translated, compute_identity_hash, compute_hash, instantiate
from rpython.rlib.longlong2float import longlong2float, float2longlong
except ImportError:
"NOT_RPYTHON"
def we_are_translated():
return False
def compute_identity_hash(x):
assert x is not None
return object.__hash__(x)
def compute_hash(x):
if isinstance(x, str_type):
return hash(x)
if isinstance(x, int):
return x
if isinstance(x, float):
return hash(x)
if isinstance(x, tuple):
return hash(x)
if x is None:
return 0
return compute_identity_hash(x)
def instantiate(cls, nonmovable=False):
"Create an empty instance of 'cls'."
if isinstance(cls, type):
return cls.__new__(cls)
else:
return types.InstanceType(cls)
def longlong2float(value):
return value
def float2longlong(value):
return value
| mit | 5,702,269,066,021,179,000 | 24.108696 | 108 | 0.602597 | false |
adwiputra/LUMENS-repo | processing/molusce/aboutdialog.py | 2 | 3005 | # -*- coding: utf-8 -*-
#******************************************************************************
#
# MOLUSCE
# ---------------------------------------------------------
# Modules for Land Use Change Simulations
#
# Copyright (C) 2012-2013 NextGIS ([email protected])
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/licenses/>. You can also obtain it by writing
# to the Free Software Foundation, 51 Franklin Street, Suite 500 Boston,
# MA 02110-1335 USA.
#
#******************************************************************************
import os
import ConfigParser
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ui.ui_aboutdialogbase import Ui_Dialog
import resources_rc
class AboutDialog(QDialog, Ui_Dialog):
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.btnHelp = self.buttonBox.button(QDialogButtonBox.Help)
cfg = ConfigParser.SafeConfigParser()
cfg.read(os.path.join(os.path.dirname(__file__), "metadata.txt"))
version = cfg.get("general", "version")
self.lblLogo.setPixmap(QPixmap(":/icons/molusce.png"))
self.lblVersion.setText(self.tr("Version: %s") % (version))
doc = QTextDocument()
doc.setHtml(self.getAboutText())
self.textBrowser.setDocument(doc)
self.buttonBox.helpRequested.connect(self.openHelp)
def reject(self):
QDialog.reject(self)
def openHelp(self):
overrideLocale = QSettings().value("locale/overrideFlag", False)
if not overrideLocale:
localeFullName = QLocale.system().name()
else:
localeFullName = QSettings().value("locale/userLocale", "")
localeShortName = localeFullName[ 0:2 ]
if localeShortName in [ "ru", "uk" ]:
QDesktopServices.openUrl(QUrl("http://hub.qgis.org/projects/molusce/wiki"))
else:
QDesktopServices.openUrl(QUrl("http://hub.qgis.org/projects/molusce/wiki"))
def getAboutText(self):
return self.tr("""<p>Modules for Land Use Change Simulations.</p>
<p>Plugin provides a set of algorithms for land use change simulations such as
ANN, LR, WoE, MCE. There is also validation using kappa statistics.</p>
<p>Developed by <a href="http://www.asiaairsurvey.com/">Asia Air Survey</a> and <a href="http://nextgis.org">NextGIS</a>.</p>
<p><strong>Homepage</strong>: <a href="http://hub.qgis.org/projects/molusce">http://hub.qgis.org/projects/molusce</a></p>
<p>Please report bugs at <a href="http://hub.qgis.org/projects/molusce/issues">bugtracker</a></p>
""")
| gpl-2.0 | 5,081,687,952,249,034,000 | 36.5625 | 125 | 0.663894 | false |
zimmerman-zimmerman/OIPA | OIPA/iati/migrations/0044_auto_20180917_1527.py | 2 | 1162 | # Generated by Django 2.0.6 on 2018-09-17 15:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('iati_codelists', '0010_aidtype_vocabulary'),
('iati', '0043_remove_transaction_aid_type'),
]
operations = [
migrations.CreateModel(
name='ActivityDefaultAidType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RemoveField(
model_name='activity',
name='default_aid_type',
),
migrations.AddField(
model_name='activitydefaultaidtype',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_aid_types', to='iati.Activity'),
),
migrations.AddField(
model_name='activitydefaultaidtype',
name='aid_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iati_codelists.AidType'),
),
]
| agpl-3.0 | 7,323,482,272,445,350,000 | 32.2 | 135 | 0.600688 | false |
prculley/gramps | gramps/gui/widgets/labels.py | 2 | 8895 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
__all__ = ["LinkLabel", "EditLabel", "BasicLabel", "GenderLabel",
"MarkupLabel", "DualMarkupLabel"]
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import os
from html import escape
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
import logging
_LOG = logging.getLogger(".widgets.labels")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.constfunc import has_display, win
from ..utils import get_link_color
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
if has_display():
HAND_CURSOR = Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
Gdk.CursorType.HAND2)
#-------------------------------------------------------------------------
#
# Module functions
#
#-------------------------------------------------------------------------
def realize_cb(widget):
widget.get_window().set_cursor(HAND_CURSOR)
#-------------------------------------------------------------------------
#
# LinkLabel class
#
#-------------------------------------------------------------------------
class LinkLabel(Gtk.EventBox):
def __init__(self, label, func, handle, emph=False, theme="CLASSIC"):
self.theme = theme
self.emph = emph
Gtk.EventBox.__init__(self)
st_cont = self.get_style_context()
self.color = get_link_color(st_cont)
if emph:
#emphasize a link
if theme == "CLASSIC":
format = 'underline="single" weight="heavy" style="italic"'
elif theme == "WEBPAGE":
format = 'foreground="' + self.color + '" weight="heavy"'
else:
raise AttributeError("invalid theme: '%s'" % theme)
elif emph is None:
#emphasize, but not a link
if theme == "CLASSIC":
format = 'weight="heavy"'
elif theme == "WEBPAGE":
format = 'weight="heavy"'
else:
raise AttributeError("invalid theme: '%s'" % theme)
else:
#no emphasize, a link
if theme == "CLASSIC":
format = 'underline="single"'
elif theme == "WEBPAGE":
format = 'foreground="' + self.color + '"'
else:
raise AttributeError("invalid theme: '%s'" % theme)
self.orig_text = escape(label[0])
self.gender = label[1]
self.decoration = format
text = '<span %s>%s</span>' % (self.decoration, self.orig_text)
if func:
msg = _('Click to make this person active\n'
'Right click to display the edit menu\n'
'Click Edit icon (enable in configuration dialog) to edit')
self.set_tooltip_text(msg)
self.label = Gtk.Label(label=text)
self.label.set_use_markup(True)
self.label.set_halign(Gtk.Align.START)
hbox = Gtk.Box()
hbox.pack_start(self.label, False, False, 0)
if label[1]:
hbox.pack_start(GenderLabel(label[1]), False, False, 0)
hbox.set_spacing(4)
self.add(hbox)
if func:
self.connect('button-press-event', func, handle)
self.connect('enter-notify-event', self.enter_text, handle)
self.connect('leave-notify-event', self.leave_text, handle)
self.connect('realize', realize_cb)
def set_padding(self, x, y):
self.label.set_padding(x, y)
def enter_text(self, obj, event, handle):
if self.emph:
#emphasize a link
if self.theme == "CLASSIC":
format = 'foreground="' + self.color + '" underline="single" '\
'weight="heavy" style="italic"'
elif self.theme == "WEBPAGE":
format = 'underline="single" foreground="' + self.color + '" '\
'weight="heavy"'
else:
raise AttributeError("invalid theme: '%s'" % self.theme)
elif self.emph is None:
# no link, no change on enter_text
if self.theme == "CLASSIC":
format = 'weight="heavy"'
elif self.theme == "WEBPAGE":
format = 'weight="heavy"'
else:
raise AttributeError("invalid theme: '%s'" % self.theme)
else:
#no emphasize, a link
if self.theme == "CLASSIC":
format = 'foreground="' + self.color + '" underline="single"'
elif self.theme == "WEBPAGE":
format = 'underline="single" foreground="' + self.color + '"'
else:
raise AttributeError("invalid theme: '%s'" % self.theme)
text = '<span %s>%s</span>' % (format, self.orig_text)
self.label.set_text(text)
self.label.set_use_markup(True)
def leave_text(self, obj, event, handle):
text = '<span %s>%s</span>' % (self.decoration, self.orig_text)
self.label.set_text(text)
self.label.set_use_markup(True)
#-------------------------------------------------------------------------
#
# EditLabel class
#
#-------------------------------------------------------------------------
class EditLabel(Gtk.Box):
def __init__(self, text):
Gtk.Box.__init__(self)
label = BasicLabel(text)
self.pack_start(label, False, True, 0)
self.pack_start(Gtk.Image.new_from_icon_name('gtk-edit',
                                                      Gtk.IconSize.MENU), False, True, 0)
self.set_spacing(4)
self.show_all()
#-------------------------------------------------------------------------
#
# BasicLabel class
#
#-------------------------------------------------------------------------
class BasicLabel(Gtk.Label):
def __init__(self, text, ellipsize=Pango.EllipsizeMode.NONE):
Gtk.Label.__init__(self, label=text)
self.set_halign(Gtk.Align.START)
self.set_ellipsize(ellipsize)
self.show()
#-------------------------------------------------------------------------
#
# GenderLabel class
#
#-------------------------------------------------------------------------
class GenderLabel(Gtk.Label):
def __init__(self, text):
Gtk.Label.__init__(self, label=text)
self.set_halign(Gtk.Align.START)
if win():
pangoFont = Pango.FontDescription('Arial')
self.override_font(pangoFont)
self.show()
#-------------------------------------------------------------------------
#
# MarkupLabel class
#
#-------------------------------------------------------------------------
class MarkupLabel(Gtk.Label):
def __init__(self, text, halign=Gtk.Align.START):
Gtk.Label.__init__(self, label=text)
self.set_halign(halign)
self.set_use_markup(True)
self.show_all()
#-------------------------------------------------------------------------
#
# DualMarkupLabel class
#
#-------------------------------------------------------------------------
class DualMarkupLabel(Gtk.Box):
def __init__(self, text, alt, halign=Gtk.Align.START):
Gtk.Box.__init__(self)
label = Gtk.Label(label=text)
label.set_halign(halign)
label.set_use_markup(True)
self.pack_start(label, False, False, 0)
b = GenderLabel(alt)
b.set_use_markup(True)
self.pack_start(b, False, False, 4)
self.show()
| gpl-2.0 | -3,742,808,405,553,585,000 | 34.158103 | 79 | 0.469365 | false |
eduNEXT/edunext-platform | lms/djangoapps/teams/tests/test_serializers.py | 3 | 11183 | # -*- coding: utf-8 -*-
"""
Tests for custom Teams Serializers.
"""
import six
from django.core.paginator import Paginator
from django.test.client import RequestFactory
from lms.djangoapps.teams.serializers import BulkTeamCountTopicSerializer, MembershipSerializer, TopicSerializer
from lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from openedx.core.lib.teams_config import TeamsConfig
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class SerializerTestCase(SharedModuleStoreTestCase):
"""
Base test class to set up a course with topics
"""
def setUp(self):
"""
Set up a course with a teams configuration.
"""
super(SerializerTestCase, self).setUp()
self.course = CourseFactory.create(
teams_configuration=TeamsConfig({
"max_team_size": 10,
"topics": [{u'name': u'Tøpic', u'description': u'The bést topic!', u'id': u'0'}]
}),
)
class MembershipSerializerTestCase(SerializerTestCase):
"""
Tests for the membership serializer.
"""
def setUp(self):
super(MembershipSerializerTestCase, self).setUp()
self.team = CourseTeamFactory.create(
course_id=self.course.id,
topic_id=self.course.teamsets[0].teamset_id,
)
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.team_membership = CourseTeamMembershipFactory.create(team=self.team, user=self.user)
def test_membership_serializer_expand_user_and_team(self):
"""Verify that the serializer only expands the user and team one level."""
data = MembershipSerializer(self.team_membership, context={
'expand': [u'team', u'user'],
'request': RequestFactory().get('/api/team/v0/team_membership')
}).data
username = self.user.username
self.assertEqual(data['user'], {
'url': 'http://testserver/api/user/v1/accounts/' + username,
'username': username,
'profile_image': {
'image_url_full': 'http://testserver/static/default_500.png',
'image_url_large': 'http://testserver/static/default_120.png',
'image_url_medium': 'http://testserver/static/default_50.png',
'image_url_small': 'http://testserver/static/default_30.png',
'has_image': False
},
'account_privacy': 'private'
})
self.assertNotIn('membership', data['team'])
class TopicSerializerTestCase(SerializerTestCase):
"""
Tests for the `TopicSerializer`, which should serialize team count data for
a single topic.
"""
def test_topic_with_no_team_count(self):
"""
Verifies that the `TopicSerializer` correctly displays a topic with a
team count of 0, and that it only takes one SQL query.
"""
with self.assertNumQueries(1):
serializer = TopicSerializer(
self.course.teamsets[0].cleaned_data,
context={'course_id': self.course.id},
)
self.assertEqual(
serializer.data,
{
u'name': u'Tøpic',
u'description': u'The bést topic!',
u'id': u'0',
u'team_count': 0,
u'type': u'open',
u'max_team_size': None
}
)
def test_topic_with_team_count(self):
"""
Verifies that the `TopicSerializer` correctly displays a topic with a
positive team count, and that it only takes one SQL query.
"""
CourseTeamFactory.create(
course_id=self.course.id, topic_id=self.course.teamsets[0].teamset_id
)
with self.assertNumQueries(1):
serializer = TopicSerializer(
self.course.teamsets[0].cleaned_data,
context={'course_id': self.course.id},
)
self.assertEqual(
serializer.data,
{
u'name': u'Tøpic',
u'description': u'The bést topic!',
u'id': u'0',
u'team_count': 1,
u'type': u'open',
u'max_team_size': None
}
)
def test_scoped_within_course(self):
"""Verify that team count is scoped within a course."""
duplicate_topic = self.course.teamsets[0].cleaned_data
second_course = CourseFactory.create(
teams_configuration=TeamsConfig({
"max_team_size": 10,
"topics": [duplicate_topic]
}),
)
CourseTeamFactory.create(course_id=self.course.id, topic_id=duplicate_topic[u'id'])
CourseTeamFactory.create(course_id=second_course.id, topic_id=duplicate_topic[u'id'])
with self.assertNumQueries(1):
serializer = TopicSerializer(
self.course.teamsets[0].cleaned_data,
context={'course_id': self.course.id},
)
self.assertEqual(
serializer.data,
{
u'name': u'Tøpic',
u'description': u'The bést topic!',
u'id': u'0',
u'team_count': 1,
u'type': u'open',
u'max_team_size': None
}
)
class BaseTopicSerializerTestCase(SerializerTestCase):
"""
Base class for testing the two paginated topic serializers.
"""
__test__ = False
PAGE_SIZE = 5
# Extending test classes should specify their serializer class.
serializer = None
def _merge_dicts(self, first, second):
"""Convenience method to merge two dicts in a single expression"""
result = first.copy()
result.update(second)
return result
def setup_topics(self, num_topics=5, teams_per_topic=0):
"""
Helper method to set up topics on the course. Returns a list of
created topics.
"""
topics = [
{
'name': 'Tøpic {}'.format(i),
'description': 'The bést topic! {}'.format(i),
'id': six.text_type(i),
'type': 'open',
'max_team_size': i + 10
}
for i in six.moves.range(num_topics)
]
for topic in topics:
for _ in six.moves.range(teams_per_topic):
CourseTeamFactory.create(course_id=self.course.id, topic_id=topic['id'])
self.course.teams_configuration = TeamsConfig({
'max_team_size': self.course.teams_configuration.default_max_team_size,
'topics': topics,
})
return topics
def assert_serializer_output(self, topics, num_teams_per_topic, num_queries):
"""
Verify that the serializer produced the expected topics.
"""
with self.assertNumQueries(num_queries):
page = Paginator(
self.course.teams_configuration.cleaned_data['teamsets'],
self.PAGE_SIZE,
).page(1)
# pylint: disable=not-callable
serializer = self.serializer(instance=page, context={'course_id': self.course.id})
self.assertEqual(
serializer.data['results'],
[self._merge_dicts(topic, {u'team_count': num_teams_per_topic}) for topic in topics]
)
def test_no_topics(self):
"""
Verify that we return no results and make no SQL queries for a page
with no topics.
"""
self.course.teams_configuration = TeamsConfig({'topics': []})
self.assert_serializer_output([], num_teams_per_topic=0, num_queries=0)
class BulkTeamCountTopicSerializerTestCase(BaseTopicSerializerTestCase):
"""
Tests for the `BulkTeamCountTopicSerializer`, which should serialize team_count
    data for many topics using a constant number of SQL queries.
"""
__test__ = True
serializer = BulkTeamCountTopicSerializer
NUM_TOPICS = 6
def test_topics_with_no_team_counts(self):
"""
Verify that we serialize topics with no team count, making only one SQL
query.
"""
topics = self.setup_topics(teams_per_topic=0)
self.assert_serializer_output(topics, num_teams_per_topic=0, num_queries=1)
def test_topics_with_team_counts(self):
"""
Verify that we serialize topics with a positive team count, making only
one SQL query.
"""
teams_per_topic = 10
topics = self.setup_topics(teams_per_topic=teams_per_topic)
self.assert_serializer_output(topics, num_teams_per_topic=teams_per_topic, num_queries=1)
def test_subset_of_topics(self):
"""
Verify that we serialize a subset of the course's topics, making only
one SQL query.
"""
teams_per_topic = 10
topics = self.setup_topics(num_topics=self.NUM_TOPICS, teams_per_topic=teams_per_topic)
self.assert_serializer_output(topics, num_teams_per_topic=teams_per_topic, num_queries=1)
def test_scoped_within_course(self):
"""Verify that team counts are scoped within a course."""
teams_per_topic = 10
first_course_topics = self.setup_topics(num_topics=self.NUM_TOPICS, teams_per_topic=teams_per_topic)
duplicate_topic = first_course_topics[0]
second_course = CourseFactory.create(
teams_configuration=TeamsConfig({
"max_team_size": 10,
"topics": [duplicate_topic]
}),
)
CourseTeamFactory.create(course_id=second_course.id, topic_id=duplicate_topic[u'id'])
self.assert_serializer_output(first_course_topics, num_teams_per_topic=teams_per_topic, num_queries=1)
def _merge_dicts(self, first, second):
"""Convenience method to merge two dicts in a single expression"""
result = first.copy()
result.update(second)
return result
def assert_serializer_output(self, topics, num_teams_per_topic, num_queries):
"""
Verify that the serializer produced the expected topics.
"""
with self.assertNumQueries(num_queries):
serializer = self.serializer(topics, context={'course_id': self.course.id}, many=True)
self.assertEqual(
serializer.data,
[self._merge_dicts(topic, {u'team_count': num_teams_per_topic}) for topic in topics]
)
def test_no_topics(self):
"""
Verify that we return no results and make no SQL queries for a page
with no topics.
"""
self.course.teams_configuration = TeamsConfig({'topics': []})
self.assert_serializer_output([], num_teams_per_topic=0, num_queries=0)
| agpl-3.0 | 9,129,519,232,389,384,000 | 37.133106 | 112 | 0.585877 | false |
JensWehner/votca-scripts | lib/Carlstuff/evaporation2/__evp_system__.py | 2 | 25082 | from __future__ import division
from __pyosshell__ import *
from __proptions__ import *
from __mdpassist__ import *
from __molecules__ import *
class System(object):
def __init__(self, grofile, topfile, ndxfile, ctrlfile, histfile, cmdlineopt, verbose=True):
# ==========================================================
# Set Population -> Set Options -> Set History -> Evaporator
# ==========================================================
self.grofile = grofile
self.topfile = topfile
self.ndxfile = ndxfile
self.ctrlfile = ctrlfile
self.histfile = histfile
# System components
self.pop = Population(grofile,topfile,verbose)
self.opt = dict_from_bracket_file(ctrlfile)
self.cmdlineopt = cmdlineopt
self.hst = file_to_table(histfile)
self.tag = self.opt['SYSTEM']['tag'][0]
self.evp = None
# Time-keeping
self.set_time(0.0,0.0)
# System dimensions
self.a = None
self.b = None
self.n = None
self.a_min = None
self.a_max = None
self.b_min = None
self.b_max = None
self.n_min = None
self.n_max = None
# Injection point
self.in_pt = None
self.in_dir = None
self.xy_inj_pts = []
# Density profile
self.hst_n_d = None
# System groups
self.fze_idcs = []
self.sub_idcs = []
self.thf_idcs = []
self.iph_idcs = []
def set_time(self,t_in,t_run):
self.pop.t = t_in
self.opt['MDP']['T'] = [t_run]
return
def set_dimensions(self):
# ==========================================================
# () set_dimensions -> get_injection_point -> evaporate_mol
# ==========================================================
self.a = normVector(np.array(self.opt['SUBSTRATE']['a']))
self.b = normVector(np.array(self.opt['SUBSTRATE']['b']))
self.n = normVector(np.array(self.opt['SUBSTRATE']['n']))
a_dist = []
b_dist = []
n_dist = []
for mol in self.pop.mols:
for atom in mol.atoms:
a_dist.append( np.dot(atom.pos,self.a) )
b_dist.append( np.dot(atom.pos,self.b) )
n_dist.append( np.dot(atom.pos,self.n) )
self.a_min = min(a_dist)
self.a_max = max(a_dist)
self.b_min = min(b_dist)
self.b_max = max(b_dist)
self.n_min = min(n_dist)
self.n_max = max(n_dist)
return
def get_height_profile(self, outfile = 'system_height_profile.dat'):
x_res = self.opt['SYSTEM']['res_x'][0]
y_res = self.opt['SYSTEM']['res_y'][0]
# Convert triclinic to cartesian frame for analysis
self.a = normVector(np.array(self.opt['SUBSTRATE']['a']))
self.b = normVector(np.array(self.opt['SUBSTRATE']['b']))
self.n = normVector(np.array(self.opt['SUBSTRATE']['n']))
skewed_to_cart = np.array( [ [self.a[0], self.b[0], self.n[0]],
[self.a[1], self.b[1], self.n[1]],
[self.a[2], self.b[2], self.n[2]] ] )
cart_to_skewed = np.linalg.inv(skewed_to_cart)
pos_in_skewed = []
height_in_skewed = []
for mol in self.pop.mols:
for atom in mol.atoms:
skewed = np.dot(cart_to_skewed,atom.pos)
pos_in_skewed.append( np.array([skewed[0],skewed[1]]) )
height_in_skewed.append( skewed[2] )
#atom.pos = np.dot(cart_to_skewed,atom.pos) # Messing up
base_h = min(height_in_skewed)
# XY Height profile
i_j_xy, i_j_n, i_j_h, ij_xy, ij_n, RES_X, RES_Y = \
list2hist_2d_height(pos_in_skewed, height_in_skewed, x_res, y_res, PROC_H = calc_avg, RETURN_2D = '2d1d')
x_s = []
y_s = []
h_s = []
for i in range(len(i_j_xy)):
for j in range(len(i_j_xy[i])):
x_s.append(i_j_xy[i][j][0])
y_s.append(i_j_xy[i][j][1])
h_s.append(i_j_h[i][j]-base_h)
xm = min(x_s); xM = max(x_s)
ym = min(y_s); yM = max(y_s)
hm = min(h_s); hM = max(h_s)
outt = open(outfile,'w')
outt.write('# X %2.3f %2.3f Y %2.3f %2.3f H %2.3f %2.3f\n' % (xm,xM,ym,yM,hm,hM))
for i in range(len(i_j_xy)):
for j in range(len(i_j_xy[i])):
outt.write('%4.7f %4.7f %4.7f %4d\n' % (i_j_xy[i][j][0],i_j_xy[i][j][1], i_j_h[i][j]-base_h, i_j_n[i][j]))
outt.write('\n')
outt.close()
return
def xy_density(self):
# Figure out substrate molecules
self.auto_group()
x_res = self.opt['SYSTEM']['res_x'][0]
y_res = self.opt['SYSTEM']['res_y'][0]
# Convert triclinic to cartesian frame for analysis
self.a = normVector(np.array(self.opt['SUBSTRATE']['a']))
self.b = normVector(np.array(self.opt['SUBSTRATE']['b']))
self.n = normVector(np.array(self.opt['SUBSTRATE']['n']))
skewed_to_cart = np.array( [ [self.a[0], self.b[0], self.n[0]],
[self.a[1], self.b[1], self.n[1]],
[self.a[2], self.b[2], self.n[2]] ] )
cart_to_skewed = np.linalg.inv(skewed_to_cart)
pos_in_skewed = []
for mol in self.pop.mols:
if mol.Id in self.mol_sub_idcs: continue
for atom in mol.atoms:
skewed = np.dot(cart_to_skewed,atom.pos)
pos_in_skewed.append( np.array([skewed[0],skewed[1]]) )
#atom.pos = np.dot(cart_to_skewed,atom.pos) # Messing up
# XY Height profile
xy_2d, z_2d, xy, z, x_res, y_res = list2hist_2d(pos_in_skewed, x_res, y_res, RETURN_2D = '2d1d')
print "=== XY Height profile === dx %1.3f dy %1.3f" % (x_res, y_res)
if len(z) < 101:
for x in range(len(z_2d)):
for y in range(len(z_2d[x])):
print "%4d " % (z_2d[x][y]),
print ""
else:
print "-> see system_height_profile.dat"
outt = open('system_height_profile.dat','w')
outt.write('# MIN MAX %4.7f %4.7f\n' % (min(z),max(z)))
for ix in range(len(xy_2d)):
for iy in range(len(xy_2d[ix])):
outt.write('%+4.7f %+4.7f %+4.7f\n' % (xy_2d[ix][iy][0],xy_2d[ix][iy][1],z_2d[ix][iy]))
outt.write('\n')
outt.close()
# XY insertion probability
h_min = min(z)
h_max = max(z)
if h_min == h_max:
print "h_min == h_max => Homogeneous insertion."
z = [ 1 for h in z ]
else:
print "Linear insertion weighting in place."
z = [ 1 - (h - h_min) / (h_max - h_min) for h in z ]
Z = sum(z)
p = [ h / sum(z) for h in z ]
# Cumulative probability
cum_p = []
for i in range(len(p)):
cum_p_i = 0.0
for j in range(i,len(p)):
cum_p_i += p[j]
cum_p.append(cum_p_i)
cum_p.append(0)
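        # cum_p[i] now holds the summed probability of bins i..end (a reversed CDF);
        # a uniform random draw is mapped back to a bin by binary search below.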
# TRJCONV -f in -o out -pbc atom -ur tric before anything happens
# !!! NEED TO CORRECT FOR REAL RESOLUTION IN X AND Y !!!
# (list2hist_2d adapts this to fit an integer number of bins into [min,max])
self.xy_inj_pts = []
print "Performing binary search to generate injection points ..."
for i in range(100):
rnd = np.random.uniform()
idx = binary_search_idx(rnd,cum_p)
ab = xy[idx]
a = (ab[0] - 0.5*x_res) + np.random.uniform() * (x_res)
b = (ab[1] - 0.5*y_res) + np.random.uniform() * (y_res)
ab = np.array([a,b,0])
ab_cart = np.dot(skewed_to_cart,ab)
self.xy_inj_pts.append(ab_cart)
#outt = open('inj_pts.xyz','w')
#outt.write('10000\n\n')
#for pt in inj_ab_comp_s:
# outt.write('P %4.7f %4.7f 0.000\n' % (pt[0],pt[1]))
#outt.close()
return
def get_injection_point(self):
# Measure extension of system along n axis (i.e. substrate normal)
self.set_dimensions()
        # If not done yet, compute injection points from the xy density so as to avoid piling molecules in one spot
if self.xy_inj_pts == []:
self.xy_density()
safety_radius = self.opt['SYSTEM']['rad'][0]
injection_height = self.opt['SUBSTRATE']['h'][0]
# n-vector coefficient describing height above lower substrate plane
nc = self.n_min + injection_height
print "Shifting",
while nc < self.n_max + 2*safety_radius:
nc += 2*safety_radius # [nm]
print "...",
print " - Done."
# Exceeds maximum allowed height?
try:
obey_h_max = self.opt['SUBSTRATE']['obey_h_max'][0]
except KeyError:
obey_h_max = False
if nc - self.n_min > self.opt['SUBSTRATE']['h_max'][0]:
if obey_h_max:
return False
else:
print "NOTE Max. injection height exceeded - ignore ",
print "(empty space, if present, may harm parallelization)."
ipt_ab = self.xy_inj_pts.pop(0) # in substrate plane
ipt_n = nc * self.n # along substrate normal
self.in_pt = ipt_ab + ipt_n
self.in_dir = - self.n
return True
def get_injection_point_simple(self):
self.set_dimensions()
safety_radius = self.opt['SYSTEM']['rad'][0]
injection_height = self.opt['SUBSTRATE']['h'][0]
ac = np.random.uniform(self.a_min,self.a_max)
bc = np.random.uniform(self.b_min,self.b_max)
nc = self.n_min + injection_height
while nc < self.n_max + 2*safety_radius:
print "Shifting..."
nc += 2*safety_radius # [nm]
# Exceeds maximum allowed height?
if nc - self.n_min > self.opt['SUBSTRATE']['h_max'][0]:
return False
ipt_ab = ac * self.a + bc * self.b
ipt_n = nc * self.n
self.in_pt = ipt_ab + ipt_n
self.in_dir = - self.n
return True
def evaporate_mol(self, evap_mol):
del self.evp
self.evp = Evaporator(self.opt['EVAPORANT_%1s' % evap_mol])
ret_pt = self.get_injection_point()
if not ret_pt:
print "Cancelled injection: reached maximum allowed h."
return False
try:
const_vel = self.opt['EVAPORANT_%s' % evap_mol]['const_vel'][0]
enforce_const_vel = True
except KeyError:
const_vel = None
enforce_const_vel = False
new_mol = self.evp.create_mol(self.in_pt, self.in_dir, enforce_const_vel, const_vel)
self.pop.append_mol(new_mol)
return True
def group_system(self):
# ==========================================================
# Update dimensions -> density profile -> group system
# ==========================================================
auto = False
try:
auto_group = self.opt['SYSTEM']['auto_group'][0]
if auto_group == 'yes':
auto = True
except KeyError:
pass
if auto:
self.auto_group()
else:
self.set_dimensions()
self.get_density_profile()
self.evaluate_density_profile()
return
def get_density_profile(self, write_to_file=True):
n_dp = self.n
z_dp = []
# Collect projections
for mol in self.pop.mols:
for atom in mol.atoms:
z_dp.append( np.dot(n_dp, atom.pos) )
# Create histogram
min_z = self.n_min
max_z = self.n_max
res_z = self.opt['SYSTEM']['res'][0]
bin_z = int((max_z-min_z)/res_z + 0.5) + 1
hst_z = [ 0 for i in range(bin_z) ]
for z in z_dp:
bin = int((z-min_z)/res_z + 0.5)
hst_z[bin] += 1
max_d = max(hst_z)
hst_z = [ d / max_d for d in hst_z ]
# Store results
self.hst_n_d = [ [min_z+bin*res_z,hst_z[bin]] for bin in range(len(hst_z)) ]
if write_to_file:
outt = open('%1s_density_profile.dat' % (self.grofile[:-4]), 'w')
outt.write("# == DENSITY PROFILE ==\n")
for n_d in self.hst_n_d:
outt.write("%-+02.3f nm %1.7f\n" % (n_d[0], n_d[1]))
outt.close()
return
def evaluate_density_profile(self):
if len(self.hst_n_d) == 0:
self.get_density_profile()
smooth_n_d = []
for bin in range(1,len(self.hst_n_d)-1):
smooth_n = self.hst_n_d[bin][0]
smooth_d = 1/3. * (self.hst_n_d[bin-1][1] + self.hst_n_d[bin][1] + self.hst_n_d[bin+1][1])
smooth_n_d.append([smooth_n,smooth_d])
sub_idcs = []
thf_idcs = []
iph_idcs = []
thf_start_d = self.opt['THINFILM']['density_start'][0]
iph_start_d = self.opt['INTERPHASE']['density_start'][0]
thf_start_n = None
iph_start_n = None
iph_set = False
thf_set = False
smooth_n_d.reverse()
prev_n = smooth_n_d[0][0]
prev_d = smooth_n_d[0][1]
for s in smooth_n_d:
n = s[0]
d = s[1]
if not iph_set and d > iph_start_d:
iph_set = True
iph_start_n = prev_n
if not thf_set and d > thf_start_d:
thf_set = True
thf_start_n = prev_n
else:
pass
prev_n = n
prev_d = d
print "thf everything farther along normal than", thf_start_n
print "iph ... ... ... ...", iph_start_n
self.fze_idcs = []
self.sub_idcs = []
self.thf_idcs = []
self.iph_idcs = []
sub_first = int(self.opt['SUBSTRATE']['first'][0]+0.5)
sub_last = int(self.opt['SUBSTRATE']['last'][0]+0.5)
fze_first = int(self.opt['FREEZE']['first'][0]+0.5)
fze_last = int(self.opt['FREEZE']['last'][0]+0.5)
outt = open('groups_next_iter.gro','w')
outt.write('GROUP ASSIGNMENT FZE SUB THF IPH\n')
outt.write('%7d\n' % self.pop.atom_count())
for mol in self.pop.mols:
proj = np.dot(self.n,mol.com())
for atom in mol.atoms:
# Substrate atom?
if atom.Id >= sub_first and atom.Id <= sub_last:
self.sub_idcs.append(atom.Id)
atom.write_gro_ln(outt, fragName = 'SUB')
continue
# Frozen atom?
if atom.Id >= fze_first and atom.Id <= fze_last:
self.fze_idcs.append(atom.Id)
atom.write_gro_ln(outt, fragName = 'FZE')
continue
if proj >= iph_start_n:
# Interphase ...
self.iph_idcs.append(atom.Id)
atom.write_gro_ln(outt, fragName = 'IPH')
else:
# Thin film ...
self.thf_idcs.append(atom.Id)
atom.write_gro_ln(outt, fragName = 'THF')
outt.write('%1s' % self.pop.box_str)
outt.close()
print "[ freeze ] :", len(self.fze_idcs)
print "[ substrate ] :", len(self.sub_idcs)
print "[ thinfilm ] :", len(self.thf_idcs)
print "[ interphase ] :", len(self.iph_idcs)
def auto_box(self):
try:
auto_scale = int(self.opt['SYSTEM']['auto_box'][0])
except KeyError:
auto_scale = -1
if auto_scale < 1: return
a_dist = []
b_dist = []
n_dist = []
for mol in self.pop.mols:
for atom in mol.atoms:
a_dist.append( np.dot(atom.pos,self.a) )
b_dist.append( np.dot(atom.pos,self.b) )
n_dist.append( np.dot(atom.pos,self.n) )
self.a_min = min(a_dist)
self.a_max = max(a_dist)
self.b_min = min(b_dist)
self.b_max = max(b_dist)
self.n_min = min(n_dist)
self.n_max = max(n_dist)
assert auto_scale in [1,2,3]
print "Auto-scale box ..."
print "Ctrl: Evap. normal coincides with axis %d (1<>a, 2<>b, 3<>c)" % auto_scale
cutoff_corr = 2*float(self.opt['MDP']['_CUTOFF'][0])
print "Apply cut-off correction: %+2.3f" % cutoff_corr
if auto_scale == 3:
prev_length = magnitude(self.pop.c)
new_length = self.n_max - self.n_min + cutoff_corr
self.pop.c = self.pop.c / prev_length * new_length
print "Scaled box vector from %2.3fnm to %2.3fnm" % (prev_length, new_length)
else:
assert False # Not implemented
# Shift system
shift_vec = - self.n / magnitude(self.n) * (self.n_min - 0.5*cutoff_corr)
print "Shift system by", shift_vec
for mol in self.pop.mols:
mol.shift(shift_vec)
return
def estimate_bulk_z(self):
# TODO Use this function at the beginning of ::auto_group()
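        # Scan the normalized density profile from the top down; the bulk film
        # surface (z_bulk_max) is the first bin whose density reaches 0.5 of its maximum.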
self.set_dimensions()
self.get_density_profile(write_to_file=False)
hst_n_d = self.hst_n_d
hst_n_d.reverse()
z_bulk_min = hst_n_d[-1][0]
z_bulk_max = hst_n_d[-1][0]
for n_d in self.hst_n_d:
if n_d[1] < 0.5:
continue
else:
z_bulk_max = n_d[0]
break
hst_n_d.reverse()
return z_bulk_min, z_bulk_max
def auto_group(self):
print "Auto-group: Use freeze group = %s" % (not self.cmdlineopt.nofreeze)
self.set_dimensions()
self.get_density_profile()
hst_n_d = self.hst_n_d
hst_n_d.reverse()
z_bulk_min = hst_n_d[-1][0]
z_bulk_max = hst_n_d[-1][0]
for n_d in self.hst_n_d:
if n_d[1] < 0.5:
continue
else:
z_bulk_max = n_d[0]
break
hst_n_d.reverse()
print "Bulk extends over %1.2fnm." % (z_bulk_max - z_bulk_min)
# MIN z_bulk_fze z_bulk_sub z_bulk_thf MAX
# FREEZE ---------|SUBSTRATE ----| THINFILM -----|INTERPHASE ----|
z_bulk_sub = z_bulk_max - 2.5 * self.opt['SYSTEM']['rad'][0]
z_bulk_fze = z_bulk_sub - 2.5 * self.opt['SYSTEM']['rad'][0]
print "MIN z_bulk_fze| z_bulk_sub| z_bulk_thf| MAX|"
print "FREEZE ---------|SUBSTRATE ----| THINFILM -----|INTERPHASE ----|"
print " %1.3f %1.3f | %1.3f | %1.3f | %1.3f |" % (z_bulk_min,z_bulk_fze,z_bulk_sub,z_bulk_max,self.hst_n_d[-1][0])
outt = open('auto_group.gro','w')
outt.write('GROUP ASSIGNMENT FZE SUB THF IPH\n')
outt.write('%7d\n' % self.pop.atom_count())
self.iph_idcs = []
self.thf_idcs = []
self.sub_idcs = []
self.fze_idcs = []
self.mol_sub_idcs = []
# List of molecules forced frozen
fze_idcs_forced = range(int(self.opt['FREEZE']['first'][0]),int(self.opt['FREEZE']['last'][0]+1))
if fze_idcs_forced != [0]:
print "Freezing all molecules with ID in (%d ... %d), as requested." % (fze_idcs_forced[0],fze_idcs_forced[-1])
for mol in self.pop.mols:
prj = np.dot(mol.com(), self.n)
grp = 'nogroup'
if prj > z_bulk_max:
# Interphase
grp = 'iph'
com_vel = mol.com_vel()
z_prj_vel = np.dot(com_vel,self.n)
if z_prj_vel > 0.0 and prj > z_bulk_max + 2:
for atom in mol.atoms:
atom.vel = atom.vel - 2*z_prj_vel * self.n
print "Boosted reflected molecule ID %1d (%1s)" % (mol.Id, mol.name)
print "... v", com_vel, " ->", mol.com_vel()
elif prj > z_bulk_sub:
# Thin film
grp = 'thf'
elif prj > z_bulk_fze:
# Substrate
grp = 'sub'
else:
# Freeze
if self.cmdlineopt.nofreeze == True:
grp = 'sub'
else:
grp = 'fze'
if mol.Id in fze_idcs_forced:
# Forced frozen
grp = 'fze'
print "Freezing mol %d %s" % (mol.Id, mol.name)
for atom in mol.atoms:
atom.write_gro_ln(outt, fragName = grp.upper())
if grp == 'fze':
self.fze_idcs.append(atom.Id)
elif grp == 'sub':
self.sub_idcs.append(atom.Id)
elif grp == 'thf':
self.thf_idcs.append(atom.Id)
elif grp == 'iph':
self.iph_idcs.append(atom.Id)
            # Containers for molecular IDs (used in ::xy_density)
if grp == 'sub':
self.mol_sub_idcs.append(mol.Id)
outt.write('%1s' % self.pop.box_str)
outt.close()
print "Auto-grouped system based on cell population:"
print "[ freeze ] :", len(self.fze_idcs)
print "[ substrate ] :", len(self.sub_idcs)
print "[ thinfilm ] :", len(self.thf_idcs)
print "[ interphase ] :", len(self.iph_idcs)
return
def assemble_here(self, path = None):
# ==========================================================
# Path -> gro/top/ndx -> ctrl/hist -> grompp.mdp/qmd.sh
# ==========================================================
# Determine path if not supplied
here = os.getcwd()
if path == None and '_' in here.split('/')[-1]:
orig = here.split('/')[-1]
stem = orig.split('_')[0]
Iter = int(orig.split('_')[1])
path = '../%1s_%1d/' % (stem, Iter+1)
elif path == None:
path = './ASSEMBLE/'
else:
if path[-1] == '/':
pass
else:
path = path + '/'
# Create directory, if necessary
try:
os.chdir(path)
os.chdir(here)
except OSError:
os.mkdir(path)
print "Assemble system in %1s" % path
# Write system.top, system.gro
self.pop.write_top(path)
self.pop.write_gro(path)
# Write system.ndx
outt = open(path+self.ndxfile,'w')
outt.write('[ freeze ]\n')
for i in range(len(self.fze_idcs)):
if i % 10 == 0:
outt.write('\n')
outt.write('%7d ' % self.fze_idcs[i])
outt.write('\n\n')
outt.write('[ substrate ]\n')
for i in range(len(self.sub_idcs)):
if i % 10 == 0:
outt.write('\n')
outt.write('%7d ' % self.sub_idcs[i])
outt.write('\n\n')
outt.write('[ thinfilm ]\n')
for i in range(len(self.thf_idcs)):
if i % 10 == 0:
outt.write('\n')
outt.write('%7d ' % self.thf_idcs[i])
outt.write('\n\n')
outt.write('[ interphase ]\n')
for i in range(len(self.iph_idcs)):
if i % 10 == 0:
outt.write('\n')
outt.write('%7d ' % self.iph_idcs[i])
outt.write('\n\n')
outt.close()
# Copy system.ctrl
os.system('cp ./%1s %1s' % (self.ctrlfile, path+self.ctrlfile))
# Write system.hist
os.system('cp ./%1s %1s' % (self.histfile, path+self.histfile))
# Write grompp.mdp
MD = MD_Operator()
# ==========================================================
# MDP first order
# ==========================================================
# Time step, span [ps]
dt = self.opt['MDP']['dt'][0]
T = self.opt['MDP']['T'][0]
dt_out = self.opt['MDP']['dt_out'][0]
# Input files
_t = self.topfile
_c = self.grofile
_n = self.ndxfile
# Convenience
tag = 't_%1d_%1s' % (self.pop.t, self.tag)
# Temperatures
Tfze = self.opt['FREEZE']['ref_t'][0]
Tsub = self.opt['SUBSTRATE']['ref_t'][0]
Tthf = self.opt['THINFILM']['ref_t'][0]
Tiph = self.opt['INTERPHASE']['ref_t'][0]
# Other
maxwarn = self.opt['MDP']['maxwarn'][0]
# Override ctrl-options from command line arguments
if self.cmdlineopt.tag != None:
tag = 'T%d_%s_%d' % (self.pop.t, self.cmdlineopt.tag, os.getpid())
print "Override tag, new tag = %s" % tag
if self.cmdlineopt.temperature != None:
Tsub = self.cmdlineopt.temperature
Tthf = self.cmdlineopt.temperature
print "Override coupling temperature (sub,thf) from ctrl-file, new T =", Tsub
if self.cmdlineopt.maxwarn != None:
maxwarn = self.cmdlineopt.maxwarn
print "Override max. accepted grompp warnings, maxwarn =", maxwarn
MD.Set('_DT', dt)
MD.Set('_NSTEPS', int(T/dt+0.5))
MD.Set('_LOGOUT', int(dt_out/dt+0.5))
MD.Set('_XTCOUT', int(dt_out/dt+0.5))
# ==========================================================
# MDP second order
# ==========================================================
for key in self.opt['MDP'].keys():
if not key[0:1] == '_':
continue
else:
MD.Set(key, self.opt['MDP'][key][0])
MD.Set('_TC_GRPS', 'freeze substrate thinfilm interphase')
MD.Set('_TAU_T', '%1.3f %1.3f %1.3f %1.3f ' % (self.opt['FREEZE']['tau_t'][0],
self.opt['SUBSTRATE']['tau_t'][0],
self.opt['THINFILM']['tau_t'][0],
self.opt['INTERPHASE']['tau_t'][0]))
MD.Set('_REF_T', '%1.3f %1.3f %1.3f %1.3f ' % (Tfze,
Tsub,
Tthf,
Tiph))
MD.Set('_COMM_GRPS', 'substrate thinfilm')
MD.Set('_ENERGYGRPS', 'freeze substrate thinfilm interphase')
if self.opt['FREEZE']['freeze_dim'][0].replace(' ','') == 'YYY':
MD.Set('_ENERGYGRP_EXCL', 'freeze freeze')
else:
MD.Set('_ENERGYGRP_EXCL', ' ')
MD.Set('_FREEZEGRPS', 'freeze')
MD.Set('_FREEZEDIM', self.opt['FREEZE']['freeze_dim'][0])
MD.Tag(tag)
# ==========================================================
# MDP third order
# ==========================================================
mdrun_cmd = MD.gen_mdrun_cmd(
_s = 'topol.tpr',
_o = 'traj.trr',
_x = 'traj.xtc',
_c = 'confout.gro',
_cpo = 'state.cpt',
_cpt = 18,
_maxh = 36,
_d = self.opt['MDP']['precision'][0])
grompp_cmd = MD.gen_grompp_cmd(
_c = _c,
_p = _t,
_f = 'grompp.mdp',
_n = _n,
_o = 'topol.tpr',
_maxnum = maxwarn)
MD.write_grompp_mdp(path+'grompp.mdp')
MD.write_qmd_sh(path+'qmd.sh',self.cmdlineopt.username)
outt = open(path+'mdp.sh','w')
outt.write('#! /bin/bash\n')
outt.write(grompp_cmd)
outt.write('\n')
outt.close()
outt = open(path+'run.sh','w')
outt.write('#! /bin/bash\n')
outt.write(mdrun_cmd)
outt.write('\n')
outt.close()
class Evaporator(object):
def __init__(self, opt_evaporant):
self.grofile = opt_evaporant['gro'][0]
self.topfile = opt_evaporant['top'][0]
self.pop = Population(self.grofile,self.topfile)
self.opt = opt_evaporant
self.ref_t = self.opt['ref_t'][0]
def create_mol(self, start_here, fly_along, enforce_const_vel = False, const_vel = None):
new_mol = Molecule(-1,'noname')
new_mol.import_from(self.pop.mols[0])
new_mol.shift(-new_mol.com()+start_here)
# Generate velocity in nm/ps
mol_mass = new_mol.mass()
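        # Most probable Maxwell-Boltzmann speed, v = sqrt(2*k_B*T/m), with the
        # mass in amu (1.67e-27 kg) and the 1e-3 factor converting m/s to nm/ps.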
mag_v = 1e-3 * ( 2 * 1.38e-23 * self.ref_t / mol_mass / 1.67e-27 )**0.5
if enforce_const_vel:
print "Enforcing constant CoM velocity for molecule %s:" % new_mol.name
print "v(CoM)=%1.3f nm/ps <=> T=%1.3fK" % (const_vel, (const_vel/mag_v*self.ref_t))
mag_v = const_vel
com_v = mag_v * fly_along
new_mol.boost(com_v)
com = new_mol.com()
x = com[0]; y = com[1]; z = com[2]
vx = com_v[0]; vy = com_v[1]; vz = com_v[2]
print "Created molecule %1s: r = %1.3f %1.3f %1.3f, v = %1.4f %1.4f %1.4f" % (new_mol.name, x,y,z,vx,vy,vz)
return new_mol
| apache-2.0 | -5,107,440,915,815,813,000 | 27.213723 | 137 | 0.543019 | false |
gangadharkadam/contributionerp | erpnext/selling/doctype/installation_note/installation_note.py | 18 | 4066 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, getdate
from frappe import _
from erpnext.stock.utils import get_valid_serial_nos
from erpnext.utilities.transaction_base import TransactionBase
from erpnext.accounts.utils import validate_fiscal_year
class InstallationNote(TransactionBase):
def __init__(self, arg1, arg2=None):
super(InstallationNote, self).__init__(arg1, arg2)
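		# Mapping consumed by the shared status updater: quantities installed here
		# are written back to the linked Delivery Note items (installed_qty) and
		# rolled up into the parent's per_installed / installation_status.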
self.status_updater = [{
'source_dt': 'Installation Note Item',
'target_dt': 'Delivery Note Item',
'target_field': 'installed_qty',
'target_ref_field': 'qty',
'join_field': 'prevdoc_detail_docname',
'target_parent_dt': 'Delivery Note',
'target_parent_field': 'per_installed',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
'status_field': 'installation_status',
'keyword': 'Installed',
'overflow_type': 'installation'
}]
def validate(self):
validate_fiscal_year(self.inst_date, self.fiscal_year, _("Installation Date"), self)
self.validate_installation_date()
self.check_item_table()
from erpnext.controllers.selling_controller import check_active_sales_items
check_active_sales_items(self)
def is_serial_no_added(self, item_code, serial_no):
has_serial_no = frappe.db.get_value("Item", item_code, "has_serial_no")
if has_serial_no == 1 and not serial_no:
frappe.throw(_("Serial No is mandatory for Item {0}").format(item_code))
elif has_serial_no != 1 and cstr(serial_no).strip():
frappe.throw(_("Item {0} is not a serialized Item").format(item_code))
def is_serial_no_exist(self, item_code, serial_no):
for x in serial_no:
if not frappe.db.exists("Serial No", x):
frappe.throw(_("Serial No {0} does not exist").format(x))
def is_serial_no_installed(self,cur_s_no,item_code):
for x in cur_s_no:
status = frappe.db.sql("select status from `tabSerial No` where name = %s", x)
status = status and status[0][0] or ''
if status == 'Installed':
frappe.throw(_("Item {0} with Serial No {1} is already installed").format(item_code, x))
def get_prevdoc_serial_no(self, prevdoc_detail_docname):
serial_nos = frappe.db.get_value("Delivery Note Item",
prevdoc_detail_docname, "serial_no")
return get_valid_serial_nos(serial_nos)
def is_serial_no_match(self, cur_s_no, prevdoc_s_no, prevdoc_docname):
for sr in cur_s_no:
if sr not in prevdoc_s_no:
frappe.throw(_("Serial No {0} does not belong to Delivery Note {1}").format(sr, prevdoc_docname))
def validate_serial_no(self):
prevdoc_s_no, sr_list = [], []
for d in self.get('items'):
self.is_serial_no_added(d.item_code, d.serial_no)
if d.serial_no:
sr_list = get_valid_serial_nos(d.serial_no, d.qty, d.item_code)
self.is_serial_no_exist(d.item_code, sr_list)
prevdoc_s_no = self.get_prevdoc_serial_no(d.prevdoc_detail_docname)
if prevdoc_s_no:
self.is_serial_no_match(sr_list, prevdoc_s_no, d.prevdoc_docname)
self.is_serial_no_installed(sr_list, d.item_code)
def validate_installation_date(self):
for d in self.get('items'):
if d.prevdoc_docname:
d_date = frappe.db.get_value("Delivery Note", d.prevdoc_docname, "posting_date")
if d_date > getdate(self.inst_date):
frappe.throw(_("Installation date cannot be before delivery date for Item {0}").format(d.item_code))
def check_item_table(self):
if not(self.get('items')):
frappe.throw(_("Please pull items from Delivery Note"))
def on_update(self):
frappe.db.set(self, 'status', 'Draft')
def on_submit(self):
self.validate_serial_no()
self.update_prevdoc_status()
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
for d in self.get('items'):
if d.serial_no:
d.serial_no = d.serial_no.replace(",", "\n")
for sr_no in d.serial_no.split("\n"):
frappe.db.set_value("Serial No", sr_no, "status", "Delivered")
self.update_prevdoc_status()
frappe.db.set(self, 'status', 'Cancelled')
| agpl-3.0 | -7,984,588,396,301,521,000 | 35.303571 | 105 | 0.689129 | false |
almarklein/scikit-image | skimage/segmentation/tests/test_random_walker.py | 2 | 9054 | import numpy as np
from skimage.segmentation import random_walker
from skimage.transform import resize
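# The helpers below build a noisy 2-D image (or 3-D volume) containing the
# outline of a square (cube) with a small opening, plus two labeled seeds,
# one placed outside the shape and one inside it.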
def make_2d_syntheticdata(lx, ly=None):
if ly is None:
ly = lx
np.random.seed(1234)
data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (
0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))
data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5] = 1
seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2
return data, seeds
def make_3d_syntheticdata(lx, ly=None, lz=None):
if ly is None:
ly = lx
if lz is None:
lz = lx
np.random.seed(1234)
data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l,
lz // 2 - small_l:lz // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1,
lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0
# make a hole
hole_size = np.max([1, small_l // 8])
data[lx // 2 - small_l,
ly // 2 - hole_size:ly // 2 + hole_size,
lz // 2 - hole_size:lz // 2 + hole_size] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5, lz // 5] = 1
seeds[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 2 - small_l // 4] = 2
return data, seeds
def test_2d_bf():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
# Now test with more than two labels
labels[55, 80] = 3
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert len(full_prob_bf) == 3
assert data.shape == labels.shape
def test_2d_cg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels_cg = random_walker(data, labels, beta=90, mode='cg')
assert (labels_cg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
full_prob = random_walker(data, labels, beta=90, mode='cg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg
def test_2d_cg_mg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_types():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
data = 255 * (data - data.min()) // (data.max() - data.min())
data = data.astype(np.uint8)
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_reorder_labels():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[labels == 2] = 4
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_bf
def test_2d_inactive():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[10:20, 10:20] = -1
labels[46:50, 33:38] = -2
labels = random_walker(data, labels, beta=90)
assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d_inactive():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
old_labels = np.copy(labels)
labels[5:25, 26:29, 26:29] = -1
after_labels = np.copy(labels)
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels, old_labels, after_labels
def test_multispectral_2d():
lx, ly = 70, 100
data, labels = make_2d_syntheticdata(lx, ly)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
multi_labels = random_walker(data, labels, mode='cg', multichannel=True)
assert data[..., 0].shape == labels.shape
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_multispectral_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
multi_labels = random_walker(data, labels, mode='cg', multichannel=True)
assert data[..., 0].shape == labels.shape
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_depth():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
# Rescale `data` along Z axis
data_aniso = np.zeros((n, n, n // 2))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n, n // 2))
# Generate new labels
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 4 - small_l // 8] = 2
# Test with `depth` kwarg
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
depth=0.5)
assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()
def test_spacing():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
# Rescale `data` along Y axis
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
data_aniso = np.zeros((n, n * 2, n))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n * 2, n))
# Generate new labels
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly - small_l // 2,
lz // 2 - small_l // 4] = 2
# Test with `spacing` kwarg
# First, anisotropic along Y
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 2., 1.))
assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
# Rescale `data` along X axis
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
data_aniso = np.zeros((n, n * 2, n))
for i in range(data.shape[1]):
data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n))
# Generate new labels
small_l = int(lx // 5)
labels_aniso2 = np.zeros_like(data_aniso)
labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
labels_aniso2[lx - small_l // 2,
ly // 2 + small_l // 4,
lz // 2 - small_l // 4] = 2
# Anisotropic along X
labels_aniso2 = random_walker(data_aniso,
labels_aniso2,
mode='cg', spacing=(2., 1., 1.))
assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| bsd-3-clause | 4,296,883,676,515,704,300 | 33.823077 | 80 | 0.549702 | false |
haphaeu/yoshimi | Qt/TalkingFiles/stack_overflow_answer.py | 1 | 1104 | # -*- coding: utf-8 -*-
"""
Answer to my question:
https://stackoverflow.com/a/46817275/5069105
Created on Thu Oct 19 08:36:41 2017
@author: rarossi
"""
import sys
from PyQt4 import QtGui, QtCore
class Thread(QtCore.QThread):
def run(self):
QtCore.QThread.sleep(2)
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
grid = QtGui.QGridLayout()
self.setLayout(grid)
self.btn = QtGui.QPushButton('Count')
grid.addWidget(self.btn, 1, 1)
self.txt1 = QtGui.QTextEdit()
grid.addWidget(self.txt1, 1, 2)
self.btn.clicked.connect(self.click)
self.thread = Thread()
self.thread.finished.connect(lambda: self.btn.setEnabled(True))
self.show()
def click(self):
self.txt1.append('click')
if not self.thread.isRunning():
self.btn.setEnabled(False)
self.thread.start()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| lgpl-3.0 | 6,767,804,709,810,246,000 | 21.530612 | 71 | 0.602355 | false |
googleapis/python-speech | samples/snippets/quickstart.py | 1 | 1633 | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def run_quickstart():
# [START speech_quickstart]
# Imports the Google Cloud client library
# [START speech_python_migration_imports]
from google.cloud import speech
# [END speech_python_migration_imports]
# Instantiates a client
# [START speech_python_migration_client]
client = speech.SpeechClient()
# [END speech_python_migration_client]
# The name of the audio file to transcribe
gcs_uri = "gs://cloud-samples-data/speech/brooklyn_bridge.raw"
audio = speech.RecognitionAudio(uri=gcs_uri)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=16000,
language_code="en-US",
)
# Detects speech in the audio file
response = client.recognize(config=config, audio=audio)
for result in response.results:
print("Transcript: {}".format(result.alternatives[0].transcript))
# [END speech_quickstart]
if __name__ == "__main__":
run_quickstart()
| apache-2.0 | -5,257,935,808,176,347,000 | 30.403846 | 74 | 0.710349 | false |
borisroman/vdsm | tests/cPopenTests.py | 4 | 5408 | #
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import print_function
import os
import sys
import subprocess
from nose.plugins.skip import SkipTest
from testlib import VdsmTestCase as TestCaseBase
EXT_ECHO = "/bin/echo"
if __name__ != "__main__":
# This will not be available when we use this module as a subprocess
from cpopen import CPopen
class TestCPopen(TestCaseBase):
def testEcho(self):
data = "Hello"
p = CPopen([EXT_ECHO, "-n", data])
p.wait()
self.assertTrue(p.returncode == 0,
"Process failed: %s" % os.strerror(p.returncode))
self.assertEquals(p.stdout.read(), data)
def testCat(self):
path = "/etc/passwd"
p = CPopen(["cat", path])
p.wait()
self.assertTrue(p.returncode == 0,
"Process failed: %s" % os.strerror(p.returncode))
with open(path, "r") as f:
self.assertEquals(p.stdout.read(), f.read())
def _subTest(self, name, params, *args, **kwargs):
p = CPopen(["python", __file__, name] + params, *args, **kwargs)
p.wait()
self.assertTrue(p.returncode == 0,
"Process failed: %s" % os.strerror(p.returncode))
self.assertEquals(p.stdout.read().strip(), "True")
def testCloseFDs(self):
fds = os.pipe()
try:
self._subTest("fds", [str(fds[1])], close_fds=True)
finally:
os.close(fds[0])
os.close(fds[1])
def testNoCloseFds(self):
fds = os.pipe()
try:
self._subTest("nofds", [str(fds[1])], close_fds=False)
finally:
os.close(fds[0])
os.close(fds[1])
def testEnv(self):
env = os.environ.copy()
env["TEST"] = "True"
self._subTest("env", [], env=env)
def testCwd(self):
cwd = "/proc"
p = CPopen(["python", "-c", "import os; print os.getcwd()"], cwd=cwd)
p.wait()
self.assertTrue(p.returncode == 0,
"Process failed: %s" % os.strerror(p.returncode))
self.assertEquals(p.stdout.read().strip(), cwd)
def testRunNonExecutable(self):
self.assertRaises(OSError, CPopen, ["/tmp"])
def testBadCwd(self):
self.assertRaises(OSError, CPopen, ["echo", "hello"],
cwd="/~~~~~dasdas~~~~")
def testUnicodeArg(self):
data = u'hello'
cmd = [EXT_ECHO, "-n", data]
p = CPopen(cmd)
p.wait()
p2 = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2.wait()
self.assertEquals(p.stdout.read(), p2.stdout.read())
def testNonASCIIUnicodeArg(self):
data = u'\u05e9\u05dc\u05d5\u05dd'
# If the default encoding is not utf-8 the test *should* fail as non
# ascii conversion shouldn't work
if sys.getfilesystemencoding() != "UTF-8":
            raise SkipTest("The default filesystem encoding isn't UTF-8")
cmd = [EXT_ECHO, "-n", data]
p = CPopen(cmd)
p.wait()
p2 = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2.wait()
self.assertEquals(p.stdout.read(), p2.stdout.read())
def testStdin(self):
data = "Hello World"
p = CPopen(["cat"])
p.stdin.write(data)
p.stdin.flush()
p.stdin.close()
p.wait()
self.assertTrue(p.returncode == 0,
"Process failed: %s" % os.strerror(p.returncode))
self.assertEquals(p.stdout.read(), data)
def testStdinEpoll(self):
import select
data = "Hello World"
p = CPopen(["cat"])
ep = select.epoll()
ep.register(p.stdin, select.EPOLLOUT)
fd, ev = ep.poll(1)[0]
ep.close()
os.write(fd, data)
p.stdin.close()
p.wait()
self.assertTrue(p.returncode == 0,
"Process failed: %s" % os.strerror(p.returncode))
self.assertEquals(p.stdout.read(), data)
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "fds":
try:
os.close(int(sys.argv[2]))
print("False")
except:
print("True")
elif cmd == "nofds":
try:
os.close(int(sys.argv[2]))
print("True")
except:
print("False")
elif cmd == "env":
try:
print(os.environ.get("TEST", "False"))
except:
print("False")
| gpl-2.0 | 6,968,033,768,327,014,000 | 30.260116 | 79 | 0.56176 | false |
fedora-infra/faitout | faitout/faitoutlib/__init__.py | 1 | 15008 | #-*- coding: utf-8 -*-
"""
faitoutlib - the backend library performing the actual work of this project.
(c) 2013-2017 - Copyright Red Hat Inc.
Authors:
- Pierre-Yves Chibon <[email protected]>
Distributed under License GPLv3 or later
You can find a copy of this license on the website
http://www.gnu.org/licenses/gpl.html
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import random
import string
import sys
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.exc import NoResultFound
import model
class FaitoutException(Exception):
""" Top level exceptions for all the customs exception of Faitout.
"""
pass
class TooManyConnectionException(FaitoutException):
""" Exception thrown when the user has requested to many database
connection within a certain time frame.
"""
pass
class WrongOriginException(FaitoutException):
""" Exception thrown when someone has requested to drop a database
from a different IP than the person that requested it.
"""
pass
class NoDatabaseException(FaitoutException):
""" Exception thrown when someone has requested to drop a database
that does not exist.
"""
pass
def get_engine(db_url, debug=False, pool_recycle=3600):
""" Create the engine use to connect to the database.
:arg db_url: URL used to connect to the database. The URL contains
information with regards to the database engine, the host to connect
to, the user and password and the database name.
ie: <engine>://<user>:<password>@<host>/<dbname>
    :arg debug: a boolean specifying whether we should have the verbose
output of sqlalchemy or not.
:return an engine that can be used to connect the database.
"""
return create_engine(db_url, echo=debug, pool_recycle=pool_recycle)
def create_session(db_url, debug=False, pool_recycle=3600):
""" Create the Session object to use to query the database.
:arg db_url: URL used to connect to the database. The URL contains
information with regards to the database engine, the host to connect
to, the user and password and the database name.
ie: <engine>://<user>:<password>@<host>/<dbname>
    :arg debug: a boolean specifying whether we should have the verbose
output of sqlalchemy or not.
:return a Session that can be used to query the database.
"""
engine = get_engine(db_url, debug=debug, pool_recycle=pool_recycle)
scopedsession = scoped_session(sessionmaker(bind=engine))
return scopedsession
def get_new_connection(
session, admin_engine, remote_ip, host, port, max_con=3,
outformat='text', unlimited=False):
""" Create a new connection to the database for the specified IP
address.
    In case the IP address provided is not part of the whitelist and has
    requested more than 3 connections over the last X minutes, the method
    will throw a TooManyConnectionException.
A FaitoutException is thrown if something went wrong at the database
level.
:arg session: the session with which to connect to the database.
:arg admin_engine: the engine with which to connect to the postgresql
database to create the new database and user.
:arg remote_ip: the IP address of the user that requested a new
connection.
:arg host: the host / ip of the postgresql server to return in the
database url.
:arg port: the port of the postgresql server to return in the database
url.
:kwarg max_con: specify the maximum number of active connections
allowed per IP at the same time.
:kwarg outformat: specify the return format of the connection
information. At the moment 'text' and 'json' are supported, 'text'
being the default.
    :kwarg unlimited: a boolean specifying whether this IP address is allowed
unlimited active connections.
:raise TooManyConnectionException: if the user requested too many
        connections too quickly.
:raise FaitoutException: generic exception raised in case of problem.
:return: a string of the URL to connect to the database if outformat
        is 'text', a dictionary of the same information if outformat is
'json'.
"""
## Check if user is allowed to ask for a new connection
if not unlimited \
and model.Connection.search(
session, ip=remote_ip, active=True, cnt=True) >= max_con:
raise TooManyConnectionException(
            '%s already has 3 active connections, please retry later' %
remote_ip
)
## Generate user
user = string_generator(20)
## Generate password
password = string_generator(40)
## Generate database name
db_name = string_generator(15)
connection = model.Connection(user, password, db_name, remote_ip)
session.add(connection)
try:
session.commit()
except Exception as err: # pragma: no cover
session.rollback()
print >> sys.stderr, err
raise FaitoutException(
            'An error has occurred, please contact the administrator'
)
## Create database, user and grant permission
try:
create_database(admin_engine, db_name, user, password)
except Exception as err: # pragma: no cover
print >> sys.stderr, err
raise FaitoutException(
            'An error has occurred, please contact the administrator'
)
info = {
"dbname": db_name,
"username": user,
"password": password,
"port": port,
"host": host,
}
if outformat == 'json':
return info
else:
return 'postgresql://%(username)s:%(password)s@' \
'%(host)s:%(port)s/%(dbname)s' % info
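# Illustrative usage sketch (not part of the original module). A caller -- for
# example a web frontend -- could obtain a throwaway database roughly like
# this; the URLs, host, port and IP below are placeholders:
#
#     admin_engine = get_engine('postgresql://admin:secret@localhost/postgres')
#     session = create_session('postgresql://faitout:secret@localhost/faitout')
#     url = get_new_connection(session, admin_engine, remote_ip='192.0.2.1',
#                              host='db.example.com', port=5432)
#
# The default outformat='text' returns a ready-to-use postgresql:// URL;
# outformat='json' returns the same information as a dictionary.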
def string_generator(length=15):
""" Return a randomly generated string of lower and upper ASCII
    characters and digits.
:kwarg length: the length of the string to return
"""
chars = string.ascii_uppercase + string.digits + string.ascii_lowercase
return ''.join(random.choice(chars) for x in range(length))
def create_database(admin_engine, db_name, username, password): # pragma: no cover
""" Using the with the provided engine, create a new database with the
specified name, create a new database user with the specified username
and password and set this user as admin of this database.
:arg admin_engine: the engine used to connect to the database
:arg db_name: the name of the database to create
:arg username: the name of the user to create
:arg password: the password of the user to create
"""
conn = admin_engine.connect()
try:
try:
conn.execute("commit")
except:
pass
conn.execute("CREATE USER \"%s\" WITH PASSWORD '%s';" %
(username, password))
conn.execute("commit")
conn.execute('create database "%s";' % db_name)
conn.execute("commit")
conn.execute("GRANT ALL PRIVILEGES ON DATABASE \"%s\" to \"%s\";" %
(db_name, username))
finally:
conn.close()
def clean_connection(session, admin_engine, remote_ip, db_name):
""" Drop all the table of the specified database.
In case the IP address provided is not the IP that requested the
    connection, a WrongOriginException is thrown.
A FaitoutException is thrown if something went wrong at the database
level.
:arg session: the session with which to connect to the database.
:arg admin_engine: the engine with which to connect to the postgresql
database to create the new database and user.
:arg remote_ip: the IP address of the user that requested a new
connection.
:arg db_name: the name of the database to drop.
:raise NoDatabaseException: thrown when the database name provided
could not be associated with any database.
    :raise WrongOriginException: if the user requested to clean the db from
        a different IP than the one that requested the db.
:raise FaitoutException: generic exception raised in case of problem.
    :return: a confirmation message stating that the database has been
        cleaned.
"""
try:
connection = model.Connection.by_db_name(session, db_name)
except NoResultFound:
raise NoDatabaseException(
'Database %s could not be found' % db_name)
if connection.connection_active is False:
raise NoDatabaseException(
'No active database named %s could be found' % db_name)
if connection.connection_ip != remote_ip:
raise WrongOriginException(
'%s did not request this database and thus is not allowed to '
'clean it.' % remote_ip)
try:
clean_database(admin_engine, db_name)
except Exception as err: # pragma: no cover
print >> sys.stderr, 'ERROR while cleaning\n', err
raise FaitoutException(
            'An error has occurred, please contact the administrator'
)
return 'Database %s has been cleaned' % db_name
def clean_database(admin_engine, db_name): # pragma: no cover
""" Using the provided engine, drop all tables ofthe specified database.
:arg admin_engine: the engine used to connect to the database
:arg db_name: the name of the database to clean
"""
conn = admin_engine.connect()
try:
try:
conn.execute("commit")
except:
pass
conn.execute("SELECT pg_terminate_backend(pg_stat_activity.pid)"
" FROM pg_stat_activity"
" WHERE pg_stat_activity.datname = '%s'"
" AND pid <> pg_backend_pid();" % db_name)
conn.execute("commit")
conn.execute('drop database "%s";' % db_name)
conn.execute("commit")
conn.execute('create database "%s";' % db_name)
conn.execute("commit")
finally:
conn.close()
def drop_connection(session, admin_engine, remote_ip, db_name):
""" Drop the specified database and the user associated with it.
In case the IP address provided is not the IP that requested the
    connection, a WrongOriginException is thrown.
A FaitoutException is thrown if something went wrong at the database
level.
:arg session: the session with which to connect to the database.
:arg admin_engine: the engine with which to connect to the postgresql
database to create the new database and user.
:arg remote_ip: the IP address of the user that requested a new
connection.
:arg db_name: the name of the database to drop.
:raise NoDatabaseException: thrown when the database name provided
could not be associated with any database.
:raise WrongOriginException: if the user requested to drop the db from
        a different IP than the one that requested the db.
:raise FaitoutException: generic exception raised in case of problem.
    :return: a confirmation message stating that the database has been
        dropped.
"""
try:
connection = model.Connection.by_db_name(session, db_name)
except NoResultFound:
raise NoDatabaseException(
'Database %s could not be found' % db_name)
if connection.connection_active is False:
raise NoDatabaseException(
'No active database named %s could be found' % db_name)
if connection.connection_ip != remote_ip:
raise WrongOriginException(
'%s did not request this database and thus is not allowed to '
'drop it.' % remote_ip)
try:
drop_database(admin_engine, db_name, connection.connection_user)
except Exception as err: # pragma: no cover
print >> sys.stderr, 'ERROR while dropping\n', err
raise FaitoutException(
            'An error has occurred, please contact the administrator'
)
connection.connection_active = False
try:
session.commit()
except Exception as err: # pragma: no cover
session.rollback()
print >> sys.stderr, err
raise FaitoutException(
            'An error has occurred, please contact the administrator'
)
return 'Database %s has been dropped' % db_name
def drop_database(admin_engine, db_name, username): # pragma: no cover
""" Using the provided engine, drop the specified database and user.
:arg admin_engine: the engine used to connect to the database
:arg db_name: the name of the database to drop
:arg username: the name of the user to drop
"""
conn = admin_engine.connect()
try:
try:
conn.execute("commit")
except:
pass
conn.execute("SELECT pg_terminate_backend(pg_stat_activity.pid)"
" FROM pg_stat_activity"
" WHERE pg_stat_activity.datname = '%s'"
" AND pid <> pg_backend_pid();" % db_name)
conn.execute("commit")
conn.execute('drop database "%s";' % db_name)
conn.execute("commit")
conn.execute('drop user "%s";' % username)
conn.execute("commit")
finally:
conn.close()
def get_stats(session):
""" Retrieve some statistics about the current usage of faitout.
:arg session: the session with which to connect to the faitout database.
"""
output = {}
output['total_connections'] = model.Connection.search(
session, cnt=True)
output['active_connections'] = model.Connection.search(
session, active=True, cnt=True)
output['unique_ip'] = model.Connection.cnt_unique_ip(session)
return output
def get_ip_stats(session, ipaddr):
""" Retrieve some statistics about the current usage of faitout by a
given IP address.
:arg session: the session with which to connect to the faitout database.
:arg ipaddr: the IP address of interest.
"""
output = {}
output['total_connections'] = model.Connection.search(
session, ip=ipaddr, cnt=True)
output['active_connections'] = model.Connection.search(
session, ip=ipaddr, active=True)
return output
| gpl-3.0 | -823,578,071,481,065,200 | 34.396226 | 83 | 0.665578 | false |
guettli/django | tests/swappable_models/tests.py | 16 | 2006 | from __future__ import unicode_literals
from swappable_models.models import Article
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class SwappableModelTests(TestCase):
available_apps = [
'swappable_models',
'django.contrib.auth',
'django.contrib.contenttypes',
]
@override_settings(TEST_ARTICLE_MODEL='swappable_models.AlternateArticle')
def test_generated_data(self):
"Permissions and content types are not created for a swapped model"
# Delete all permissions and content_types
Permission.objects.filter(content_type__app_label='swappable_models').delete()
ContentType.objects.filter(app_label='swappable_models').delete()
# Re-run migrate. This will re-build the permissions and content types.
new_io = StringIO()
management.call_command('migrate', interactive=False, stdout=new_io)
# Check that content types and permissions exist for the swapped model,
# but not for the swappable model.
apps_models = [(p.content_type.app_label, p.content_type.model)
for p in Permission.objects.all()]
self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
self.assertNotIn(('swappable_models', 'article'), apps_models)
apps_models = [(ct.app_label, ct.model)
for ct in ContentType.objects.all()]
self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
self.assertNotIn(('swappable_models', 'article'), apps_models)
@override_settings(TEST_ARTICLE_MODEL='swappable_models.article')
def test_case_insensitive(self):
"Model names are case insensitive. Check that model swapping honors this."
Article.objects.all()
self.assertIsNone(Article._meta.swapped)
| bsd-3-clause | -5,841,470,906,766,234,000 | 40.791667 | 86 | 0.693918 | false |
0x46616c6b/ansible | lib/ansible/modules/network/aos/aos_logical_device.py | 13 | 7985 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: aos_logical_device
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Logical Device
description:
  - Apstra AOS Logical Device module lets you manage your Logical Devices easily.
    You can create and delete Logical Devices by name, ID or by using a JSON file.
    This module is idempotent and supports the I(check) mode.
    It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Logical Device to manage.
Only one of I(name), I(id) or I(content) can be set.
id:
description:
- AOS Id of the Logical Device to manage (can't be used to create a new Logical Device),
Only one of I(name), I(id) or I(content) can be set.
content:
description:
- Datastructure of the Logical Device to create. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned
on success in I(value).
state:
description:
- Indicate what is the expected state of the Logical Device (present or not).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Delete a Logical Device by name"
aos_logical_device:
session: "{{ aos_session }}"
name: "my-logical-device"
state: absent
- name: "Delete a Logical Device by id"
aos_logical_device:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
# Save a Logical Device to a file
- name: "Access Logical Device 1/3"
aos_logical_device:
session: "{{ aos_session }}"
name: "my-logical-device"
state: present
register: logical_device
- name: "Save Logical Device into a JSON file 2/3"
copy:
content: "{{ logical_device.value | to_nice_json }}"
dest: logical_device_saved.json
- name: "Save Logical Device into a YAML file 3/3"
copy:
content: "{{ logical_device.value | to_nice_yaml }}"
dest: logical_device_saved.yaml
- name: "Load Logical Device from a JSON file"
aos_logical_device:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/logical_device_saved.json') }}"
state: present
- name: "Load Logical Device from a YAML file"
aos_logical_device:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/logical_device_saved.yaml') }}"
state: present
'''
RETURNS = '''
name:
description: Name of the Logical Device
returned: always
type: str
sample: AOS-1x25-1
id:
description: AOS unique ID assigned to the Logical Device
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
#########################################################
# State Processing
#########################################################
def logical_device_absent(module, aos, my_logical_dev):
margs = module.params
    # If the Logical Device does not exist, return directly
if my_logical_dev.exists is False:
module.exit_json(changed=False,
name=margs['name'],
id=margs['id'],
value={})
# If not in check mode, delete Logical Device
if not module.check_mode:
try:
            # Need to wait 1 sec before a delete to work around a current limitation in AOS
time.sleep(1)
my_logical_dev.delete()
except:
module.fail_json(msg="An error occured, while trying to delete the Logical Device")
module.exit_json( changed=True,
name=my_logical_dev.name,
id=my_logical_dev.id,
value={} )
def logical_device_present(module, aos, my_logical_dev):
margs = module.params
if margs['content'] is not None:
if 'display_name' in module.params['content'].keys():
do_load_resource(module, aos.LogicalDevices, module.params['content']['display_name'])
else:
module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
    # if the logical_device doesn't exist yet, 'content' is required to create it
    if my_logical_dev.exists is False and margs['content'] is None:
        module.fail_json(msg="'content' is mandatory for a Logical Device that doesn't exist yet")
module.exit_json( changed=False,
name=my_logical_dev.name,
id=my_logical_dev.id,
value=my_logical_dev.value )
#########################################################
# Main Function
#########################################################
def logical_device(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['content'] is not None:
content = content_to_dict(module, margs['content'] )
if 'display_name' in content.keys():
item_name = content['display_name']
else:
module.fail_json(msg="Unable to extract 'display_name' from 'content'")
elif margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
my_logical_dev = find_collection_item(aos.LogicalDevices,
item_name=item_name,
item_id=item_id)
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
logical_device_absent(module, aos, my_logical_dev)
elif margs['state'] == 'present':
logical_device_present(module, aos, my_logical_dev)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False ),
id=dict(required=False ),
content=dict(required=False, type="json"),
state=dict( required=False,
choices=['present', 'absent'],
default="present")
),
mutually_exclusive = [('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
logical_device(module)
if __name__ == "__main__":
main()
| gpl-3.0 | 3,997,973,514,520,942,600 | 30.812749 | 128 | 0.592987 | false |
amisrs/angular-flask | angular_flask/lib/python2.7/site-packages/gevent/_semaphore.py | 3 | 10408 | import sys
from gevent.hub import get_hub, getcurrent
from gevent.timeout import Timeout
__all__ = ['Semaphore', 'BoundedSemaphore']
class Semaphore(object):
"""
Semaphore(value=1) -> Semaphore
A semaphore manages a counter representing the number of release()
calls minus the number of acquire() calls, plus an initial value.
The acquire() method blocks if necessary until it can return
without making the counter negative.
If not given, ``value`` defaults to 1.
The semaphore is a context manager and can be used in ``with`` statements.
This Semaphore's ``__exit__`` method does not call the trace function
on CPython, but does under PyPy.
.. seealso:: :class:`BoundedSemaphore` for a safer version that prevents
some classes of bugs.
"""
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self.counter = value
self._dirty = False
# In PyPy 2.6.1 with Cython 0.23, `cdef public` or `cdef
# readonly` or simply `cdef` attributes of type `object` can appear to leak if
# a Python subclass is used (this is visible simply
# instantiating this subclass if _links=[]). Our _links and
# _notifier are such attributes, and gevent.thread subclasses
# this class. Thus, we carefully manage the lifetime of the
# objects we put in these attributes so that, in the normal
# case of a semaphore used correctly (deallocated when it's not
# locked and no one is waiting), the leak goes away (because
# these objects are back to None). This can also be solved on PyPy
# by simply not declaring these objects in the pxd file, but that doesn't work for
# CPython ("No attribute...")
# See https://github.com/gevent/gevent/issues/660
self._links = None
self._notifier = None
# we don't want to do get_hub() here to allow defining module-level locks
# without initializing the hub
def __str__(self):
params = (self.__class__.__name__, self.counter, len(self._links) if self._links else 0)
return '<%s counter=%s _links[%s]>' % params
def locked(self):
"""Return a boolean indicating whether the semaphore can be acquired.
Most useful with binary semaphores."""
return self.counter <= 0
def release(self):
"""
Release the semaphore, notifying any waiters if needed.
"""
self.counter += 1
self._start_notify()
return self.counter
def _start_notify(self):
if self._links and self.counter > 0 and not self._notifier:
# We create a new self._notifier each time through the loop,
# if needed. (it has a __bool__ method that tells whether it has
# been run; once it's run once---at the end of the loop---it becomes
# false.)
# NOTE: Passing the bound method will cause a memory leak on PyPy
# with Cython <= 0.23.3. You must use >= 0.23.4.
# See https://bitbucket.org/pypy/pypy/issues/2149/memory-leak-for-python-subclass-of-cpyext#comment-22371546
self._notifier = get_hub().loop.run_callback(self._notify_links)
def _notify_links(self):
# Subclasses CANNOT override. This is a cdef method.
# We release self._notifier here. We are called by it
# at the end of the loop, and it is now false in a boolean way (as soon
# as this method returns).
# If we get acquired/released again, we will create a new one, but there's
# no need to keep it around until that point (making it potentially climb
# into older GC generations, notably on PyPy)
notifier = self._notifier
try:
while True:
self._dirty = False
if not self._links:
# In case we were manually unlinked before
# the callback. Which shouldn't happen
return
for link in self._links:
if self.counter <= 0:
return
try:
link(self) # Must use Cython >= 0.23.4 on PyPy else this leaks memory
except: # pylint:disable=bare-except
getcurrent().handle_error((link, self), *sys.exc_info())
if self._dirty:
# We mutated self._links so we need to start over
break
if not self._dirty:
return
finally:
# We should not have created a new notifier even if callbacks
# released us because we loop through *all* of our links on the
# same callback while self._notifier is still true.
assert self._notifier is notifier
self._notifier = None
def rawlink(self, callback):
"""
rawlink(callback) -> None
Register a callback to call when a counter is more than zero.
*callback* will be called in the :class:`Hub <gevent.hub.Hub>`, so it must not use blocking gevent API.
*callback* will be passed one argument: this instance.
This method is normally called automatically by :meth:`acquire` and :meth:`wait`; most code
will not need to use it.
"""
if not callable(callback):
raise TypeError('Expected callable:', callback)
if self._links is None:
self._links = [callback]
else:
self._links.append(callback)
self._dirty = True
def unlink(self, callback):
"""
unlink(callback) -> None
Remove the callback set by :meth:`rawlink`.
This method is normally called automatically by :meth:`acquire` and :meth:`wait`; most
code will not need to use it.
"""
try:
self._links.remove(callback)
self._dirty = True
except (ValueError, AttributeError):
pass
if not self._links:
self._links = None
# TODO: Cancel a notifier if there are no links?
def _do_wait(self, timeout):
"""
Wait for up to *timeout* seconds to expire. If timeout
elapses, return the exception. Otherwise, return None.
Raises timeout if a different timer expires.
"""
switch = getcurrent().switch
self.rawlink(switch)
try:
timer = Timeout._start_new_or_dummy(timeout)
try:
try:
result = get_hub().switch()
assert result is self, 'Invalid switch into Semaphore.wait/acquire(): %r' % (result, )
except Timeout as ex:
if ex is not timer:
raise
return ex
finally:
timer.cancel()
finally:
self.unlink(switch)
def wait(self, timeout=None):
"""
wait(timeout=None) -> int
Wait until it is possible to acquire this semaphore, or until the optional
*timeout* elapses.
.. caution:: If this semaphore was initialized with a size of 0,
this method will block forever if no timeout is given.
:keyword float timeout: If given, specifies the maximum amount of seconds
this method will block.
:return: A number indicating how many times the semaphore can be acquired
before blocking.
"""
if self.counter > 0:
return self.counter
self._do_wait(timeout) # return value irrelevant, whether we got it or got a timeout
return self.counter
def acquire(self, blocking=True, timeout=None):
"""
acquire(blocking=True, timeout=None) -> bool
Acquire the semaphore.
.. caution:: If this semaphore was initialized with a size of 0,
this method will block forever (unless a timeout is given or blocking is
set to false).
:keyword bool blocking: If True (the default), this function will block
until the semaphore is acquired.
:keyword float timeout: If given, specifies the maximum amount of seconds
this method will block.
:return: A boolean indicating whether the semaphore was acquired.
If ``blocking`` is True and ``timeout`` is None (the default), then
(so long as this semaphore was initialized with a size greater than 0)
this will always return True. If a timeout was given, and it expired before
the semaphore was acquired, False will be returned. (Note that this can still
raise a ``Timeout`` exception, if some other caller had already started a timer.)
"""
if self.counter > 0:
self.counter -= 1
return True
if not blocking:
return False
timeout = self._do_wait(timeout)
if timeout is not None:
# Our timer expired.
return False
        # Neither our timer nor another one expired, so we blocked until
        # we awoke. Therefore, the counter is ours.
self.counter -= 1
assert self.counter >= 0
return True
_py3k_acquire = acquire # PyPy needs this; it must be static for Cython
def __enter__(self):
self.acquire()
def __exit__(self, t, v, tb):
self.release()
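# Illustrative usage sketch (not part of the original module). As the class
# docstring notes, Semaphore works as a context manager, so guarding a shared
# resource between greenlets typically looks like this
# (``use_shared_resource`` is a hypothetical worker function):
#
#     sem = Semaphore(2)           # at most two concurrent holders
#     with sem:                    # acquire() on entry, release() on exit
#         use_shared_resource()
#
# A non-blocking "try-lock" is sem.acquire(blocking=False), which returns
# False immediately when the counter is already zero.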
class BoundedSemaphore(Semaphore):
"""
BoundedSemaphore(value=1) -> BoundedSemaphore
A bounded semaphore checks to make sure its current value doesn't
exceed its initial value. If it does, :class:`ValueError` is
raised. In most situations semaphores are used to guard resources
with limited capacity. If the semaphore is released too many times
it's a sign of a bug.
If not given, *value* defaults to 1.
"""
#: For monkey-patching, allow changing the class of error we raise
_OVER_RELEASE_ERROR = ValueError
def __init__(self, *args, **kwargs):
Semaphore.__init__(self, *args, **kwargs)
self._initial_value = self.counter
def release(self):
if self.counter >= self._initial_value:
raise self._OVER_RELEASE_ERROR("Semaphore released too many times")
return Semaphore.release(self)
| mit | -3,093,381,927,742,221,000 | 37.69145 | 121 | 0.596368 | false |
tvcsantos/Flexget | flexget/event.py | 5 | 2988 | """
Provides small event framework
"""
from __future__ import absolute_import, division, unicode_literals
import logging
log = logging.getLogger('event')
_events = {}
class Event(object):
"""Represents one registered event."""
def __init__(self, name, func, priority=128):
self.name = name
self.func = func
self.priority = priority
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __eq__(self, other):
return self.priority == other.priority
def __lt__(self, other):
return self.priority < other.priority
def __gt__(self, other):
return self.priority > other.priority
def __str__(self):
return '<Event(name=%s,func=%s,priority=%s)>' % (self.name, self.func.__name__, self.priority)
__repr__ = __str__
def event(name, priority=128):
"""Register event to function with a decorator"""
def decorator(func):
add_event_handler(name, func, priority)
return func
return decorator
def get_events(name):
"""
:param String name: event name
:return: List of :class:`Event` for *name* ordered by priority
"""
    if name not in _events:
raise KeyError('No such event %s' % name)
_events[name].sort(reverse=True)
return _events[name]
def add_event_handler(name, func, priority=128):
"""
:param string name: Event name
:param function func: Function that acts as event handler
:param priority: Priority for this hook
:return: Event created
:rtype: Event
:raises Exception: If *func* is already registered in an event
"""
events = _events.setdefault(name, [])
for event in events:
if event.func == func:
raise ValueError('%s has already been registered as event listener under name %s' % (func.__name__, name))
log.trace('registered function %s to event %s' % (func.__name__, name))
event = Event(name, func, priority)
events.append(event)
return event
def remove_event_handlers(name):
"""Removes all handlers for given event `name`."""
_events.pop(name, None)
def remove_event_handler(name, func):
"""Remove `func` from the handlers for event `name`."""
for e in list(_events.get(name, [])):
if e.func is func:
_events[name].remove(e)
def fire_event(name, *args, **kwargs):
"""
    Trigger an event with *name*. If the event is not hooked by anything, nothing happens. If a function that hooks an event
    returns a value, it will replace the first argument when calling the next function.
:param name: Name of event to be called
:param args: List of arguments passed to handler function
:param kwargs: Key Value arguments passed to handler function
"""
    if name not in _events:
return
for event in get_events(name):
result = event(*args, **kwargs)
if result is not None:
args = (result,) + args[1:]
return args and args[0]
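# Illustrative usage sketch (not part of the original module). A plugin
# registers a handler with the ``event`` decorator and the framework later
# triggers it with ``fire_event``; the event name and handler below are
# hypothetical:
#
#     @event('demo.started', priority=200)
#     def on_demo_started(config):
#         return config
#
#     fire_event('demo.started', {'enabled': True})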
| mit | -5,708,261,350,407,067,000 | 27.188679 | 119 | 0.629518 | false |
google/ftc-object-detection | training/experimental/inspect_records.py | 1 | 1982 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PIL import Image, ImageDraw, ImageFont
import io
import os
import tensorflow as tf
fnt = ImageFont.truetype('LucidaSansRegular.ttf', 12)
flags = tf.app.flags
flags.DEFINE_string('records', 'records/train.records', 'Path to records to decode')
flags.DEFINE_string('decoded_dir', 'decoded', 'Path to write decoded records')
FLAGS = flags.FLAGS
example = tf.train.Example()
counter = 0
for record in tf.python_io.tf_record_iterator(FLAGS.records):
example.ParseFromString(record)
f = example.features.feature
height = f['image/height'].int64_list.value[0]
width = f['image/width'].int64_list.value[0]
e = f['image/encoded'].bytes_list.value[0]
im = Image.open(io.BytesIO(e))
draw = ImageDraw.Draw(im)
for i in range(len(f['image/object/class/text'].bytes_list.value)):
class_text = f['image/object/class/text'].bytes_list.value[i]
xmin = f['image/object/bbox/xmin'].float_list.value[i]
ymin = f['image/object/bbox/ymin'].float_list.value[i]
xmax = f['image/object/bbox/xmax'].float_list.value[i]
ymax = f['image/object/bbox/ymax'].float_list.value[i]
draw.rectangle([xmin*width, ymin*height, xmax*width, ymax*height], outline="rgb(255,0,0)")
draw.text((xmin*width, ymin*height), class_text.decode('utf-8'), font=fnt, fill=(255,0,0,255))
    im.save(os.path.join(FLAGS.decoded_dir, "%05d.png" % counter))
counter += 1
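# Illustrative invocation (not part of the original script); the record and
# output paths are placeholders matching the flag defaults above:
#
#     python inspect_records.py --records=records/train.records --decoded_dir=decoded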
| apache-2.0 | -6,338,285,356,073,414,000 | 40.291667 | 102 | 0.702321 | false |
germanovm/vdsm | tests/functional/networkTestsOVS.py | 1 | 15271 | #
# Copyright 2015 Red Hat, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from functools import wraps
from nose.plugins.skip import SkipTest
from vdsm.utils import RollbackContext
from testlib import expandPermutations, permutations
from testValidation import RequireVethMod
from networkTests import (setupModule, tearDownModule, NetworkTest, dummyIf,
_get_source_route, dnsmasqDhcp, NETWORK_NAME,
IP_ADDRESS, IP_MASK, IP_CIDR, IP_GATEWAY,
IPv6_ADDRESS, IPv6_CIDR, VLAN_ID, NOCHK, SUCCESS)
from utils import VdsProxy
import veth
import dhcp
# Make Pyflakes happy
setupModule
tearDownModule
# Do not trigger NetworkTest
NetworkTest.__test__ = False
BRIDGE_NAME = 'ovsbr0'
# Tests which are not supported by OVS hook (because of OVS hook or because of
# tests themselves). Some of these tests should be inherited and 'repaired'
# for OVS, or rewritten.
not_supported = [
'testAddVlanedBridgeless', # bridgeless
'testAddVlanedBridgeless_oneCommand', # bridgeless
'testAfterNetworkSetupHook', # bridgeless
'testBeforeNetworkSetupHook', # bridgeless
'testBrokenNetworkReplacement(False)', # bridgeless
'testBrokenNetworkReplacement(True)', # uses `ip l`
'testDhcpReplaceNicWithBridge', # bridgeless
'testIpLinkWrapper', # uses netlink.iter_links
'testReconfigureBrNetWithVanishedPort', # uses brctl
'testRedefineBondedNetworkIPs', # bridgeless
'testRemovingBridgeDoesNotLeaveBridge', # uses netlink.iter_links
'testRestoreNetworksOnlyRestoreUnchangedDevices', # bond with one slave
'testRestoreToBlockingDHCP', # bridgeless
'testSelectiveRestoreDuringUpgrade', # multiple untagged nets
'testSetupNetworkOutboundQos(False)', # bridgeless
'testSetupNetworksActiveSlave', # ovs doesn't report fake active slaves
'testSetupNetworksAddBadParams(False)', # bridgeless
'testSetupNetworksAddBondWithManyVlans(False)', # bridgeless
'testSetupNetworksAddDelBondedNetwork(False)', # bridgeless
'testSetupNetworksAddDelDhcp(False, (4, 6))', # bridgeless
'testSetupNetworksAddDelDhcp(False, (4,))', # bridgeless
'testSetupNetworksAddDelDhcp(False, (6,))', # bridgeless
'testSetupNetworksAddManyVlans(False)', # bridgeless
'testSetupNetworksAddNetworkToNicAfterBondBreaking(False)', # bridgeless
'testSetupNetworksAddNetworkToNicAfterBondResizing(False)', # bridgeless
'testSetupNetworksAddNetworkToNicAfterBondResizing(True)', # untagged nets
'testSetupNetworksAddOverExistingBond(False)', # bridgeless
'testSetupNetworksAddOverExistingBond(True)', # bridgeless
'testSetupNetworksAddVlan(False)', # bridgeless
'testSetupNetworksConvertVlanNetBridgeness', # bridgeless
'testSetupNetworksDelOneOfBondNets', # bridgeless
'testSetupNetworksDeletesTheBridgeOnlyWhenItIsReconfigured', # netlink
'testSetupNetworksEmergencyDevicesCleanupBondOverwrite(False)', # brless
'testSetupNetworksEmergencyDevicesCleanupVlanOverwrite(False)', # brless
'testSetupNetworksKeepNetworkOnBondAfterBondResizing(False)', # bridgeless
'testSetupNetworksMtus(False)', # bridgeless
'testSetupNetworksMultiMTUsOverBond(False)', # bridgeless
'testSetupNetworksMultiMTUsOverNic(False)', # bridgeless
'testSetupNetworksNetCompatibilityMultipleNetsSameNic(False)', # brless
'testSetupNetworksNiclessBridgeless', # bridgeless
'testSetupNetworksOverDhcpIface', # bridgeless
'testSetupNetworksRemoveBondWithKilledEnslavedNics', # bridgeless
'testSetupNetworksRemoveSlavelessBond', # bridgeless
'testSetupNetworksResizeBond(False)', # bridgeless
'testSetupNetworksResizeBond(True)', # assert exact custom=ovs=True
'testSetupNetworksStableBond(False)', # bridgeless
'testSetupNetworksStableBond(True)', # OVS wont change operstate
'testStaticSourceRouting(False)', # bridgeless
'test_setupNetworks_bond_with_custom_option', # has custom=ovs=True
'test_setupNetworks_on_external_bond', # uses /proc/sys/net
'test_setupNetworks_on_external_vlaned_bond' # uses ifcfg
]
# Test which are not using OVS hook. It make sense to run them anyways,
# but could be skipped.
does_not_use_ovs = [
'testAddDelBondedNetwork(False)',
'testAddDelBondedNetwork(True)',
'testAddDelNetwork(False)',
'testAddDelNetwork(True)',
'testAddNetworkBondWithManyVlans(False)',
'testAddNetworkBondWithManyVlans(True)',
'testAddNetworkManyVlans(False)',
'testAddNetworkManyVlans(True)',
'testAddNetworkVlan(False)',
'testAddNetworkVlan(True)',
'testAddNetworkVlanBond(False)',
'testAddNetworkVlanBond(True)',
'testBondHwAddress(False)',
'testBondHwAddress(True)',
'testDelNetworkBondAccumulation',
'testDelNetworkWithMTU(False)',
'testDelNetworkWithMTU(True)',
'testDelWithoutAdd(False)',
'testDelWithoutAdd(True)',
"testDhclientLeases(4, 'default')",
"testDhclientLeases(4, 'local')",
'testDhclientLeases(6, None)',
'testEditWithoutAdd(False)',
'testEditWithoutAdd(True)',
'testFailWithInvalidBondingName(False)',
'testFailWithInvalidBondingName(True)',
'testFailWithInvalidBridgeName',
'testFailWithInvalidIpConfig',
'testFailWithInvalidNic(False)',
'testFailWithInvalidNic(True)',
'testFailWithInvalidParams(False)',
'testFailWithInvalidParams(True)',
'testGetRouteDeviceTo',
'testReorderBondingOptions(False)',
'testReorderBondingOptions(True)',
'testSafeNetworkConfig(False)',
'testSafeNetworkConfig(True)',
'testTwiceAdd(False)',
'testTwiceAdd(True)',
'testVolatileConfig(False)',
'testVolatileConfig(True)',
'test_getVdsStats'
]
for t in does_not_use_ovs:
delattr(NetworkTest, t)
for t in not_supported:
delattr(NetworkTest, t)
class OVSVdsProxy(VdsProxy):
def setupNetworks(self, networks, bonds, options):
if options.pop('ovs', True):
# skip non-bridged networks and networks without a nic/bonding,
# such tests should be listed in not_suported list
for _, attrs in networks.items():
if not attrs.get('bridged', True):
raise SkipTest('OVS does not support bridgeless networks')
# setup every network as OVS network
for network, attrs in networks.items():
if 'remove' not in attrs:
networks[network].update({'custom': {'ovs': True}})
for bond, attrs in bonds.items():
if 'remove' not in attrs:
bond_opts = bonds[bond].get('options', '').split()
modified = False
for i in range(len(bond_opts)):
if bond_opts[i].startswith('custom='):
bond_opts[i] = ('custom=%s,ovs=True' %
bond_opts[i].split('=', 1)[1])
modified = True
break
if not modified:
bond_opts.append('custom=ovs=True')
bonds[bond]['options'] = ' '.join(bond_opts)
return super(OVSVdsProxy, self).setupNetworks(networks, bonds, options)
@expandPermutations
class OVSNetworkTest(NetworkTest):
__test__ = True
def cleanupNet(func):
""" Copied from networkTests.py
Instance method decorator. Restores a previously persisted network
config in case of a test failure, traceback is kept. Assumes root
privileges.
"""
@wraps(func)
def wrapper(*args, **kwargs):
with RollbackContext(on_exception_only=True) as rollback:
rollback.prependDefer(args[0].vdsm_net.restoreNetConfig)
func(*args, **kwargs)
return wrapper
def setUp(self):
self.vdsm_net = OVSVdsProxy()
def setupNetworks(self, *args, **kwargs):
# Do not run test_kernel_config
if 'test_kernel_config' in kwargs:
kwargs.pop('test_kernel_config')
return self.vdsm_net.setupNetworks(*args, **kwargs)
@cleanupNet
def test_ovirtmgmtm_to_ovs(self):
""" Test transformation of initial management network to OVS.
# TODO: test it with ovirtmgmt and test-network
# NOTE: without default route
# TODO: more asserts
"""
with veth.pair() as (left, right):
veth.setIP(left, IP_ADDRESS, IP_CIDR)
veth.setIP(left, IPv6_ADDRESS, IPv6_CIDR, 6)
veth.setLinkUp(left)
with dnsmasqDhcp(left):
network = {
NETWORK_NAME: {'nic': right, 'bootproto': 'dhcp',
'bridged': True, 'blockingdhcp': True}}
options = NOCHK
options['ovs'] = False
try:
status, msg = self.setupNetworks(network, {}, options)
self.assertEqual(status, SUCCESS, msg)
self.assertNetworkExists(NETWORK_NAME)
options['ovs'] = True
status, msg = self.setupNetworks(network, {}, options)
self.assertEqual(status, SUCCESS, msg)
self.assertNetworkExists(NETWORK_NAME)
finally:
dhcp.delete_dhclient_leases(NETWORK_NAME, True, False)
@permutations([(True,)])
@cleanupNet
def testStaticSourceRouting(self, bridged):
""" Copied from networkTests.py, network changed to vlaned. """
with dummyIf(1) as nics:
status, msg = self.setupNetworks(
{NETWORK_NAME:
{'nic': nics[0], 'bridged': bridged, 'ipaddr': IP_ADDRESS,
'netmask': IP_MASK, 'gateway': IP_GATEWAY,
'vlan': VLAN_ID}},
{}, NOCHK)
self.assertEqual(status, SUCCESS, msg)
self.assertNetworkExists(NETWORK_NAME, bridged)
deviceName = NETWORK_NAME if bridged else nics[0]
ip_addr = self.vdsm_net.netinfo.networks[NETWORK_NAME]['addr']
self.assertSourceRoutingConfiguration(deviceName, ip_addr)
status, msg = self.setupNetworks(
{NETWORK_NAME: {'remove': True}}, {}, NOCHK)
self.assertEqual(status, SUCCESS, msg)
# Assert that routes and rules don't exist
source_route = _get_source_route(deviceName, ip_addr)
for route in source_route._buildRoutes():
self.assertRouteDoesNotExist(route)
for rule in source_route._buildRules():
self.assertRuleDoesNotExist(rule)
@permutations([(True, (4,)), (True, (6,)), (True, (4, 6))])
@cleanupNet
@RequireVethMod
def testSetupNetworksAddDelDhcp(self, bridged, families):
""" Copied from networkTests.py, source_route checking changed from
device_name to BRIDGE_NAME.
"""
with veth.pair() as (left, right):
veth.setIP(left, IP_ADDRESS, IP_CIDR)
veth.setIP(left, IPv6_ADDRESS, IPv6_CIDR, 6)
veth.setLinkUp(left)
with dnsmasqDhcp(left):
dhcpv4 = 4 in families
dhcpv6 = 6 in families
bootproto = 'dhcp' if dhcpv4 else 'none'
network = {NETWORK_NAME: {'nic': right, 'bridged': bridged,
'bootproto': bootproto,
'dhcpv6': dhcpv6,
'blockingdhcp': True}}
try:
status, msg = self.setupNetworks(network, {}, NOCHK)
self.assertEqual(status, SUCCESS, msg)
self.assertNetworkExists(NETWORK_NAME)
test_net = self.vdsm_net.netinfo.networks[NETWORK_NAME]
self.assertEqual(test_net['dhcpv4'], dhcpv4)
self.assertEqual(test_net['dhcpv6'], dhcpv6)
if bridged:
self.assertEqual(test_net['cfg']['BOOTPROTO'],
bootproto)
devs = self.vdsm_net.netinfo.bridges
device_name = NETWORK_NAME
else:
devs = self.vdsm_net.netinfo.nics
device_name = right
self.assertIn(device_name, devs)
net_attrs = devs[device_name]
self.assertEqual(net_attrs['cfg']['BOOTPROTO'], bootproto)
self.assertEqual(net_attrs['dhcpv4'], dhcpv4)
self.assertEqual(net_attrs['dhcpv6'], dhcpv6)
if dhcpv4:
self.assertEqual(test_net['gateway'], IP_GATEWAY)
# TODO: source routing not ready for IPv6
ip_addr = test_net['addr']
self.assertSourceRoutingConfiguration(BRIDGE_NAME,
ip_addr)
# Do not report DHCP from (typically still valid) leases
network[NETWORK_NAME]['bootproto'] = 'none'
network[NETWORK_NAME]['dhcpv6'] = False
status, msg = self.setupNetworks(network, {}, NOCHK)
self.assertEqual(status, SUCCESS, msg)
test_net = self.vdsm_net.netinfo.networks[NETWORK_NAME]
self.assertEqual(test_net['dhcpv4'], False)
self.assertEqual(test_net['dhcpv6'], False)
network = {NETWORK_NAME: {'remove': True}}
status, msg = self.setupNetworks(network, {}, NOCHK)
self.assertEqual(status, SUCCESS, msg)
self.assertNetworkDoesntExist(NETWORK_NAME)
# Assert that routes and rules don't exist
if dhcpv4:
source_route = _get_source_route(BRIDGE_NAME, ip_addr)
for route in source_route._buildRoutes():
self.assertRouteDoesNotExist(route)
for rule in source_route._buildRules():
self.assertRuleDoesNotExist(rule)
finally:
dhcp.delete_dhclient_leases(
NETWORK_NAME if bridged else right, dhcpv4, dhcpv6)
| gpl-2.0 | -8,391,584,923,909,804,000 | 43.521866 | 79 | 0.615218 | false |
CavityGap/greencoin | contrib/devtools/fix-copyright-headers.py | 1 | 1478 | #!/usr/bin/env python
'''
Run this script inside of src/ and it will look for all the files
that were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The Greencoin developers
it will change it to
// Copyright (c) 2009-2014 The Greencoin developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
year = time.gmtime()[0]
last_year = year - 1
command = "perl -pi -e 's/%s The Greencoin/%s The Greencoin/' %s"
listFilesCommand = "find . | grep %s"
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
gitGetLastCommitDateCommand = "git log " + filePath +" | grep Date | head -n 1"
p = os.popen(gitGetLastCommitDateCommand)
result = ""
for l in p:
result = l
break
result = result.replace("\n","")
return result
n=1
for extension in extensions:
foundFiles = os.popen(listFilesCommand % extension)
for filePath in foundFiles:
filePath = filePath[1:-1]
if filePath.endswith(extension):
filePath = os.getcwd() + filePath
modifiedTime = getLastGitModifiedDate(filePath)
if len(modifiedTime) > 0 and str(year) in modifiedTime:
print n,"Last Git Modified: ", modifiedTime, " - ", filePath
os.popen(command % (last_year,year,filePath))
n = n + 1
| mit | -8,751,418,141,464,714,000 | 26.886792 | 81 | 0.692828 | false |
darafferty/factor | setup.py | 1 | 1734 | from __future__ import print_function
from setuptools import setup, Command
import os
import factor._version
description = 'FACTOR: Facet calibration for LOFAR'
long_description = description
if os.path.exists('README.md'):
with open('README.md') as f:
long_description=f.read()
class PyTest(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys,subprocess
errno = subprocess.call([sys.executable, 'runtests.py'])
raise SystemExit(errno)
setup(
name='FACTOR',
version=factor._version.__version__,
url='http://github.com/lofar-astron/factor/',
description=description,
long_description=long_description,
platforms='any',
classifiers = [
'Programming Language :: Python',
        'Development Status :: 3 - Alpha',
'Natural Language :: English',
'Intended Audience :: Science/Research',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=['numpy', 'scipy', 'astropy', 'jinja2', 'aplpy>=1.0', 'LSMTool>=1.2', ],
dependency_links=['https://github.com/darafferty/LSMTool'],
scripts = ['bin/runfactor','bin/checkfactor','bin/archivefactor','bin/unarchivefactor'],
packages=['factor', 'factor.operations', 'factor.lib'],
package_data={'factor': [
'parsets/*',
'pipeline/*.cfg',
'pipeline/parsets/*',
'pipeline/plugins/*',
'pipeline/recipes/nodes/*',
'scripts/*',
'skymodels/*']},
cmdclass = {'test': PyTest},
)
| gpl-2.0 | -7,995,605,415,183,063,000 | 29.964286 | 93 | 0.618224 | false |
marcoaureliojf/deepin-software-center-v2 | src/searchEntry.py | 2 | 3568 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Deepin, Inc.
# 2011 Wang Yong
#
# Author: Wang Yong <[email protected]>
# Maintainer: Wang Yong <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lang import __, getDefaultLanguage
from theme import *
import gobject
import gtk
import pango
class SearchEntry(gtk.Entry):
'''Search entry.'''
def __init__(self, parent, helpString, hintDColor, backgroundDColor, foregroundDColor, noHint=False):
'''Init for search entry.'''
# Init.
gtk.Entry.__init__(self)
self.helpString = helpString
self.hintDColor = hintDColor
self.backgroundDColor = backgroundDColor
self.foregroundDColor = foregroundDColor
self.ticker = 0
# Set default font.
self.modify_font(pango.FontDescription(DEFAULT_FONT + " 10"))
        # Clear the input the first time the entry receives focus.
if noHint:
self.focusIn = True
self.set_text(self.helpString)
else:
self.focusIn = False
self.connect("focus-in-event", lambda w, e: self.firstFocusIn())
self.connect("expose-event", self.exposeCallback)
self.connect("focus-out-event", lambda w, e: self.focusOut()) # restore text when focus out.
# Show help string.
self.updateColor()
parent.connect("size-allocate", lambda w, e: self.realize())
def exposeCallback(self, widget, event):
'''Expose callback.'''
if self.ticker != appTheme.ticker:
self.ticker = appTheme.ticker
self.updateColor()
def updateColor(self):
'''Update color.'''
self.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(self.backgroundDColor.getColor()))
if self.focusIn:
self.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse(self.foregroundDColor.getColor()))
else:
self.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse(self.hintDColor.getColor()))
self.set_text(self.helpString)
def firstFocusIn(self):
'''First touch callback.'''
self.focusIn = True
# Empty entry when input is help string.
if self.get_text() == self.helpString:
self.set_text("")
# Adjust input text color.
self.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(self.backgroundDColor.getColor()))
self.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse(self.foregroundDColor.getColor()))
return False
def focusOut(self):
'''Callback for 'focus-out-event' signal.'''
if self.get_text() == "":
self.focusIn = False
self.modify_text(gtk.STATE_NORMAL, gtk.gdk.color_parse(self.hintDColor.getColor()))
self.set_text(self.helpString)
gobject.type_register(SearchEntry)
| gpl-3.0 | -1,242,724,971,976,026,000 | 36.166667 | 105 | 0.630045 | false |
timpalpant/calibre | src/calibre/gui2/store/stores/amazon_ca_plugin.py | 3 | 5340 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (unicode_literals, division, absolute_import,
print_function)
store_version = 3 # Needed for dynamic plugin loading
from contextlib import closing
import urllib
from lxml import html
from PyQt5.Qt import QUrl
from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.search_result import SearchResult
SEARCH_BASE_URL = 'http://www.amazon.ca/s/'
SEARCH_BASE_QUERY = {'url': 'search-alias=digital-text'}
DETAILS_URL = 'http://amazon.ca/dp/'
STORE_LINK = 'http://www.amazon.ca'
DRM_SEARCH_TEXT = 'Simultaneous Device Usage'
DRM_FREE_TEXT = 'Unlimited'
def search_amazon(query, max_results=10, timeout=60,
write_html_to=None,
base_url=SEARCH_BASE_URL,
base_query=SEARCH_BASE_QUERY,
field_keywords='field-keywords'
):
uquery = base_query.copy()
uquery[field_keywords] = query
def asbytes(x):
if isinstance(x, type('')):
x = x.encode('utf-8')
return x
uquery = {asbytes(k):asbytes(v) for k, v in uquery.iteritems()}
url = base_url + '?' + urllib.urlencode(uquery).decode('ascii')
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
raw = f.read()
if write_html_to is not None:
with open(write_html_to, 'wb') as f:
f.write(raw)
doc = html.fromstring(raw)
try:
results = doc.xpath('//div[@id="atfResults" and @class]')[0]
except IndexError:
return
if 's-result-list-parent-container' in results.get('class', ''):
data_xpath = "descendant-or-self::li[@class and contains(concat(' ', normalize-space(@class), ' '), ' s-result-item ')]"
format_xpath = './/a[@title="Kindle Edition"]/@title'
asin_xpath = '@data-asin'
cover_xpath = "descendant-or-self::img[@class and contains(concat(' ', normalize-space(@class), ' '), ' s-access-image ')]/@src"
title_xpath = "descendant-or-self::h2[@class and contains(concat(' ', normalize-space(@class), ' '), ' s-access-title ')]//text()"
author_xpath = './/span[starts-with(text(), "by ")]/following-sibling::span//text()'
price_xpath = '(.//span[contains(@class, " s-price ")])[last()]//text()'
else:
return
for data in doc.xpath(data_xpath):
if counter <= 0:
break
# Even though we are searching digital-text only Amazon will still
            # put in results for non Kindle books (author pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format = ''.join(data.xpath(format_xpath))
if 'kindle' not in format.lower():
continue
# We must have an asin otherwise we can't easily reference the
# book later.
asin = data.xpath(asin_xpath)
if asin:
asin = asin[0]
else:
continue
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath(title_xpath))
author = ''.join(data.xpath(author_xpath))
try:
author = author.split('by ', 1)[1].split(" (")[0]
except:
pass
price = ''.join(data.xpath(price_xpath))
counter -= 1
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.formats = 'Kindle'
yield s
class AmazonKindleStore(StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
store_link = (DETAILS_URL + detail_item) if detail_item else STORE_LINK
open_url(QUrl(store_link))
def search(self, query, max_results=10, timeout=60):
for result in search_amazon(query, max_results=max_results, timeout=timeout):
yield result
def get_details(self, search_result, timeout):
url = DETAILS_URL
br = browser()
with closing(br.open(url + search_result.detail_item, timeout=timeout)) as nf:
idata = html.fromstring(nf.read())
if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "' +
DRM_SEARCH_TEXT + '")])'):
if idata.xpath('boolean(//div[@class="content"]//li[contains(., "' +
DRM_FREE_TEXT + '") and contains(b, "' +
DRM_SEARCH_TEXT + '")])'):
search_result.drm = SearchResult.DRM_UNLOCKED
else:
search_result.drm = SearchResult.DRM_UNKNOWN
else:
search_result.drm = SearchResult.DRM_LOCKED
return True
if __name__ == '__main__':
import sys
for result in search_amazon(' '.join(sys.argv[1:]), write_html_to='/t/amazon.html'):
print (result)
| gpl-3.0 | -5,062,873,207,575,200,000 | 36.87234 | 142 | 0.558052 | false |
408794550/Mechine-Learning-In-Action/Ch04/NaiveBayes.py | 1 | 6699 | # Naive Bayes algorithm
# Pros: still effective when the amount of data is small; can handle multi-class problems
# Cons: rather sensitive to how the input data is prepared
# Works with: nominal data
# Core idea of Bayesian decision theory: choose the decision with the highest probability
# encoding: utf-8
from Utility import logging
from numpy import *
def load_dataset():
posting_list = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
class_vec = [0, 1, 0, 1, 0, 1]
return posting_list, class_vec
# create a list of all the unique words that appear in any of the documents
def create_vocabulary_list(dataset):
vocabulary_set = set([])
for document in dataset:
        vocabulary_set = vocabulary_set | set(document)  # "|" is the union of two sets (the same operator also does bitwise OR)
return list(vocabulary_set)
# takes input_set and returns it as a vector: for every word of vocabulary_list that
# appears in input_set, the corresponding position is set to 1
def set_word_to_vec(vocabulary_list, input_set):
    # create a vector of zeros with the same length as vocabulary_list (0 = word absent)
return_vec = [0]*len(vocabulary_list)
for word in input_set:
if word in vocabulary_list:
            # the word appears in input_set, so set the corresponding position to 1
return_vec[vocabulary_list.index(word)] = 1
else:
            logging.info('the word:%s is not in my vocabulary!', word)
return return_vec
posts_list, classes_list = load_dataset()
my_vocabulary_list = create_vocabulary_list(posts_list)
logging.info(my_vocabulary_list)
def train_naive_bayes(train_matrix, train_category):
train_docs_count = len(train_matrix)
words_count = len(train_matrix[0])
p_abusive = sum(train_category)/float(train_docs_count)
    # initialise the counts with ones and the denominators with 2.0 (Laplace smoothing),
    # so a word that never occurs in one class cannot zero out the whole product
p0_count_vec = ones(words_count)
p1_count_vec = ones(words_count)
p0_denom = 2.0
p1_denom = 2.0
for i in range(train_docs_count):
        if train_category[i] == 1:  # 1 marks an abusive document
            # element-wise addition of the two vectors
p1_count_vec += train_matrix[i]
p1_denom += sum(train_matrix[i])
logging.info(p1_denom)
else:
p0_count_vec += train_matrix[i]
p0_denom += sum(train_matrix[i])
    # take the natural log because the factors can be so small that multiplying many of
    # them underflows (the product rounds to 0)
p1_vec = log(p1_count_vec/p1_denom)
p0_vec = log(p0_count_vec/p0_denom)
return p0_vec, p1_vec, p_abusive
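# Illustrative sketch with made-up numbers: the product of many small conditional
# probabilities underflows to 0.0, while the running sum of their logs stays finite,
# because log(a * b) == log(a) + log(b). This is why the training step returns log
# probabilities instead of the raw ratios.
_demo_probs = [1e-5] * 80
_demo_product = 1.0
_demo_log_sum = 0.0
for _demo_p in _demo_probs:
    _demo_product *= _demo_p  # collapses to 0.0 well before the loop finishes
    _demo_log_sum += log(_demo_p)  # stays finite: 80 * log(1e-5)
logging.info('raw product: {} log sum: {}'.format(_demo_product, _demo_log_sum))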
train_mat = []
for post_in_doc in posts_list:
    # vectorize each document here, i.e. record whether each word appears in it
train_mat.append(set_word_to_vec(my_vocabulary_list, post_in_doc))
p0_vec, p1_vec, p_abusive = train_naive_bayes(train_mat, classes_list)
logging.info(p_abusive)
def classify_naive_bayes(vec2_classify, p0_vec, p1_vec, p_class1):
    # multiply the vector element-wise with p1_vec, sum the result, then compare p1 with p0
p1 = sum(vec2_classify * p1_vec) + log(p_class1)
p0 = sum(vec2_classify * p0_vec) + log(1.0 - p_class1)
return 1 if p1 > p0 else 0
def testing_nb():
test_entry = ['love', 'my', 'dalmation']
this_doc = array(set_word_to_vec(my_vocabulary_list, test_entry))
logging.info('{},classified as: {}'.format(test_entry, classify_naive_bayes(this_doc, p0_vec, p1_vec, p_abusive)))
test_entry = ['stupid', 'garbage']
this_doc = array(set_word_to_vec(my_vocabulary_list, test_entry))
logging.info('{},classified as: {}'.format(test_entry, classify_naive_bayes(this_doc, p0_vec, p1_vec, p_abusive)))
testing_nb()
# bag-of-words model
def bag_words_to_vec(vocabulary_list, input_set):
return_vec = [0] * len(vocabulary_list)
for word in input_set:
if word in vocabulary_list:
            # the word appears in input_set, so increment the corresponding position
            return_vec[vocabulary_list.index(word)] += 1  # like the set-of-words version above, but counts occurrences instead of just setting 1
return return_vec
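# Illustrative comparison on a toy vocabulary: the set-of-words vector only records
# presence, while the bag-of-words vector records how many times each word occurred.
_demo_vocab = ['stupid', 'dog', 'my']
logging.info(set_word_to_vec(_demo_vocab, ['stupid', 'stupid', 'dog']))   # -> [1, 1, 0]
logging.info(bag_words_to_vec(_demo_vocab, ['stupid', 'stupid', 'dog']))  # -> [2, 1, 0]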
# filtering spam e-mail with naive Bayes
def text_parse(big_str):
import re
    # the r prefix keeps backslashes in the string from being treated as escape sequences
    # \W matches any non-word character, so the text is split on everything that is not a
    # letter, digit or underscore
    # re.split(): the first argument is the pattern (regular expression), the second is the string to split
    tokens_list = re.split(r'\W+', big_str)
return [tok.lower() for tok in tokens_list if len(tok) > 2]
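# Illustrative sanity check of the tokenizer: it splits on non-word characters,
# lowercases every token and drops tokens of two characters or fewer.
logging.info(text_parse('This book is the best book on Python I have ever laid eyes upon.'))
# -> ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']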
def spam_test():
doc_list = []
classes_list = []
full_text = []
for i in range(1,26):
        # use "with open" so the file is closed properly even if reading fails
with open('email/spam/%d.txt' % i) as f:
word_list = text_parse(f.read())
doc_list.append(word_list)
full_text.append(word_list)
classes_list.append(1)
with open('email/ham/%d.txt' % i) as f:
word_list = text_parse(f.read())
doc_list.append(word_list)
full_text.append(word_list)
            classes_list.append(0)  # ham documents are labelled 0 (spam above is labelled 1)
vocabulary_list = create_vocabulary_list(doc_list)
training_set = range(50)
test_set = []
    # randomly pick 10 documents as the test set
for i in range(10):
rand_index = int(random.uniform(0, len(training_set)))
test_set.append(training_set[rand_index])
del (training_set[rand_index])
train_mat = []
train_classes = []
    # training
for doc_index in training_set:
train_mat.append(bag_words_to_vec(vocabulary_list, doc_list[doc_index]))
        train_classes.append(classes_list[doc_index])
p0_vec, p1_vec, p_spam = train_naive_bayes(array(train_mat), array(train_classes))
error_count = 0
    # testing
for doc_index in test_set:
        word_vector = bag_words_to_vec(vocabulary_list, doc_list[doc_index])
        if classify_naive_bayes(array(word_vector), p0_vec, p1_vec, p_spam) != classes_list[doc_index]:
            error_count += 1
            logging.info("classification error {}".format(doc_list[doc_index]))
    logging.info('the error rate is: {}'.format(float(error_count) / len(test_set)))
spam_test()
# using the naive Bayes classifier to reveal regional attitudes from personal ads
| apache-2.0 | -5,173,781,764,079,549,000 | 33.04142 | 118 | 0.605076 | false |
code-google-com/oyprojectmanager | oyProjectManager/models/version.py | 3 | 47978 | # -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
import re
from copy import copy
import jinja2
from sqlalchemy import (UniqueConstraint, Column, Integer, ForeignKey, String,
Boolean, Enum, Table)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import synonym_for
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from sqlalchemy.orm import relationship, validates, synonym, backref
from oyProjectManager import db
from oyProjectManager import conf
from oyProjectManager.db.declarative import Base
from oyProjectManager.models.auth import User
from oyProjectManager.models.errors import CircularDependencyError
from oyProjectManager.models.mixins import IOMixin
# create a logger
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class VersionStatusComparator(str, Comparator):
"""The comparator class for Version.status
"""
def __new__(cls, status):
if isinstance(status, VersionStatusComparator):
status = status.status
elif isinstance(status, basestring):
status_list_long_names = conf.status_list_long_names
if status in status_list_long_names:
index = status_list_long_names.index(status)
status = conf.status_list[index]
status = status
obj = str.__new__(cls, status)
obj.status = status
return obj
def __eq__(self, other):
if not isinstance(other, VersionStatusComparator):
other = VersionStatusComparator(other)
return self.__clause_element__() == other.status
def __clause_element__(self):
return self.status
class Version(Base, IOMixin):
"""Holds versions of assets or shots.
In oyProjectManager a Version is the file created for an
:class:`~oyProjectManager.models.asset.Asset` or
:class:`~oyProjectManager.models.shot.Shot`\ . The placement of this file
is automatically handled by the connected
:class:`~oyProjectManager.models.version.VersionType` instance.
The values given for
:attr:`~oyProjectManager.models.version.Version.base_name` and
:attr:`~oyProjectManager.models.version.Version.take_name` are conditioned
as follows:
* Each word in the string should start with an upper-case letter (title)
* It can have all upper-case letters
* CamelCase is allowed
* Valid characters are ([A-Z])([a-zA-Z0-9\_])
* No white space characters are allowed, if a string is given with
white spaces, it will be replaced with underscore ("_") characters.
* No numbers are allowed at the beginning of the string
* No leading or trailing underscore character is allowed
So, with these rules are given, the examples for input and conditioned
strings are as follows:
* "BaseName" -> "BaseName"
* "baseName" -> "BaseName"
* " baseName" -> "BaseName"
* " base name" -> "Base_Name"
* " 12base name" -> "Base_Name"
* " 12 base name" -> "Base_Name"
* " 12 base name 13" -> "Base_Name_13"
* ">£#>$#£½$ 12 base £#$£#$£½¾{½{ name 13" -> "Base_Name_13"
* "_base_name_" -> "Base_Name"
For a newly created Version the
:attr:`~oyProjectManager.models.version.Version.filename` and the
:attr:`~oyProjectManager.models.version.Version.path` attributes are
rendered from the associated
:class:`~oyProjectManager.models.version.VersionType` instance. The
resultant
:attr:`~oyProjectManager.models.version.Version.filename` and
:attr:`~oyProjectManager.models.version.Version.path` values are stored and
retrieved back from the Version instance itself, no re-rendering happens.
    It means the Version class depends on the
:class:`~oyProjectManager.models.version.VersionType` class only at the
initialization, any change made to the
:class:`~oyProjectManager.models.version.VersionType` instance (like
changing the :attr:`~oyProjectManager.models.version.VersionType.name` or
:attr:`~oyProjectManager.models.version.VersionType.code` of the
    :class:`~oyProjectManager.models.version.VersionType`) will not affect the
Version instances created before this change. This is done in that way to
be able to freely change the
:class:`~oyProjectManager.models.version.VersionType` attributes and
    prevent losing the connection between a Version and a file on the
repository for previously created Versions.
.. versionadded:: 0.2.2
Published Versions:
After v0.2.2 Versions can be set published. It is a bool attribute
        holding information about whether this Version is published or not.
:param version_of: A
:class:`~oyProjectManager.models.entity.VersionableBase` instance
(:class:`~oyProjectManager.models.asset.Asset` or
:class:`~oyProjectManager.models.shot.Shot`) which is the owner of this
version. Can not be skipped or set to None.
:type type: :class:`~oyProjectManager.models.asset.Asset`,
:class:`~oyProjectManager.models.shot.Shot` or
:class:`~oyProjectManager.models.entity.VersionableBase`
:param type: A :class:`~oyProjectManager.models.version.VersionType`
instance which is showing the type of the current version. The type is
also responsible of the placement of this Version in the repository. So
the :attr:`~oyProjectManager.models.version.Version.filename` and the
:attr:`~oyProjectManager.models.version.Version.path` is defined by the
related :class:`~oyProjectManager.models.version.VersionType` and the
:class:`~oyProjectManager.models.project.Project` settings. Can not be
skipped or can not be set to None.
:type type: :class:`~oyProjectManager.models.version.VersionType`
:param str base_name: A string showing the base name of this Version
instance. Generally used to create an appropriate
:attr:`~oyProjectManager.models.version.Version.filename` and a
:attr:`~oyProjectManager.models.version.Version.path` value. Can not be
skipped, can not be None or empty string.
:param str take_name: A string showing the take name. The default value is
"Main" and it will be used in case it is skipped or it is set to None
or an empty string. Generally used to create an appropriate
:attr:`~oyProjectManager.models.version.Version.filename` and a
:attr:`~oyProjectManager.models.version.Version.path` value.
:param int revision_number: It is an integer number showing the client
revision number. The default value is 0 and it is used when the argument
is skipped or set to None. It should be an increasing number with the
newly created versions.
:param int version_number: An integer number showing the current version
number. It should be an increasing number among the Versions with the
same base_name and take_name values. If skipped a proper value will be
used by looking at the previous versions created with the same base_name
and take_name values from the database. If the given value already exists
then it will be replaced with the next available version number from the
database.
:param str note: A string holding the related note for this version. Can be
used for anything the user desires.
:param created_by: A :class:`~oyProjectManager.models.auth.User` instance
showing who created this version. It can not be skipped or set to None or
anything other than a :class:`~oyProjectManager.models.auth.User`
instance.
:type created_by: :class:`~oyProjectManager.models.auth.User`
:param str extension: A string holding the file extension of this version.
It may or may not include a dot (".") sign as the first character.
:param bool is_published: A bool value defining this Version as a published
one.
"""
# TODO: add audit info like date_created, date_updated, created_at and
# updated_by
# TODO: add needs_update flag, primarily need to be used with renamed
# versions
# file_size_format = "%.2f MB"
# timeFormat = '%d.%m.%Y %H:%M'
__tablename__ = "Versions"
__table_args__ = (
UniqueConstraint("version_of_id", "take_name", "_version_number",
"type_id"),
{"extend_existing": True}
)
id = Column(Integer, primary_key=True)
version_of_id = Column(Integer, ForeignKey("Versionables.id"),
nullable=False)
_version_of = relationship("VersionableBase")
type_id = Column(Integer, ForeignKey("VersionTypes.id"))
_type = relationship("VersionType")
_filename = Column(String)
_path = Column(String)
_output_path = Column(String)
_extension = Column(String)
base_name = Column(String)
take_name = Column(String, default=conf.default_take_name)
revision_number = Column(Integer, default=0)
_version_number = Column(Integer, default=1, nullable=False)
note = Column(String, default='')
created_by_id = Column(Integer, ForeignKey("Users.id"))
created_by = relationship("User")
references = relationship(
"Version",
secondary="Version_References",
primaryjoin="Versions.c.id==Version_References.c.referencer_id",
secondaryjoin="Version_References.c.reference_id==Versions.c.id",
backref="referenced_by"
)
is_published = Column(Boolean, default=False)
_status = Column(
Enum(*conf.status_list, name='StatusNames'),
)
def __init__(
self,
version_of,
base_name,
type,
created_by,
take_name=conf.default_take_name,
version_number=1,
note="",
extension="",
is_published=False,
status=None,
inputs=None,
outputs=None
):
IOMixin.__init__(self, inputs, outputs)
self.is_published = is_published
self._version_of = version_of
self._type = type
self.base_name = base_name
self.take_name = take_name
self.version_number = version_number
self.note = note
self.created_by = created_by
self._filename = ""
self._path = ""
self._output_path = ""
# setting the extension will update the path variables already
self.extension = extension
self.status = status
def __repr__(self):
"""The representation of this version
"""
return "<Version: %s>" % self.filename
def __eq__(self, other):
"""the equality operator
"""
return isinstance(other, Version) and \
self.base_name == other.base_name and \
self.version_of == other.version_of and \
self.type == other.type and self.take_name == other.take_name and \
self.version_number == other.version_number
def __ne__(self, other):
"""the not equal operator
"""
return not self.__eq__(other)
def update_paths(self):
"""updates the path variables
"""
kwargs = self._template_variables()
self._filename = jinja2.Template(self.type.filename).render(**kwargs)
self._path = jinja2.Template(self.type.path).render(**kwargs)
self._output_path = jinja2.Template(self.type.output_path). \
render(**kwargs)
@validates("_version_of")
def _validate_version_of(self, key, version_of):
"""validates the given version of value
"""
from oyProjectManager.models.entity import VersionableBase
if version_of is None:
raise TypeError("Version.version_of can not be None")
if not isinstance(version_of, VersionableBase):
raise TypeError("Version.version_of should be an Asset or Shot "
"or anything derives from VersionableBase class")
return version_of
@synonym_for("_version_of")
@property
def version_of(self):
"""The instance that this version belongs to.
Generally it is a Shot or an Asset instance or anything which derives
from VersionableBase class
"""
return self._version_of
@validates("_type")
def _validate_type(self, key, type):
"""validates the given type value
"""
if type is None:
raise TypeError("Version.type can not be None")
if not isinstance(type, VersionType):
raise TypeError("Version.type should be an instance of "
"VersionType class")
# raise a TypeError if the given VersionType is not suitable for the
# given version_of instance
if self.version_of.__class__.__name__ != type.type_for:
raise TypeError(
"The VersionType instance given for Version.type is not "
"suitable for the given VersionableBase instance, the "
"version_of is a %s and the version_type is for %s" %
(self.version_of.__class__.__name__, type.type_for)
)
return type
def _validate_extension(self, extension):
"""Validates the given extension value
"""
if not isinstance(extension, (str, unicode)):
raise TypeError("Version.extension should be an instance of "
"string or unicode")
if extension != "":
if not extension.startswith("."):
extension = "." + extension
return extension
def _extension_getter(self):
"""Returns the extension attribute value
"""
return self._extension
def _extension_setter(self, extension):
"""Sets the extension attribute
:param extension: The new extension should be a string or unicode value
either starting with a dot sign or not.
"""
self._extension = self._validate_extension(extension)
# now update the filename
self.update_paths()
extension = synonym(
"_extension",
descriptor=property(fget=_extension_getter, fset=_extension_setter),
doc="""The extension of this version file, updating the extension will
also update the filename
"""
)
@synonym_for("_type")
@property
def type(self):
"""The type of this Version instance.
It is a VersionType object.
"""
return self._type
def _template_variables(self):
from oyProjectManager.models.shot import Shot
kwargs = {
"project": self.version_of.project,
"sequence": self.version_of.sequence if isinstance(self.version_of,
Shot) else "",
"shot": self.version_of,
"asset": self.version_of,
"version": self,
"type": self.type,
}
return kwargs
@synonym_for("_filename")
@property
def filename(self):
"""The filename of this version.
It is automatically created by rendering the VersionType.filename
template with the information supplied with this Version instance.
"""
return self._filename
@synonym_for("_path")
@property
def path(self):
"""The path of this version.
It is automatically created by rendering the template in
:class`~oyProjectManager.models.version.Version`\.
:attr:`~oyProjectManager.models.version.VersionType.path` of the with
the information supplied by this
:class:`~oyProjectManager.models.version.Version` instance.
The resultant path is an absolute one. But the stored path in the
database is just the relative portion to the
:class:`~oyProjectManager.models.repository.Repository`\ .\
:attr:`~oyProjectManager.models.repository.Repository.server_path`
"""
return os.path.join(
self.project.path,
self._path
).replace("\\", "/")
@property
def full_path(self):
"""The full_path of this version.
It is the join of
:class:`~oyProjectManager.models.repository.Repository`.\
:attr:`~oyProjectManager.models.repository.Repository.server_path` and
:class:`~oyProjectManager.models.version.Version`.\
:attr:`~oyProjectManager.models.version.Version.path` and
:class:`~oyProjectManager.models.version.Version`.\
:attr:`~oyProjectManager.models.version.Version.filename` attributes.
So, it is an absolute path. The value of the ``full_path`` is not
stored in the database.
"""
return os.path.join(
self.path,
self.filename
).replace("\\", "/")
@synonym_for("_output_path")
@property
def output_path(self):
"""The output_path of this version.
It is automatically created by rendering the
:class:`~oyProjectManager.models.version.VersionType`\ .\
:attr:`~oyProjectManager.models.version.VersionType.output_path`
template with the information supplied with this ``Version`` instance.
The resultant path is an absolute one. But the stored path in the
database is just the relative portion to the
:class:`~oyProjectManager.models.repository.Repository`\ .\
:attr:`~oyProjectManager.models.repository.Repository.server_path`.
"""
return os.path.join(
self.project.path,
self._output_path
).replace("\\", "/")
def _condition_name(self, name):
"""conditions the base name, see the
:class:`~oyProjectManager.models.version.Version` documentation for
details
"""
# strip the name
name = name.strip()
# convert all the "-" signs to "_" at the beginning and the at the end
# of the string
#name = name.replace("-", "_")
#name = re.sub(r"^[\-]+", r"", name)
#name = re.sub(r"[\-]+$", r"", name)
# remove unnecessary characters from the string
name = re.sub(r"([^a-zA-Z0-9\s_\-]+)", r"", name)
# remove all the characters from the beginning which are not alphabetic
name = re.sub(r"(^[^a-zA-Z0-9]+)", r"", name)
# substitute all spaces with "_" characters
name = re.sub(r"([\s])+", "_", name)
# make each words first letter uppercase
name = "_".join([word[0].upper() + word[1:]
for word in name.split("_")
if len(word)])
name = "-".join([word[0].upper() + word[1:]
for word in name.split("-")
if len(word)])
return name
@validates("base_name")
def _validate_base_name(self, key, base_name):
"""validates the given base_name value
"""
if base_name is None:
raise TypeError("Version.base_name can not be None, please "
"supply a proper string or unicode value")
if not isinstance(base_name, (str, unicode)):
raise TypeError("Version.base_name should be an instance of "
"string or unicode")
base_name = self._condition_name(base_name)
if base_name == "":
raise ValueError("Version.base_name is either given as an empty "
"string or it became empty after formatting")
return base_name
@validates("take_name")
def _validate_take_name(self, key, take_name):
"""validates the given take_name value
"""
# get the config
# from oyProjectManager import config
# conf = config.Config()
from oyProjectManager import conf
if take_name is None:
take_name = conf.default_take_name
if not isinstance(take_name, (str, unicode)):
raise TypeError("Version.take_name should be an instance of "
"string or unicode")
take_name = self._condition_name(take_name)
if take_name == "":
raise ValueError("Version.take_name is either given as an empty "
"string or it became empty after formatting")
return take_name
def latest_version(self):
"""returns the Version instance with the highest version number in this
series
:returns: :class:`~oyProjectManager.models.version.Version` instance
"""
# .filter(Version.base_name == self.base_name)\
return db.session \
.query(Version) \
.filter(Version.type == self.type) \
.filter(Version.version_of == self.version_of) \
.filter(Version.take_name == self.take_name) \
.order_by(Version.version_number.desc()) \
.first()
@property
def max_version(self):
"""returns the maximum version number for this Version from the
database.
:returns: int
"""
last_version = self.latest_version()
if last_version:
max_version = last_version.version_number
else:
max_version = 0
return max_version
def _validate_version_number(self, version_number):
"""validates the given version number
"""
max_version = self.max_version
if version_number is None:
# get the smallest possible value for the version_number
# from the database
version_number = max_version + 1
if version_number <= max_version:
version_number = max_version + 1
return version_number
def _version_number_getter(self):
"""returns the version_number of this Version instance
"""
return self._version_number
def _version_number_setter(self, version_number):
"""sets the version_number of this Version instance
"""
self._version_number = self._validate_version_number(version_number)
version_number = synonym(
"_version_number",
descriptor=property(
_version_number_getter,
_version_number_setter
)
)
def save(self):
"""commits the changes to the database
"""
if self not in db.session:
db.session.add(self)
db.session.commit()
@validates("note")
def _validate_note(self, key, note):
"""validates the given note value
"""
if note is None:
note = ""
if not isinstance(note, (str, unicode)):
raise TypeError("Version.note should be an instance of "
"string or unicode")
return note
@validates("created_by")
def _validate_created_by(self, key, created_by):
"""validates the created_by value
"""
if created_by is None:
raise TypeError("Version.created_by can not be None, please "
"set it to oyProjectManager.models.auth.User "
"instance")
if not isinstance(created_by, User):
raise TypeError("Version.created_by should be an instance of"
"oyProjectManager.models.auth.User")
return created_by
@validates("references")
def _validate_references(self, key, reference):
"""validates the given reference value
"""
if reference is self:
raise ValueError("Version.references can not have a reference to "
"itself")
# check circular dependency
_check_circular_dependency(reference, self)
return reference
@property
def project(self):
"""The :class:`~oyProjectManager.models.project.Project` instance that
this Version belongs to
"""
return self.version_of.project
def is_latest_version(self):
"""returns True if this is the latest Version False otherwise
"""
return self.max_version == self.version_number
def is_latest_published_version(self):
"""returns True if this is the latest published Version False otherwise
"""
if not self.is_published:
return False
return self == self.latest_published_version()
def latest_published_version(self):
"""Returns the last published version.
:return: :class:`~oyProjectManager.models.version.Version`
"""
return db.session \
.query(Version) \
.filter(Version.type == self.type) \
.filter(Version.version_of == self.version_of) \
.filter(Version.take_name == self.take_name) \
.filter(Version.is_published == True) \
.order_by(Version.version_number.desc()) \
.first()
@property
def dependency_update_list(self, published_only=True):
"""Calculates a list of
:class:`~oyProjectManager.models.version.Version` instances, which are
referenced by this Version and has a newer version.
Also checks the references in the referenced Version and appends the
resultant list to the current dependency_update_list. Resulting a much
deeper update info.
:return: list of :class:`~oyProjectManager.models.version.Version`
instances.
"""
# loop through the referenced Version instances and check if they have
# newer Versions
update_list = []
# for ref in self.references:
# if not ref.is_latest_version():
# update_list.append(ref)
# # also loop in their references
# update_list.extend(ref.dependency_update_list)
# for now just search for published versions for the first references
# do not search the children of it
for ref in self.references:
# get the last published versions of the references
published_version = ref.latest_published_version()
# if the version number is bigger add it to the update list
if published_version:
if published_version.version_number > ref.version_number:
update_list.append(ref)
return update_list
# @validates('status')
def _validate_status(self, status):
"""validates the given status value
"""
if isinstance(status, VersionStatusComparator):
status = status.status
if status is None:
latest_version = self.latest_version()
if latest_version:
status = latest_version.status
else:
# set it to status[0]
status = conf.status_list[0]
if not isinstance(status, (str, unicode)):
raise TypeError(
'Version.status should be one an instance of string and the '
'value should be one of of %s not %s' %
(conf.status_list, status.__class__.__name__)
)
all_statuses = copy(conf.status_list)
all_statuses.extend(conf.status_list_long_names)
if status not in all_statuses:
raise ValueError('Version.status should be one of %s not %s' %
(conf.status_list, status))
if status in conf.status_list_long_names:
index = conf.status_list_long_names.index(status)
status = conf.status_list[index]
return status
@hybrid_property
def status(self):
return VersionStatusComparator(self._status)
@status.setter
def status(self, status):
self._status = self._validate_status(status)
class VersionType(Base):
"""A template for :class:`~oyProjectManager.models.version.Version` class.
A VersionType is basically a template object for the
:class:`~oyProjectManager.models.version.Version` instances. It gives the
information about the filename template, path template and output path
template for the :class:`~oyProjectManager.models.version.Version` class.
Then the :class:`~oyProjectManager.models.version.Version` class renders
this Jinja2 templates and places itself (or the produced files) in to the
appropriate folders in the
:class:`~oyProjectManager.models.repository.Repository`.
All the template variables (
:attr:`~oyProjectManager.models.version.VersionType.filename`,
:attr:`~oyProjectManager.models.version.VersionType.path`,
:attr:`~oyProjectManager.models.version.VersionType.output_path`) can use
the following variables in their template codes.
.. _table:
+---------------+-----------------------------+--------------------------+
| Variable Name | Variable Source | Description |
+===============+=============================+==========================+
| project | version.version_of.project | The project that the |
| | | Version belongs to |
+---------------+-----------------------------+--------------------------+
| sequence | version.version_of.sequence | Only available for Shot |
| | | versions |
+---------------+-----------------------------+--------------------------+
| version | version | The version itself |
+---------------+-----------------------------+--------------------------+
| type | version.type | The VersionType instance |
| | | attached to the this |
| | | Version |
+---------------+-----------------------------+--------------------------+
In oyProjectManager, generally you don't need to create VersionType
instances by hand. Instead, add all the version types you need to your
config.py file and the :class:`~oyProjectManager.models.project.Project`
instance will create all the necessary VersionTypes from this config.py
    configuration file. For more information about the config.py please see
the documentation of config.py.
    For previously created projects, where a new type needs to be added, you
can still create a new VersionType instance and save it to the Projects'
database.
:param str name: The name of this template. The name is not formatted in
anyway. It can not be skipped or it can not be None or it can not be an
empty string. The name attribute should be unique. Be careful that even
specifying a non unique name VersionType instance will not raise an error
until :meth:`~oyProjectManager.models.version.VersionType.save` is
called.
:param str code: The code is a shorthand form of the name. For example,
if the name is "Animation" than the code can be "ANIM" or "Anim" or
"anim". Because the code is generally used in filename, path or
output_path templates it is going to be a part of the filename or path,
so be careful about what you give as a code. The code attribute should be
unique. Be careful that even specifying a non unique code VersionType
instance will not raise an error until
:meth:`~oyProjectManager.models.version.VersionType.save` is called. For
formatting, these rules are current:
* no white space characters are allowed
* can not start with a number
* can not start or end with an underscore character
* both lowercase or uppercase letters are allowed
A good code is the short form of the
:attr:`~oyProjectManager.models.version.VersionType.name` attribute.
Examples:
+----------------+----------------------+
| Name | Code |
+================+======================+
| Animation | Anim or ANIM |
+----------------+----------------------+
| Composition | Comp or COMP |
+----------------+----------------------+
| Match Move | MM |
+----------------+----------------------+
| Camera Track | Track or TACK |
+----------------+----------------------+
| Model | Model or MODEL |
+----------------+----------------------+
| Rig | Rig or RIG |
+----------------+----------------------+
| Scene Assembly | Asmbly or ASMBLY |
+----------------+----------------------+
| Lighting | Lighting or LIGHTING |
+----------------+----------------------+
| Camera | Cam or CAM |
+----------------+----------------------+
:param filename: The filename template. It is a single line Jinja2 template
showing the filename of the
:class:`~oyProjectManager.models.version.Version` which is using this
VersionType. Look for the above `table`_ for possible variables those can
be used in the template code.
    For example, the following is a nice template for an Asset version
    filename::
{{version.base_name}}_{{version.take_name}}_{{type.code}}_\\
v{{'%03d'|format(version.version_number)}}_\\
{{version.created_by.initials}}
Which will render something like that::
Car_Main_Model_v001_oy
Now all the versions for the same asset will have a consistent name.
When the filename argument is skipped or is an empty string is given a
TypeError will be raised to prevent creation of files with no names.
:param str path: The path template. It is a single line Jinja2 template
showing the absolute path of this
:class:`~oyProjectManager.models.version.Version` instance. Look for the
above `table`_ for possible variables those can be used in the template
code.
    For example, the following is a nice template for a Shot version::
{{project.full_path}}/Sequences/{{sequence.code}}/Shots/\\
{{version.base_name}}/{{type.code}}
    This will place a Shot Version whose base_name is SH001 and let's say that
the type is Animation (where the code is ANIM) to a path like::
M:/JOBs/PROJ1/Sequences/SEQ1/Shots/SH001/ANIM
    All the animation files related to this shot will be saved inside that
folder.
:param str output_path: It is a single line Jinja2 template which shows
where to place the outputs of this kind of
:class:`~oyProjectManager.models.version.Version`\ s. An output is simply
anything that is rendered out from the source file, it can be the renders
or playblast/flipbook outputs for Maya, Nuke or Houdini and can be
different file type versions (tga, jpg, etc.) for Photoshop files.
Generally it is a good idea to store the output right beside the source
file. So for a Shot the following is a good example::
{{version.path}}/Outputs
Which will render as::
M:/JOBs/PROJ1/Sequences/SEQ1/Shots/SH001/ANIM/Outputs
:param str extra_folders: It is a list of single line Jinja2 template codes
    which show the extra folders that need to be created. It is
generally created in the
:class:`~oyProjectManager.models.project.Project` creation phase.
The following is an extra folder hierarchy created for the FX version
type::
{{version.path}}/cache
:param environments: A list of environments that this VersionType is valid
    for. The idea behind it is to limit the possible list of types for the
    program that the user is working on. So let's say it is not possible to
    create a camera track in Photoshop; then what one can do is to add a
Camera Track type but exclude the Photoshop from the list of environments
that this type is valid for.
The given value should be a list of environment names, be careful about
not to pass just a string for the environments list, python will convert
the string to a list by putting all the letters in separate elements in
the list. And probably this is not something one wants.
:type environments: list of strings
:param type_for: An enum value specifying what this VersionType instance is
    for, is it for an "Asset" or for a "Shot". The two acceptable values are
"Asset" or "Shot". Any other value will raise an IntegrityError. It can
not be skipped.
"""
__tablename__ = "VersionTypes"
__table_args__ = (
{"extend_existing": True}
)
id = Column(Integer, primary_key=True)
# project_id = Column(Integer, ForeignKey("Projects.id"))
# _project = relationship("Project")
name = Column(String, unique=True)
code = Column(String, unique=True)
filename = Column(
String,
doc="""The filename template for this type of version instances.
You can freely change the filename template attribute after creating
:class:`~oyProjectManager.models.version.Version`\ s of this type. Any
:class:`~oyProjectManager.models.version.Version` which is created
        prior to this change will not be affected. But be careful: the
older and newer :class:`~oyProjectManager.models.version.Version`\ s of
the same :class:`~oyProjectManager.models.asset.Asset` or
        :class:`~oyProjectManager.models.shot.Shot` may be placed in different
folders according to your new template.
The template **should not** include a dot (".") sign before the
extension, it is handled by the
:class:`~oyProjectManager.models.version.Version` instance.
"""
)
path = Column(
String,
doc="""The path template for this Type of Version instance.
You can freely change the path template attribute after creating
:class:`~oyProjectManager.models.version.Version`\ s of this type. Any
:class:`~oyProjectManager.models.version.Version` which is created
        prior to this change will not be affected. But be careful: the
older and newer :class:`~oyProjectManager.models.version.Version`\ s of
the same :class:`~oyProjectManager.models.asset.Asset` or
        :class:`~oyProjectManager.models.shot.Shot` may be placed in different
folders according to your new template.
The path template should be an relative one to the
:attr:`~oyProjectManager.models.repository.Repository.server_path`, so
don't forget to place ``{{project.code}}`` at the beginning of you
template if you are storing all your asset and shots inside the project
directory.
If you want to store your assets in one place and use them in several
projects, you can do it by starting the ``path`` of the VersionType
with something like that::
"Assets/{{version.base_name}}/{{type.code}}"
and if your repository path is "/mnt/M/JOBs" then your assets will be
stored in::
"/mnt/M/JOBs/Assets"
"""
)
output_path = Column(
String,
doc="""The output path template for this Type of Version instances.
To place your output path right beside the original version file you
can set the ``output_path`` to::
"{{version.path}}/Outputs/{{version.take_name}}"
"""
)
extra_folders = Column(
String,
        doc="""A string containing the extra folder names that need to be
created"""
)
environments = association_proxy(
"version_type_environments",
"environment_name"
)
_type_for = Column(
Enum("Asset", "Shot", name="ckEnumType"),
        doc="""An enum value showing if this version type is valid for Assets or
Shots.
"""
)
def __init__(
self,
name,
code,
path,
filename,
output_path,
environments,
type_for,
extra_folders=None
):
self.name = name
self.code = code
self.filename = filename
self.path = path
self.output_path = output_path
self.environments = environments
self.extra_folders = extra_folders
self._type_for = type_for
def __eq__(self, other):
"""equality operator
"""
return isinstance(other, VersionType) and self.name == other.name
def __ne__(self, other):
"""inequality operator
"""
return not self.__eq__(other)
@validates("name")
def _validate_name(self, key, name):
"""validates the given name value
"""
if name is None:
raise TypeError("VersionType.name can not be None, please "
"supply a string or unicode instance")
if not isinstance(name, (str, unicode)):
raise TypeError("VersionType.name should be an instance of "
"string or unicode")
return name
@validates("code")
def _validate_code(self, key, code):
"""validates the given code value
"""
if code is None:
raise TypeError("VersionType.code can not be None, please "
"specify a proper string value")
if not isinstance(code, (str, unicode)):
raise TypeError("VersionType.code should be an instance of "
"string or unicode, please supply one")
return code
@validates("extra_folders")
def _validate_extra_folders(self, key, extra_folders):
"""validates the given extra_folders value
"""
if extra_folders is None:
extra_folders = ""
if not isinstance(extra_folders, (str, unicode)):
raise TypeError("VersionType.extra_folders should be a string or "
"unicode value showing the extra folders those "
"needs to be created with the Version of this "
"type.")
return extra_folders
@validates("filename")
def _validate_filename(self, key, filename):
"""validates the given filename
"""
if filename is None:
raise TypeError("VersionType.filename can not be None, please "
"specify a valid filename template string by "
"using Jinja2 template syntax")
if not isinstance(filename, (str, unicode)):
raise TypeError("VersionType.filename should be an instance of"
"string or unicode")
if filename == "":
raise ValueError("VersionType.filename can not be an empty "
"string, it should be a string containing a "
"Jinja2 template code showing the file naming "
"convention of Versions of this type.")
return filename
@validates("path")
def _validate_path(self, key, path):
"""validates the given path
"""
if path is None:
raise TypeError("VersionType.path can not be None, please "
"specify a valid path template string by using "
"Jinja2 template syntax")
if not isinstance(path, (str, unicode)):
raise TypeError("VersionType.path should be an instance of string "
"or unicode")
if path == "":
raise ValueError("VersionType.path can not be an empty "
"string, it should be a string containing a "
"Jinja2 template code showing the file naming "
"convention of Versions of this type.")
return path
@validates("output_path")
def _validate_output_path(self, key, output_path):
"""Validates the given output_path value
"""
if output_path is None:
raise TypeError("VersionType.output_path can not be None")
if not isinstance(output_path, (str, unicode)):
raise TypeError("VersionType.output_path should be an instance "
"of string or unicode, not %s" % type(output_path))
if output_path == "":
raise ValueError("VersionType.output_path can not be an empty "
"string")
return output_path
def save(self):
"""Saves the current VersionType to the database
"""
if self not in db.session:
db.session.add(self)
db.session.commit()
@validates("_type_for")
def _validate_type_for(self, key, type_for):
"""Validates the given type_for value
"""
if type_for is None:
raise TypeError("VersionType.type_for can not be None, it should "
"be a string or unicode value")
if not isinstance(type_for, (str, unicode)):
raise TypeError("VersionType.type_for should be an instance of "
"string or unicode, not %s" % type(type_for))
return type_for
@synonym_for("_type_for")
@property
    def type_for(self):
        """An enum attribute holding what this VersionType is created for, a Shot
or an Asset.
"""
return self._type_for
class VersionTypeEnvironments(Base):
"""An association object for VersionType.environments
"""
__tablename__ = "VersionType_Environments"
__table_args__ = (
{"extend_existing": True}
)
versionType_id = Column(Integer, ForeignKey("VersionTypes.id"),
primary_key=True)
environment_name = Column(
String,
primary_key=True,
doc="""The name of the environment which the VersionType instance is
valid for
"""
)
version_type = relationship(
"VersionType",
backref=backref(
"version_type_environments",
cascade="all, delete-orphan"
)
)
def __init__(self, environment_name):
self.environment_name = environment_name
@validates("environment_name")
def _validate_environment_name(self, key, environment_name):
"""validates the given environment_name value
"""
if environment_name is None or \
not isinstance(environment_name, (str, unicode)):
raise TypeError("VersionType.environments should be a list of "
"strings containing the environment names")
return environment_name
def _check_circular_dependency(version, check_for_version):
"""checks the circular dependency in version if it has check_for_version in
its depends list
"""
for reference in version.references:
if reference is check_for_version:
raise CircularDependencyError(
"version %s can not reference %s, this creates a circular "
"dependency" % (version, check_for_version)
)
else:
_check_circular_dependency(reference, check_for_version)
# secondary tables
Version_References = Table(
"Version_References", Base.metadata,
Column("referencer_id", Integer, ForeignKey("Versions.id"),
primary_key=True),
Column("reference_id", Integer, ForeignKey("Versions.id"),
primary_key=True),
extend_existing=True
)
| bsd-2-clause | 2,766,646,839,963,355,000 | 36.243012 | 79 | 0.602702 | false |
LolexInc/Lolex-Tools | project/new/lib/LolexToolsMethods.py | 1 | 1215 | #! python3
# 0
# 0 000000 0 000000 0 0 000000000 00000000 00000000 0 000000
# 0 00 0 0 0 00 00 0 0 0 0 0 0
# 0 00 0 0 00000 00 000000 00 0 0 0 0 0 00000
# 0 00 0 0 0 0 0 00 0 0 0 0 0 0
# 0000000 000000 0000000 000000 0 0 00 00000000 00000000 0000000 000000
#
# authors = Monkeyboy2805
import sys
class AddUser:
def setup_folder(self):
pass
def add_user_to_file(self):
pass
class Authenticate:
@staticmethod
def login():
try:
sys.path.insert(0, "./project/new/setup/exp/TEST")
import users
usernameenter = input("Please enter your username.")
if usernameenter in users.users:
path = users.paths[users.users.index(usernameenter)] # NOTE: the local variable path will be used at some point, but there isn't enough functionality for it to be used as of yet
print("Welcome " + (str(usernameenter)))
else:
return 1
except ImportError:
return 2
| lgpl-3.0 | 4,586,291,687,691,301,000 | 34.735294 | 181 | 0.510288 | false |
mlabru/ptracks | control/newton/control_newton.py | 1 | 12852 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------------------------------------
control_newton
revision 0.2 2015/nov mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial version (Linux/Python)
---------------------------------------------------------------------------------------------------
"""
__version__ = "$revision: 0.2$"
__author__ = "Milton Abrunhosa"
__date__ = "2015/11"
# < imports >--------------------------------------------------------------------------------------
# python library
import logging
import multiprocessing
import Queue
import time
# mpi4py
# from mpi4py import MPI
# model
import model.common.glb_data as gdata
import model.newton.model_newton as model
# view
import view.newton.view_newton as view
# control
import control.control_basic as control
# import control.control_debug as cdbg
import control.common.glb_defs as gdefs
import control.config.config_newton as config
import control.events.events_basic as events
import control.network.get_address as gaddr
import control.network.net_listener as listener
import control.network.net_sender as sender
import control.simula.sim_time as stime
# < class CControlNewton >-------------------------------------------------------------------------
class CControlNewton(control.CControlBasic):
"""
control newton
"""
# ---------------------------------------------------------------------------------------------
def __init__(self):
"""
constructor
"""
# init super class
super(CControlNewton, self).__init__()
        # inherited from CControlManager
        # self.app       # the application
        # self.event     # event manager
        # self.config    # configuration options
        # self.model     # model manager
        # self.view      # view manager
        # self.voip      # VoIP library
        # inherited from CControlBasic
        # self.ctr_flight   # flight control
        # self.sim_stat     # simulation statistics
        # self.sim_time     # simulation timer
        # load the configuration options
self.config = config.CConfigNewton(gdefs.D_CFG_FILE)
assert self.config
# init MPI
self.__mpi_comm = None # MPI.COMM_WORLD
# assert self.__mpi_comm
# create simulation time engine
self.sim_time = stime.CSimTime(self)
assert self.sim_time
        # create the send queue for command/control/configuration messages
self.__q_snd_cnfg = multiprocessing.Queue()
assert self.__q_snd_cnfg
        # send address
lt_ifce, ls_addr, li_port = gaddr.get_address(self.config, "net.cnfg")
        # create the send socket for command/control/configuration messages
self.__sck_snd_cnfg = sender.CNetSender(lt_ifce, ls_addr, li_port, self.__q_snd_cnfg)
assert self.__sck_snd_cnfg
        # create the send queue for tracks
self.__q_snd_trks = multiprocessing.Queue()
assert self.__q_snd_trks
        # send address
lt_ifce, ls_addr, li_port = gaddr.get_address(self.config, "net.trks")
        # create the send socket for tracks
self.__sck_snd_trks = sender.CNetSender(lt_ifce, ls_addr, li_port, self.__q_snd_trks)
assert self.__sck_snd_trks
        # create the receive queue for command/control/configuration messages
self.__q_rcv_cnfg = multiprocessing.Queue()
assert self.__q_rcv_cnfg
        # receive address
lt_ifce, ls_addr, li_port = gaddr.get_address(self.config, "net.cnfg")
        # create the receive socket for command/control/configuration messages
self.__sck_rcv_cnfg = listener.CNetListener(lt_ifce, ls_addr, li_port, self.__q_rcv_cnfg)
assert self.__sck_rcv_cnfg
# set as daemon
# self.__sck_rcv_cnfg.daemon = True
        # create the receive queue for piloting commands
self.__q_rcv_cpil = multiprocessing.Queue()
assert self.__q_rcv_cpil
        # receive address
lt_ifce, ls_addr, li_port = gaddr.get_address(self.config, "net.cpil")
        # create the receive socket for piloting commands
self.__sck_rcv_cpil = listener.CNetListener(lt_ifce, ls_addr, li_port, self.__q_rcv_cpil)
assert self.__sck_rcv_cpil
# set as daemon
# self.__sck_rcv_cpil.daemon = True
        # instantiate the model
self.model = model.CModelNewton(self)
assert self.model
# get flight emulation model
self.__emula = self.model.emula
assert self.__emula
# create view manager
self.view = view.CViewNewton(self.model, self)
assert self.view
# ---------------------------------------------------------------------------------------------
def cbk_termina(self):
"""
        terminate the application
"""
# clear to go
assert self.event
        # create an end-of-execution event
l_evt = events.CQuit()
assert l_evt
        # dispatch the event
self.event.post(l_evt)
# ---------------------------------------------------------------------------------------------
def run(self):
"""
drive application
"""
# checks
assert self.event
assert self.__q_rcv_cnfg
assert self.__sck_rcv_cnfg
assert self.__emula
        # scheduler timing
lf_tim_rrbn = self.config.dct_config["tim.rrbn"]
# keep things running
gdata.G_KEEP_RUN = True
        # start the clock
self.start_time()
        # start receiving command/control/configuration (ccc) messages
self.__sck_rcv_cnfg.start()
# starts flight model
self.__emula.start()
        # get the initial time in seconds
lf_now = time.time()
# starts web server
self.view.start()
# application loop
while gdata.G_KEEP_RUN:
try:
                # get an item from the command/control/configuration message queue (nowait)
llst_data = self.__q_rcv_cnfg.get(False)
# cdbg.M_DBG.debug("llst_data: {}".format(llst_data))
                # does the queue have data?
if llst_data:
                    # end-of-execution message?
if gdefs.D_MSG_FIM == int(llst_data[0]):
                        # terminate the application without confirmation and without sending a finish message
self.cbk_termina()
                    # acceleration message?
elif gdefs.D_MSG_ACC == int(llst_data[0]):
                        # speed up / slow down the application
self.sim_time.cbk_acelera(float(llst_data[1]))
                    # freeze message?
elif gdefs.D_MSG_FRZ == int(llst_data[0]):
                        # save the current time
self.sim_time.cbk_congela()
                    # unfreeze message?
elif gdefs.D_MSG_UFZ == int(llst_data[0]):
                        # restore the time
self.sim_time.cbk_descongela()
                    # otherwise, the message was not recognized or cannot be handled
else:
                        # messages that are simply not handled?
if int(llst_data[0]) in [gdefs.D_MSG_EXE, gdefs.D_MSG_SRV, gdefs.D_MSG_TIM]:
                            # next message
continue
# logger
l_log = logging.getLogger("CControlNewton::run")
l_log.setLevel(logging.WARNING)
l_log.warning("<E01: mensagem não reconhecida ou não tratável.")
            # when there are no messages...
except Queue.Empty:
                # save the previous time
lf_ant = lf_now
                # current time in seconds
lf_now = time.time()
                # compute the elapsed time
lf_dif = lf_now - lf_ant
                # running ahead of schedule?
if lf_tim_rrbn > lf_dif:
                    # give the scheduler a chance to run
time.sleep((lf_tim_rrbn - lf_dif) * .99)
                # otherwise, we are running late...
else:
# logger
l_log = logging.getLogger("CControlNewton::run")
l_log.setLevel(logging.WARNING)
l_log.warning("<E02: atrasou: {}.".format(lf_dif - lf_tim_rrbn))
            # in case of any other error...
except Exception, l_err:
# logger
l_log = logging.getLogger("CControlNewton::run")
l_log.setLevel(logging.WARNING)
l_log.warning("<E03: control error: {}.".format(l_err))
# self.sim_stat.noProcFlights = fe.flightsProcessed
# self.sim_stat.printScore()
# ---------------------------------------------------------------------------------------------
def start_time(self):
"""
start time
"""
# clear to go
assert self.model
assert self.sim_time
        # exercise
l_exe = self.model.exe
assert l_exe
        # exercise start time
lt_hora = l_exe.t_exe_hor_ini
        # start the simulation clock
self.sim_time.set_hora(lt_hora)
# =============================================================================================
# data
# =============================================================================================
# ---------------------------------------------------------------------------------------------
@property
def emula(self):
return self.__emula
@emula.setter
def emula(self, f_val):
self.__emula = f_val
# ---------------------------------------------------------------------------------------------
@property
def mpi_comm(self):
return self.__mpi_comm
# ---------------------------------------------------------------------------------------------
@property
def mpi_rank(self):
return 0 if self.__mpi_comm is None else self.__mpi_comm.rank
# ---------------------------------------------------------------------------------------------
@property
def mpi_size(self):
return 1 if self.__mpi_comm is None else self.__mpi_comm.size
# ---------------------------------------------------------------------------------------------
@property
def q_rcv_cpil(self):
return self.__q_rcv_cpil
@q_rcv_cpil.setter
def q_rcv_cpil(self, f_val):
self.__q_rcv_cpil = f_val
# ---------------------------------------------------------------------------------------------
@property
def sck_rcv_cpil(self):
return self.__sck_rcv_cpil
@sck_rcv_cpil.setter
def sck_rcv_cpil(self, f_val):
self.__sck_rcv_cpil = f_val
# ---------------------------------------------------------------------------------------------
@property
def q_rcv_cnfg(self):
return self.__q_rcv_cnfg
@q_rcv_cnfg.setter
def q_rcv_cnfg(self, f_val):
self.__q_rcv_cnfg = f_val
# ---------------------------------------------------------------------------------------------
@property
def sck_rcv_cnfg(self):
return self.__sck_rcv_cnfg
@sck_rcv_cnfg.setter
def sck_rcv_cnfg(self, f_val):
self.__sck_rcv_cnfg = f_val
# ---------------------------------------------------------------------------------------------
@property
def q_snd_cnfg(self):
return self.__q_snd_cnfg
@q_snd_cnfg.setter
def q_snd_cnfg(self, f_val):
self.__q_snd_cnfg = f_val
# ---------------------------------------------------------------------------------------------
@property
def sck_snd_cnfg(self):
return self.__sck_snd_cnfg
@sck_snd_cnfg.setter
def sck_snd_cnfg(self, f_val):
self.__sck_snd_cnfg = f_val
# ---------------------------------------------------------------------------------------------
@property
def q_snd_trks(self):
return self.__q_snd_trks
@q_snd_trks.setter
def q_snd_trks(self, f_val):
self.__q_snd_trks = f_val
# ---------------------------------------------------------------------------------------------
@property
def sck_snd_trks(self):
return self.__sck_snd_trks
@sck_snd_trks.setter
def sck_snd_trks(self, f_val):
self.__sck_snd_trks = f_val
# < the end >--------------------------------------------------------------------------------------
| gpl-3.0 | -3,338,555,403,574,336,000 | 31.295455 | 100 | 0.458128 | false |
dotkrnl/gface | lib/settings.py | 1 | 1894 | #!/usr/bin/env python2.7
# -*- coding: UTF-8 -*-
#### Copyright for this software ####
# Copyright (C) 2013 Jason Lau (刘家昌)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import defaults
import logic.defaultLogic
FMT = '.jpg'
USE = defaults.PHOTO_BASE["TWO_INCH"]
USES = [ defaults.PHOTO_BASE["ONE_INCH"],
defaults.PHOTO_BASE["TWO_INCH"],
defaults.PHOTO_BASE["SMALL_TWO_INCH"],
defaults.PHOTO_BASE["ID_CARD"],
defaults.PHOTO_BASE["NATIONAL_STUDENT"]]
PHOTO = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../photo"
)
PRINT_USES = [ defaults.PRINT_BASE["THREE_R"],
defaults.PRINT_BASE["FOUR_R"],
defaults.PRINT_BASE["FIVE_R"],
defaults.PRINT_BASE["SIX_R"],
defaults.PRINT_BASE["EIGHT_R"] ]
PRINT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../print"
)
PRINTER = "printer_name"
RAW = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../raw"
)
ROT = False#True
SAVE = logic.defaultLogic.PhotoLogic
FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../students.csv"
)
FULLSCREEN = False#True
NOEXIT = False#True
FONTSIZE = 48
LINESIZE = 120
LEFTMAR = 30
TOPLOC = 90
| apache-2.0 | -6,986,004,401,871,246,000 | 26.940299 | 74 | 0.623932 | false |
jjmiranda/edx-platform | lms/djangoapps/grades/tests/test_models.py | 1 | 8528 | """
Unit tests for grades models.
"""
from base64 import b64encode
from collections import OrderedDict
import ddt
from hashlib import sha1
import json
from django.db.utils import IntegrityError
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from lms.djangoapps.grades.models import (
BlockRecord,
BlockRecordList,
PersistentSubsectionGrade,
VisibleBlocks
)
class BlockRecordListTestCase(TestCase):
"""
Verify the behavior of BlockRecordList, particularly around edge cases
"""
def setUp(self):
super(BlockRecordListTestCase, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
def test_empty_block_record_set(self):
empty_json = '{0}"blocks":[],"course_key":"{1}"{2}'.format('{', unicode(self.course_key), '}')
brs = BlockRecordList((), self.course_key)
self.assertFalse(brs)
self.assertEqual(
brs.json_value,
empty_json
)
self.assertEqual(
BlockRecordList.from_json(empty_json),
brs
)
class GradesModelTestCase(TestCase):
"""
Base class for common setup of grades model tests.
"""
def setUp(self):
super(GradesModelTestCase, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.locator_a = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id_a'
)
self.locator_b = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id_b'
)
self.record_a = BlockRecord(locator=self.locator_a, weight=1, max_score=10)
self.record_b = BlockRecord(locator=self.locator_b, weight=1, max_score=10)
@ddt.ddt
class BlockRecordTest(GradesModelTestCase):
"""
Test the BlockRecord model.
"""
def setUp(self):
super(BlockRecordTest, self).setUp()
def test_creation(self):
"""
Tests creation of a BlockRecord.
"""
weight = 1
max_score = 10
record = BlockRecord(
self.locator_a,
weight,
max_score,
)
self.assertEqual(record.locator, self.locator_a)
@ddt.data(
(0, 0, "0123456789abcdef"),
(1, 10, 'totally_a_real_block_key'),
("BlockRecord is", "a dumb data store", "with no validation"),
)
@ddt.unpack
def test_serialization(self, weight, max_score, block_key):
"""
Tests serialization of a BlockRecord using the to_dict() method.
"""
record = BlockRecord(block_key, weight, max_score)
expected = OrderedDict([
("locator", block_key),
("weight", weight),
("max_score", max_score),
])
self.assertEqual(expected, record._asdict())
class VisibleBlocksTest(GradesModelTestCase):
"""
Test the VisibleBlocks model.
"""
def _create_block_record_list(self, blocks):
"""
Creates and returns a BlockRecordList for the given blocks.
"""
return VisibleBlocks.objects.create_from_blockrecords(BlockRecordList.from_list(blocks, self.course_key))
def test_creation(self):
"""
Happy path test to ensure basic create functionality works as expected.
"""
vblocks = self._create_block_record_list([self.record_a])
list_of_block_dicts = [self.record_a._asdict()]
for block_dict in list_of_block_dicts:
block_dict['locator'] = unicode(block_dict['locator']) # BlockUsageLocator is not json-serializable
expected_data = {
'course_key': unicode(self.record_a.locator.course_key),
'blocks': [
{'locator': unicode(self.record_a.locator), 'max_score': 10, 'weight': 1},
],
}
expected_json = json.dumps(expected_data, separators=(',', ':'), sort_keys=True)
expected_hash = b64encode(sha1(expected_json).digest())
self.assertEqual(expected_data, json.loads(vblocks.blocks_json))
self.assertEqual(expected_json, vblocks.blocks_json)
self.assertEqual(expected_hash, vblocks.hashed)
def test_ordering_matters(self):
"""
When creating new vblocks, different ordering of blocks produces
different records in the database.
"""
stored_vblocks = self._create_block_record_list([self.record_a, self.record_b])
repeat_vblocks = self._create_block_record_list([self.record_b, self.record_a])
same_order_vblocks = self._create_block_record_list([self.record_a, self.record_b])
new_vblocks = self._create_block_record_list([self.record_b])
self.assertNotEqual(stored_vblocks.pk, repeat_vblocks.pk)
self.assertNotEqual(stored_vblocks.hashed, repeat_vblocks.hashed)
self.assertEquals(stored_vblocks.pk, same_order_vblocks.pk)
self.assertEquals(stored_vblocks.hashed, same_order_vblocks.hashed)
self.assertNotEqual(stored_vblocks.pk, new_vblocks.pk)
self.assertNotEqual(stored_vblocks.hashed, new_vblocks.hashed)
def test_blocks_property(self):
"""
Ensures that, given an array of BlockRecord, creating visible_blocks
and accessing visible_blocks.blocks yields a copy of the initial array.
Also, trying to set the blocks property should raise an exception.
"""
expected_blocks = BlockRecordList.from_list([self.record_a, self.record_b], self.course_key)
visible_blocks = self._create_block_record_list(expected_blocks)
self.assertEqual(expected_blocks, visible_blocks.blocks)
with self.assertRaises(AttributeError):
visible_blocks.blocks = expected_blocks
@ddt.ddt
class PersistentSubsectionGradeTest(GradesModelTestCase):
"""
Test the PersistentSubsectionGrade model.
"""
def setUp(self):
super(PersistentSubsectionGradeTest, self).setUp()
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='subsection',
block_id='subsection_12345',
)
self.block_records = BlockRecordList([self.record_a, self.record_b], self.course_key)
self.params = {
"user_id": 12345,
"usage_key": self.usage_key,
"course_version": "deadbeef",
"subtree_edited_timestamp": "2016-08-01 18:53:24.354741",
"earned_all": 6.0,
"possible_all": 12.0,
"earned_graded": 6.0,
"possible_graded": 8.0,
"visible_blocks": self.block_records,
}
def test_create(self):
"""
Tests model creation, and confirms error when trying to recreate model.
"""
created_grade = PersistentSubsectionGrade.create_grade(**self.params)
with self.assertNumQueries(1):
read_grade = PersistentSubsectionGrade.read_grade(
user_id=self.params["user_id"],
usage_key=self.params["usage_key"],
)
self.assertEqual(created_grade, read_grade)
self.assertEquals(read_grade.visible_blocks.blocks, self.block_records)
with self.assertRaises(IntegrityError):
PersistentSubsectionGrade.create_grade(**self.params)
def test_create_bad_params(self):
"""
Confirms create will fail if params are missing.
"""
del self.params["earned_graded"]
with self.assertRaises(IntegrityError):
PersistentSubsectionGrade.create_grade(**self.params)
def test_course_version_is_optional(self):
del self.params["course_version"]
PersistentSubsectionGrade.create_grade(**self.params)
@ddt.data(True, False)
def test_update_or_create_grade(self, already_created):
created_grade = PersistentSubsectionGrade.create_grade(**self.params) if already_created else None
self.params["earned_all"] = 7
updated_grade = PersistentSubsectionGrade.update_or_create_grade(**self.params)
self.assertEquals(updated_grade.earned_all, 7)
if already_created:
self.assertEquals(created_grade.id, updated_grade.id)
self.assertEquals(created_grade.earned_all, 6)
| agpl-3.0 | -6,927,463,723,962,252,000 | 34.983122 | 113 | 0.622889 | false |
thedep2/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/rai.py | 22 | 4658 | from __future__ import unicode_literals
import re
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
parse_duration,
unified_strdate,
)
class RaiIE(SubtitlesInfoExtractor):
_VALID_URL = r'(?P<url>http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html)'
_TESTS = [
{
'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
'md5': 'c064c0b2d09c278fb293116ef5d0a32d',
'info_dict': {
'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
'ext': 'mp4',
'title': 'Report del 07/04/2014',
'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
'upload_date': '20140407',
'duration': 6160,
}
},
{
'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
'md5': '8bb9c151924ce241b74dd52ef29ceafa',
'info_dict': {
'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
'ext': 'mp4',
'title': 'TG PRIMO TEMPO',
'description': '',
'upload_date': '20140612',
'duration': 1758,
},
'skip': 'Error 404',
},
{
'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html',
'md5': '35cf7c229f22eeef43e48b5cf923bef0',
'info_dict': {
'id': '7aafdea9-0e5d-49d5-88a6-7e65da67ae13',
'ext': 'mp4',
'title': 'State of the Net, Antonella La Carpia: regole virali',
'description': 'md5:b0ba04a324126903e3da7763272ae63c',
'upload_date': '20140613',
},
'skip': 'Error 404',
},
{
'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html',
'md5': '35694f062977fe6619943f08ed935730',
'info_dict': {
'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132',
'ext': 'mp4',
'title': 'Alluvione in Sardegna e dissesto idrogeologico',
'description': 'Edizione delle ore 20:30 ',
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
media = self._download_json('%s?json' % mobj.group('url'), video_id, 'Downloading video JSON')
title = media.get('name')
description = media.get('desc')
thumbnail = media.get('image_300') or media.get('image_medium') or media.get('image')
duration = parse_duration(media.get('length'))
uploader = media.get('author')
upload_date = unified_strdate(media.get('date'))
formats = []
for format_id in ['wmv', 'm3u8', 'mediaUri', 'h264']:
media_url = media.get(format_id)
if not media_url:
continue
formats.append({
'url': media_url,
'format_id': format_id,
'ext': 'mp4',
})
if self._downloader.params.get('listsubtitles', False):
page = self._download_webpage(url, video_id)
self._list_available_subtitles(video_id, page)
return
subtitles = {}
if self._have_to_download_any_subtitles:
page = self._download_webpage(url, video_id)
subtitles = self.extract_subtitles(video_id, page)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
def _get_available_subtitles(self, video_id, webpage):
subtitles = {}
m = re.search(r'<meta name="closedcaption" content="(?P<captions>[^"]+)"', webpage)
if m:
captions = m.group('captions')
STL_EXT = '.stl'
SRT_EXT = '.srt'
if captions.endswith(STL_EXT):
captions = captions[:-len(STL_EXT)] + SRT_EXT
subtitles['it'] = 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions)
return subtitles
| gpl-3.0 | -159,452,521,588,304,320 | 36.564516 | 163 | 0.522542 | false |
jelugbo/tundex | common/lib/xmodule/xmodule/xml_module.py | 17 | 16439 | import json
import copy
import logging
import os
import sys
from lxml import etree
from xblock.fields import Dict, Scope, ScopeIds
from xmodule.x_module import XModuleDescriptor
from xmodule.modulestore.inheritance import own_metadata, InheritanceKeyValueStore
from xmodule.modulestore import EdxJSONEncoder
from xblock.runtime import KvsFieldData
log = logging.getLogger(__name__)
# assume all XML files are persisted as utf-8.
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True,
encoding='utf-8')
def name_to_pathname(name):
"""
Convert a location name for use in a path: replace ':' with '/'.
This allows users of the xml format to organize content into directories
"""
return name.replace(':', '/')
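# For example (url_name value is hypothetical):
#   name_to_pathname('poll:2013_Spring')  ->  'poll/2013_Spring'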
def is_pointer_tag(xml_obj):
"""
Check if xml_obj is a pointer tag: <blah url_name="something" />.
No children, one attribute named url_name, no text.
Special case for course roots: the pointer is
<course url_name="something" org="myorg" course="course">
xml_obj: an etree Element
Returns a bool.
"""
if xml_obj.tag != "course":
expected_attr = set(['url_name'])
else:
expected_attr = set(['url_name', 'course', 'org'])
actual_attr = set(xml_obj.attrib.keys())
has_text = xml_obj.text is not None and len(xml_obj.text.strip()) > 0
return len(xml_obj) == 0 and actual_attr == expected_attr and not has_text
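# Illustrative checks (tags and url_name values are hypothetical):
#   is_pointer_tag(etree.fromstring('<problem url_name="lab_1"/>'))                        -> True
#   is_pointer_tag(etree.fromstring('<problem url_name="lab_1">text</problem>'))           -> False
#   is_pointer_tag(etree.fromstring('<course url_name="2013" org="edX" course="demo"/>'))  -> True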
def get_metadata_from_xml(xml_object, remove=True):
meta = xml_object.find('meta')
if meta is None:
return ''
dmdata = meta.text
if remove:
xml_object.remove(meta)
return dmdata
def serialize_field(value):
"""
Return a string version of the value (where value is the JSON-formatted, internally stored value).
If the value is a string, then we simply return what was passed in.
Otherwise, we return json.dumps on the input value.
"""
if isinstance(value, basestring):
return value
return json.dumps(value, cls=EdxJSONEncoder)
def deserialize_field(field, value):
"""
Deserialize the string version to the value stored internally.
Note that this is not the same as the value returned by from_json, as model types typically store
their value internally as JSON. By default, this method will return the result of calling json.loads
on the supplied value, unless json.loads throws a TypeError, or the type of the value returned by json.loads
is not supported for this class (from_json throws an Error). In either of those cases, this method returns
the input value.
"""
try:
deserialized = json.loads(value)
if deserialized is None:
return deserialized
try:
field.from_json(deserialized)
return deserialized
except (ValueError, TypeError):
# Support older serialized version, which was just a string, not result of json.dumps.
# If the deserialized version cannot be converted to the type (via from_json),
# just return the original value. For example, if a string value of '3.4' was
# stored for a String field (before we started storing the result of json.dumps),
# then it would be deserialized as 3.4, but 3.4 is not supported for a String
# field. Therefore field.from_json(3.4) will throw an Error, and we should
# actually return the original value of '3.4'.
return value
except (ValueError, TypeError):
# Support older serialized version.
return value
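# Round-trip sketch (the Float/String field types are illustrative and not imported here):
#   serialize_field(3.4)        -> '3.4'     (json.dumps of the stored value)
#   serialize_field(u'3.4')     -> u'3.4'    (strings pass through unchanged)
#   deserialize_field(Float(), '3.4')   -> 3.4
#   deserialize_field(String(), '3.4')  -> '3.4'  (json.loads yields the float 3.4, which a
#                                                  String field rejects, so the original
#                                                  string is kept)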
class XmlDescriptor(XModuleDescriptor):
"""
Mixin class for standardized parsing of from xml
"""
xml_attributes = Dict(help="Map of unhandled xml attributes, used only for storage between import and export",
default={}, scope=Scope.settings)
# Extension to append to filename paths
filename_extension = 'xml'
# The attributes will be removed from the definition xml passed
# to definition_from_xml, and from the xml returned by definition_to_xml
# Note -- url_name isn't in this list because it's handled specially on
# import and export.
metadata_to_strip = ('data_dir',
'tabs', 'grading_policy',
'discussion_blackouts',
# VS[compat] -- remove the below attrs once everything is in the CMS
'course', 'org', 'url_name', 'filename',
# Used for storing xml attributes between import and export, for roundtrips
'xml_attributes')
metadata_to_export_to_policy = ('discussion_topics', 'checklists')
@classmethod
def definition_from_xml(cls, xml_object, system):
"""
Return the definition to be passed to the newly created descriptor
during from_xml
xml_object: An etree Element
"""
raise NotImplementedError("%s does not implement definition_from_xml" % cls.__name__)
@classmethod
def clean_metadata_from_xml(cls, xml_object):
"""
Remove any attribute named for a field with scope Scope.settings from the supplied
xml_object
"""
for field_name, field in cls.fields.items():
if field.scope == Scope.settings and xml_object.get(field_name) is not None:
del xml_object.attrib[field_name]
@classmethod
def file_to_xml(cls, file_object):
"""
Used when this module wants to parse a file object to xml
that will be converted to the definition.
Returns an lxml Element
"""
return etree.parse(file_object, parser=edx_xml_parser).getroot()
@classmethod
def load_file(cls, filepath, fs, def_id): # pylint: disable=invalid-name
'''
Open the specified file in fs, and call cls.file_to_xml on it,
returning the lxml object.
Add details and reraise on error.
'''
try:
with fs.open(filepath) as xml_file:
return cls.file_to_xml(xml_file)
except Exception as err:
# Add info about where we are, but keep the traceback
msg = 'Unable to load file contents at path %s for item %s: %s ' % (
filepath, def_id, err)
raise Exception, msg, sys.exc_info()[2]
@classmethod
def load_definition(cls, xml_object, system, def_id):
'''Load a descriptor definition from the specified xml_object.
Subclasses should not need to override this except in special
cases (e.g. html module)'''
# VS[compat] -- the filename attr should go away once everything is
# converted. (note: make sure html files still work once this goes away)
filename = xml_object.get('filename')
if filename is None:
definition_xml = copy.deepcopy(xml_object)
filepath = ''
else:
filepath = cls._format_filepath(xml_object.tag, filename)
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
# give the class a chance to fix it up. The file will be written out
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'):
candidates = cls.backcompat_paths(filepath)
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate
break
definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
# Add the attributes from the pointer node
definition_xml.attrib.update(xml_object.attrib)
definition_metadata = get_metadata_from_xml(definition_xml)
cls.clean_metadata_from_xml(definition_xml)
definition, children = cls.definition_from_xml(definition_xml, system)
if definition_metadata:
definition['definition_metadata'] = definition_metadata
definition['filename'] = [filepath, filename]
return definition, children
@classmethod
def load_metadata(cls, xml_object):
"""
Read the metadata attributes from this xml_object.
Returns a dictionary {key: value}.
"""
metadata = {'xml_attributes': {}}
for attr, val in xml_object.attrib.iteritems():
# VS[compat]. Remove after all key translations done
attr = cls._translate(attr)
if attr in cls.metadata_to_strip:
# don't load these
continue
if attr not in cls.fields:
metadata['xml_attributes'][attr] = val
else:
metadata[attr] = deserialize_field(cls.fields[attr], val)
return metadata
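    # Sketch of the resulting dict (attribute names are hypothetical): for an element
    # such as <video display_name="Intro" filename="intro.xml" foo="bar">, the declared
    # 'display_name' field is deserialized into metadata, 'filename' is skipped because
    # it is listed in metadata_to_strip, and the unknown 'foo' attribute is kept under
    # metadata['xml_attributes'] so it survives an import/export round trip.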
@classmethod
def apply_policy(cls, metadata, policy):
"""
Add the keys in policy to metadata, after processing them
through the attrmap. Updates the metadata dict in place.
"""
for attr, value in policy.iteritems():
attr = cls._translate(attr)
if attr not in cls.fields:
# Store unknown attributes coming from policy.json
# in such a way that they will export to xml unchanged
metadata['xml_attributes'][attr] = value
else:
metadata[attr] = value
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses
xml_data: A string of xml that will be translated into data and children for
this module
system: A DescriptorSystem for interacting with external resources
"""
xml_object = etree.fromstring(xml_data)
# VS[compat] -- just have the url_name lookup, once translation is done
url_name = xml_object.get('url_name', xml_object.get('slug'))
def_id = id_generator.create_definition(xml_object.tag, url_name)
usage_id = id_generator.create_usage(def_id)
# VS[compat] -- detect new-style each-in-a-file mode
if is_pointer_tag(xml_object):
# new style:
# read the actual definition file--named using url_name.replace(':','/')
filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
definition_xml = cls.load_file(filepath, system.resources_fs, def_id)
else:
definition_xml = xml_object
filepath = None
definition, children = cls.load_definition(definition_xml, system, def_id) # note this removes metadata
# VS[compat] -- make Ike's github preview links work in both old and
# new file layouts
if is_pointer_tag(xml_object):
# new style -- contents actually at filepath
definition['filename'] = [filepath, filepath]
metadata = cls.load_metadata(definition_xml)
# move definition metadata into dict
dmdata = definition.get('definition_metadata', '')
if dmdata:
metadata['definition_metadata_raw'] = dmdata
try:
metadata.update(json.loads(dmdata))
except Exception as err:
log.debug('Error in loading metadata %r', dmdata, exc_info=True)
metadata['definition_metadata_err'] = str(err)
# Set/override any metadata specified by policy
cls.apply_policy(metadata, system.get_policy(usage_id))
field_data = {}
field_data.update(metadata)
field_data.update(definition)
field_data['children'] = children
field_data['xml_attributes']['filename'] = definition.get('filename', ['', None]) # for git link
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
return system.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
ScopeIds(None, xml_object.tag, def_id, usage_id),
field_data,
)
@classmethod
def _format_filepath(cls, category, name):
return u'{category}/{name}.{ext}'.format(category=category,
name=name,
ext=cls.filename_extension)
def export_to_file(self):
"""If this returns True, write the definition of this descriptor to a separate
file.
NOTE: Do not override this without a good reason. It is here
specifically for customtag...
"""
return True
def export_to_xml(self, resource_fs):
"""
Returns an xml string representing this module, and all modules
underneath it. May also write required resources out to resource_fs
Assumes that modules have single parentage (that no module appears twice
in the same course), and that it is thus safe to nest modules as xml
children as appropriate.
The returned XML should be able to be parsed back into an identical
XModuleDescriptor using the from_xml method with the same system, org,
and course
resource_fs is a pyfilesystem object (from the fs package)
"""
# Set up runtime.export_fs so that it's available through future
# uses of the pure xblock add_xml_to_node api
self.runtime.export_fs = resource_fs
# Get the definition
xml_object = self.definition_to_xml(resource_fs)
self.clean_metadata_from_xml(xml_object)
# Set the tag so we get the file path right
xml_object.tag = self.category
# Add the non-inherited metadata
for attr in sorted(own_metadata(self)):
# don't want e.g. data_dir
if attr not in self.metadata_to_strip and attr not in self.metadata_to_export_to_policy:
val = serialize_field(self._field_data.get(self, attr))
try:
xml_object.set(attr, val)
except Exception:
logging.exception(
u'Failed to serialize metadata attribute %s with value %s in module %s. This could mean data loss!!!',
attr, val, self.url_name
)
for key, value in self.xml_attributes.items():
if key not in self.metadata_to_strip:
xml_object.set(key, serialize_field(value))
if self.export_to_file():
# Write the definition to a file
url_path = name_to_pathname(self.url_name)
filepath = self._format_filepath(self.category, url_path)
resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)
with resource_fs.open(filepath, 'w') as fileobj:
fileobj.write(etree.tostring(xml_object, pretty_print=True, encoding='utf-8'))
# And return just a pointer with the category and filename.
record_object = etree.Element(self.category)
else:
record_object = xml_object
record_object.set('url_name', self.url_name)
# Special case for course pointers:
if self.category == 'course':
# add org and course attributes on the pointer tag
record_object.set('org', self.location.org)
record_object.set('course', self.location.course)
return etree.tostring(record_object, pretty_print=True, encoding='utf-8')
def definition_to_xml(self, resource_fs):
"""
Return a new etree Element object created from this modules definition.
"""
raise NotImplementedError(
"%s does not implement definition_to_xml" % self.__class__.__name__)
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(XmlDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(XmlDescriptor.xml_attributes)
return non_editable_fields
| agpl-3.0 | -1,596,718,683,780,248,800 | 38.140476 | 126 | 0.616948 | false |
talkincode/txweb | txweb/paginator.py | 10 | 2978 | #!/usr/bin/env python
#coding=utf-8
from __future__ import division
import math
class Paginator():
"""系统查询分页工具
"""
def __init__(self, url_func, page=1, total=0, page_size=20):
self.url_func = url_func
self.page = 1 if page < 1 else page
self.total = total
self.page_size = page_size
self.page_num = int(math.ceil(self.total / self.page_size)) if self.total > 0 else 0
self.page_bars = {}
self.data = ()
for _page in range(1, self.page_num + 1):
_index = int(_page / 10)
if not self.page_bars.has_key(_index):
self.page_bars[_index] = set([_page])
else:
self.page_bars[_index].add(_page)
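    # Illustrative usage (URL pattern and counts are hypothetical):
    #   pager = Paginator(lambda page, form_id: "/query?page=%s" % page,
    #                     page=2, total=95, page_size=20)
    #   html = pager.render()   # bootstrap-style <ul class="pagination"> markup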
def render(self, form_id=None):
'''
        Dynamically render the HTML pagination markup
'''
page_bar = self.page_bars.get(int(self.page / 10))
if page_bar is None:
return ''
_htmls = []
if form_id:
_htmls.append(u'''<script>
function goto_page(form_id,page){
var form=document.getElementById(form_id);
var page_input = document.createElement("input");
page_input.type="hidden";
page_input.name="page";
page_input.value=page;
form.appendChild(page_input);
form.submit();
}</script>''')
_htmls.append('<ul class="pagination pull-right">')
_htmls.append(u'\t<li class="disabled"><a href="#">查询记录数 %s</a></li>' % self.total)
current_start = self.page
if current_start == 1:
_htmls.append(u'\t<li class="disabled"><a href="#">首页</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">← 上一页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">首页</a></li>' % self.url_func(1,form_id))
_htmls.append(u'\t<li><a href="%s">← 上一页</a></li>' % self.url_func(current_start - 1,form_id))
for page in page_bar:
_page_url = self.url_func(page,form_id)
if page == self.page:
_htmls.append(u'\t<li class="active"><span>%s <span class="sr-only">{current}</span></span></li>' % page)
else:
_htmls.append(u'\t<li><a href="%s">%s</a></li>' % (_page_url, page))
current_end = self.page
if current_end == self.page_num:
_htmls.append(u'\t<li class="disabled"><a href="#">下一页 →</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">尾页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">下一页 →</a></li>' % self.url_func(current_end + 1,form_id))
_htmls.append(u'\t<li><a href="%s">尾页</a></li>' % self.url_func(self.page_num,form_id))
_htmls.append('</ul>')
return '\r\n'.join(_htmls) | mit | -8,691,426,116,596,310,000 | 37.68 | 121 | 0.502414 | false |
jordanemedlock/psychtruths | temboo/core/Library/Google/Drive/Parents/Insert.py | 5 | 4897 | # -*- coding: utf-8 -*-
###############################################################################
#
# Insert
# Adds a parent folder for a file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Insert(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Insert Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Insert, self).__init__(temboo_session, '/Library/Google/Drive/Parents/Insert')
def new_input_set(self):
return InsertInputSet()
def _make_result_set(self, result, path):
return InsertResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return InsertChoreographyExecution(session, exec_id, path)
class InsertInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Insert
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(InsertInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(InsertInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(InsertInputSet, self)._set_input('ClientSecret', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying a subset of fields to include in the response.)
"""
super(InsertInputSet, self)._set_input('Fields', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
"""
super(InsertInputSet, self)._set_input('FileID', value)
def set_ParentID(self, value):
"""
Set the value of the ParentID input for this Choreo. ((required, string) The ID of the parent.)
"""
super(InsertInputSet, self)._set_input('ParentID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(InsertInputSet, self)._set_input('RefreshToken', value)
class InsertResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Insert Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class InsertChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return InsertResultSet(response, path)
| apache-2.0 | -6,064,132,987,262,936,000 | 40.854701 | 255 | 0.666939 | false |
noba3/KoTos | addons/plugin.video.mediathek/mediathek/factory.py | 1 | 1883 | # -*- coding: utf-8 -*-
#-------------LicenseHeader--------------
# plugin.video.Mediathek - Gives access to most video-platforms from German public service broadcasters
# Copyright (C) 2010 Raptor 2101 [[email protected]]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediathek.wdr import *
from mediathek.ard import *
from mediathek.zdf import *
from mediathek.arte import *
from mediathek.dreisat import *
from mediathek.bayerisches import *
from mediathek.orf import *
from mediathek.bralpha import *
from mediathek.ndr import *
from mediathek.kika import *
class MediathekFactory(object):
def __init__(self):
self.avaibleMediathekes = {
ARDMediathek.name():ARDMediathek,
ZDFMediathek.name():ZDFMediathek,
ARTEMediathek.name():ARTEMediathek,
DreiSatMediathek.name():DreiSatMediathek,
#BayerischesFSMediathek.name():BayerischesFSMediathek,
ORFMediathek.name():ORFMediathek,
#WDRMediathek.name():WDRMediathek,
#BRAlphaMediathek.name():BRAlphaMediathek,
NDRMediathek.name():NDRMediathek,
KIKA.name():KIKA
}
def getAvaibleMediathekTypes(self):
return sorted(self.avaibleMediathekes.keys())
def getMediathek(self,mediathekName, gui):
return self.avaibleMediathekes[mediathekName](gui);
| gpl-2.0 | -2,008,556,173,750,065,000 | 38.229167 | 103 | 0.733935 | false |
seomoz/roger-mesos-tools | cli/roger_promote.py | 1 | 10706 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Provides a class and script. See bin/roger.py for a reference of how the script
is called.
Promote moves containers between dev, stage, prod environments
:Classes:
RogerPromote: Provides application promotion between environments for the
supported Mesos frameworks.
:Exceptions:
RogerPromoteError: Generic exception for errors in RogerPromote
"""
# ~
# stdlib
import argparse
import tempfile
import os
import os.path
import shutil
import sys
import subprocess
# ~
# core
from cli.settings import Settings
from cli.appconfig import AppConfig
from cli.frameworkUtils import FrameworkUtils
from cli.marathon import Marathon
from cli.chronos import Chronos
def describe():
return 'Enables application promotion between environments'
class RogerPromote(object):
"""
Enable application promotion between environments
:Params:
:app_config [cli.appconfig.AppConfig]: Default: cli.appconfig.AppConfig
:settings [cli.settings.Settings]: Default: cli.settings.Settings
:framework_utils [cli.framework_utils.FrameworkUtils]:
Default: cli.framework_utils.FrameworkUtils
:framework [cli.framework.Framework]: Subclass of Framework
Default: cli.marathon.Marathon
"""
def __init__(
self,
app_config=AppConfig(),
settings=Settings(),
framework_utils=FrameworkUtils(),
framework=Marathon()
):
self._app_config = app_config
self._settings = settings
self._framework_utils = framework_utils
self._framework = framework
self._config_dir = None
self._roger_env = None
self._temp_dir = None
@classmethod
def promote(cls, instance=None):
"""
:Params:
        :instance [cli.roger_promote.RogerPromote]: Available for testing
:Raises:
:cli.roger_utils.RogerPromoteError:
        :Return [bool]: True if successful, False otherwise
"""
# Get instance
if instance:
rp = instance
else:
rp = cls()
# Get Namespace obj
args = rp.arg_parse().parse_args()
# Set framework based on app config
rp._set_framework(args.config, args.app_name)
# Get repo name
repo = rp._config_resolver('repo', args.app_name, args.config)
if not repo:
raise RogerPromoteError('Repo not found')
# Clone the repo
rp._clone_repo(repo)
# Locate roger_push.py
roger_push = rp._roger_push_script()
app_data = rp._app_config.getAppData(
rp.config_dir,
args.config,
args.app_name
)
container_names= []
for container in app_data['containers']:
if isinstance(container, dict):
# this indicates that there's a config for individual container
container_names.extend(container.keys())
else:
container_names.append(container)
print("Containers being promoted: {}".format(container_names))
failed_images = []
for container in container_names:
template_path = rp._get_template_path(
container,
rp.config_dir,
args,
args.app_name
)
full_image_name = rp._image_name(
args.from_env,
args.config,
template_path
)
if "/" in full_image_name:
image_name = full_image_name.split('/', 1)[1]
else:
image_name = full_image_name
# Execute the script
cmd = [
roger_push,
'--env', args.to_env,
args.app_name, rp._temp_dir, image_name, args.config
]
string_cmd = ' '.join(cmd)
ret_val = None
x = 0
while x < 3:
x += 1
ret_val = os.system(string_cmd)
if ret_val == 0:
print("Container - {}, image - {}, promoted from - {} to - {}".
format(container, image_name, args.from_env, args.to_env))
break
if ret_val != 0:
print("Failed to deploy Container - {}, with Image - {image} to {env}".
format(container,image_name, args.to))
failed_images.append(image_name)
# CleanUp
shutil.rmtree(rp._temp_dir)
if len(failed_images) > 0:
print("Images that failed")
for failed_image in failed_images:
print(failed_image)
return False
return True
def arg_parse(self):
"""
Returns a argparse.NameSpace instance holding argument data
"""
parser = argparse.ArgumentParser(
prog='roger promote',
description=describe()
)
env_choices = ['local', 'dev', 'stage', 'prod']
parser.add_argument(
'from_env', choices=env_choices, help='The source environment'
)
parser.add_argument(
'to_env', choices=env_choices, help='The destination environment'
)
parser.add_argument('app_name', help='The name of the application')
parser.add_argument('config', help='The name of the config file')
return parser
@property
def framework(self):
return self._framework
@property
def config_dir(self):
"""
Property that returns the config dir, typically /vagrant/config
:Return [str]: Path to the configuration directory
"""
if self._config_dir is None:
self._config_dir = self._settings.getConfigDir()
return self._config_dir
@property
def roger_env(self):
"""
Property that returns the dict loaded from
/vagrant/config/roger-mesos-tools.config
:Return [dict]: roger-mesos-tools.config loaded into a dict
"""
if self._roger_env is None:
            self._roger_env = self._app_config.getRogerEnv(self.config_dir)
        return self._roger_env
def _set_framework(self, config_file, app_name):
"""
Set the _framework instance variable based on the application config
:Params:
        :config_file [str]: Name of the configuration file
:app_name [str]: Name of the application
"""
app_data = self._app_config.getAppData(
self.config_dir, config_file, app_name
)
self._framework = self._framework_utils.getFramework(app_data)
def _image_name(self, environment, config_file, template_file):
"""
Returns the image name as a str
:Params:
:environment [str]: Environment as found in roger-mesos-tools.config
        :config_file [str]: Name of the config file to resolve against
:template_file [str]: file that contains a template
:Return [str]: image name with version
"""
username = os.environ['ROGER_USER']
if environment == 'dev':
password = os.environ['ROGER_USER_PASS_DEV']
elif environment == 'stage':
password = os.environ['ROGER_USER_PASS_STAGE']
elif environment == 'prod':
password = os.environ['ROGER_USER_PASS_PROD']
app_id = self._framework.get_app_id(template_file, self._framework.getName())
image = self._framework.get_image_name(
username,
password,
environment,
app_id,
self.config_dir,
config_file
)
return image
def _config_resolver(self, key, application, config_file):
"""
Returns the value for the desired key within the application's
configuration.
:Params:
:key [str]: The key containing the desired value we wish to return
:application [str]: The application being promoted
:config_file [str] path to the yml or json file, typically found under
/vagrant/config/
:Return [str]: Returns string if found, otherwise None
"""
config_data = self._app_config.getConfig(self.config_dir, config_file)
found = None
if key in config_data:
found = config_data[key]
for name, data in config_data['apps'].items():
if name == application:
if key in data:
found = data[key]
break
return found
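    # Resolution sketch (config contents are hypothetical): for a config file parsed as
    #   {'repo': 'shared-repo', 'apps': {'web': {'repo': 'web-repo'}, 'worker': {}}}
    # _config_resolver('repo', 'web', config_file) returns 'web-repo' (the app-level
    # value wins), while _config_resolver('repo', 'worker', config_file) falls back to
    # the top-level 'shared-repo'.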
def _clone_repo(self, repo):
"""
Clone the repo
:Params:
:repo [str] The name of the repo
:Raises:
:subprocess.CalledProcessError:
:Return: None
"""
repo_url = self._app_config.getRepoUrl(repo)
self._temp_dir = tempfile.mkdtemp()
subprocess.check_call(['git', 'clone', repo_url], cwd=self._temp_dir)
def _roger_push_script(self):
"""
Returns path [str] to the roger_push.py executable
:Return [str]: Path to the script
"""
code_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(code_dir, 'roger_push.py')
def _get_template_path(
self,
container_name,
config_dir,
args,
app_name,
app_object=AppConfig(),
settings_object=Settings()
):
"""
Returns the template path for a given container_name
Each framework requires an template_path for the app_id method
:Params:
:config_dir [str]: path to the config directory
        :args [argparse.NameSpace]: provides access to the values passed
:app_name [str]: name of app
:app_object [cli.appconfig.AppConfig]: instance of AppConfig
:settings_object [cli.settings.Settings]: instance of Settings
"""
data = app_object.getConfig(config_dir, args.config)
repo = self._config_resolver('repo', app_name, args.config)
template_path = self._config_resolver(
'template_path', app_name, args.config)
# this path is always relative to the root repo dir, so join
if template_path and not os.path.isabs(template_path):
app_path = os.path.join(self._temp_dir, repo, template_path)
else:
app_path = settings_object.getTemplatesDir()
file_name = "{0}-{1}.json".format(
data['name'], container_name)
return os.path.join(app_path, file_name)
if __name__ == '__main__':
if not RogerPromote.promote():
sys.exit(1)
| apache-2.0 | -7,767,978,578,044,005,000 | 28.905028 | 87 | 0.572109 | false |
rudaoshi/neural_machine | neural_machine/tasks/language/common/corpus/sequence_pair_corpus.py | 3 | 3399 | __author__ = 'Sun'
import codecs
import numpy as np
import json, logging
import itertools
from collections import Counter
from neural_machine.tasks.language.common.corpus.sequence_corpus import SequenceCorpus
from utils.data_process import to_time_distributed_categorical
class SequencePairCorpus(object):
def __init__(self,
source_with_start=False, source_with_end = False, source_with_unk = False,
target_with_start=False, target_with_end=False, target_with_unk=False,
same_length = False
):
self.source_with_start = source_with_start
self.source_with_end = source_with_end
self.source_with_unk = source_with_unk
self.target_with_start = target_with_start
self.target_with_end = target_with_end
self.target_with_unk = target_with_unk
self.source_corpus = SequenceCorpus(source_with_start, source_with_end, source_with_unk)
self.target_corpus = SequenceCorpus(target_with_start, target_with_end, target_with_unk)
self.same_length = same_length
self.corpus = []
def build(self, data_file, source_segmenter, target_segmenter):
for line in data_file:
line = line.strip()
if not line:
continue
try:
src_seq, tgt_seq = line.split('\t')
except:
logging.error("no sequence pair found in sentence : {0} ".format(json.dumps(line)))
continue
if self.same_length and len(src_seq) != len(tgt_seq):
logging.error("src and tgt seq not in same length {0} {1} {2}".format(len(src_seq), len(tgt_seq), json.dumps(line)))
continue
src=self.source_corpus.update(src_seq, source_segmenter)
target=self.target_corpus.update(tgt_seq, target_segmenter)
self.corpus.append((src, target))
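    # Expected input format (content is hypothetical): every line of data_file holds one
    # tab-separated pair, e.g. "ich mag hunde\ti like dogs"; when same_length is True the
    # raw source and target strings must additionally be of equal length.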
def make(self, data_file, source_segmenter, target_segmenter):
corpus = SequencePairCorpus(
self.source_with_start, self.source_with_end , self.source_with_unk ,
self.target_with_start, self.target_with_end, self.target_with_unk,
self.same_length
)
corpus.source_corpus = self.source_corpus.clone()
corpus.target_corpus = self.target_corpus.clone()
for line in data_file:
line = line.strip()
if not line:
continue
try:
src_seq, tgt_seq = line.split('\t')
except:
logging.error("no sequence pair found in sentence : {0} ".format(json.dumps(line)))
continue
if self.same_length and len(src_seq) != len(tgt_seq):
logging.error("src and tgt seq not in same length {0} {1} {2}".format(len(src_seq), len(tgt_seq), json.dumps(line)))
continue
src = self.source_corpus.predict(src_seq, source_segmenter)
target = self.target_corpus.predict(tgt_seq, target_segmenter)
corpus.corpus.append((src, target))
return corpus
def source_cell_num(self):
return self.source_corpus.cell_num()
def target_cell_num(self):
return self.target_corpus.cell_num()
def corpus_size(self):
return len(self.corpus)
| gpl-3.0 | -7,165,570,883,764,807,000 | 31.371429 | 132 | 0.59135 | false |
hfp/tensorflow-xsmm | tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py | 15 | 14351 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monte Carlo integration and helpers.
@@expectation
@@expectation_importance_sampler
@@expectation_importance_sampler_logspace
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import deprecation
__all__ = [
'expectation',
'expectation_importance_sampler',
'expectation_importance_sampler_logspace',
]
def expectation_importance_sampler(f,
log_p,
sampling_dist_q,
z=None,
n=None,
seed=None,
name='expectation_importance_sampler'):
r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\).
With \\(p(z) := exp^{log_p(z)}\\), this `Op` returns
\\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\)
\\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\)
\\(= E_p[f(Z)]\\)
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
If `f >= 0`, it is up to 2x more efficient to exponentiate the result of
`expectation_importance_sampler_logspace` applied to `Log[f]`.
User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape
broadcastable to `q.batch_shape`.
For example, `f` works "just like" `q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `sampling_dist_q.log_prob`.
sampling_dist_q: The sampling distribution.
`tfp.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.
n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
The importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
with ops.name_scope(name, values=[z, n]):
z = _get_samples(q, z, n, seed)
log_p_z = log_p(z)
q_log_prob_z = q.log_prob(z)
def _importance_sampler_positive_f(log_f_z):
# Same as expectation_importance_sampler_logspace, but using Tensors
# rather than samples and functions. Allows us to sample once.
log_values = log_f_z + log_p_z - q_log_prob_z
return _logspace_mean(log_values)
# With \\(f_{plus}(z) = max(0, f(z)), f_{minus}(z) = max(0, -f(z))\\),
# \\(E_p[f(Z)] = E_p[f_{plus}(Z)] - E_p[f_{minus}(Z)]\\)
# \\( = E_p[f_{plus}(Z) + 1] - E_p[f_{minus}(Z) + 1]\\)
# Without incurring bias, 1 is added to each to prevent zeros in logspace.
# The logarithm is approximately linear around 1 + epsilon, so this is good
# for small values of 'z' as well.
f_z = f(z)
log_f_plus_z = math_ops.log(nn.relu(f_z) + 1.)
log_f_minus_z = math_ops.log(nn.relu(-1. * f_z) + 1.)
log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z)
log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z)
return math_ops.exp(log_f_plus_integral) - math_ops.exp(log_f_minus_integral)
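# Illustrative usage (a sketch, not part of the original module; assumes a TF 1.x
# session and that Normal distributions are available, e.g. via
# `tfd = tf.contrib.distributions`):
#
#   p = tfd.Normal(loc=0., scale=1.)   # target density, exposed through p.log_prob
#   q = tfd.Normal(loc=0., scale=2.)   # broader sampling distribution
#   est = expectation_importance_sampler(
#       f=lambda x: x**2, log_p=p.log_prob, sampling_dist_q=q, n=10000, seed=0)
#   # `est` approximates E_p[X^2] = 1.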
def expectation_importance_sampler_logspace(
log_f,
log_p,
sampling_dist_q,
z=None,
n=None,
seed=None,
name='expectation_importance_sampler_logspace'):
r"""Importance sampling with a positive function, in log-space.
With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\),
this `Op` returns
\\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\)
\\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\)
\\(= Log[E_p[f(Z)]]\\)
This integral is done in log-space with max-subtraction to better handle the
often extreme values that `f(z) p(z) / q(z)` can take on.
In contrast to `expectation_importance_sampler`, this `Op` returns values in
log-space.
  User supplies either a `Tensor` of samples `z`, or a number of samples to draw, `n`.
Args:
log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
sampling_dist_q: The sampling distribution.
`tfp.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.
n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
Logarithm of the importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
with ops.name_scope(name, values=[z, n]):
z = _get_samples(q, z, n, seed)
log_values = log_f(z) + log_p(z) - q.log_prob(z)
return _logspace_mean(log_values)
def _logspace_mean(log_values):
"""Evaluate `Log[E[values]]` in a stable manner.
Args:
log_values: `Tensor` holding `Log[values]`.
Returns:
`Tensor` of same `dtype` as `log_values`, reduced across dim 0.
`Log[Mean[values]]`.
"""
# center = Max[Log[values]], with stop-gradient
# The center hopefully keep the exponentiated term small. It is canceled
# from the final result, so putting stop gradient on it will not change the
# final result. We put stop gradient on to eliminate unnecessary computation.
center = array_ops.stop_gradient(_sample_max(log_values))
# centered_values = exp{Log[values] - E[Log[values]]}
centered_values = math_ops.exp(log_values - center)
# log_mean_of_values = Log[ E[centered_values] ] + center
# = Log[ E[exp{log_values - E[log_values]}] ] + center
# = Log[E[values]] - E[log_values] + center
# = Log[E[values]]
log_mean_of_values = math_ops.log(_sample_mean(centered_values)) + center
return log_mean_of_values
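# Worked example (added for clarity): for log_values = [1000., 1000.] a naive
# log(mean(exp(v))) overflows, while the centered form above computes
# max(v) + log(mean(exp(v - max(v)))) = 1000. + log(1.) = 1000. exactly.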
@deprecation.deprecated(
'2018-10-01',
'The tf.contrib.bayesflow library has moved to '
'TensorFlow Probability (https://github.com/tensorflow/probability). '
'Use `tfp.monte_carlo.expectation` instead.',
warn_once=True)
def expectation(f, samples, log_prob=None, use_reparametrization=True,
axis=0, keep_dims=False, name=None):
r"""Computes the Monte-Carlo approximation of \\(E_p[f(X)]\\).
This function computes the Monte-Carlo approximation of an expectation, i.e.,
  \\(E_p[f(X)] \approx m^{-1} sum_j^m f(x_j), x_j\ ~iid\ p(X)\\)
where:
- `x_j = samples[j, ...]`,
- `log(p(samples)) = log_prob(samples)` and
- `m = prod(shape(samples)[axis])`.
Tricks: Reparameterization and Score-Gradient
When p is "reparameterized", i.e., a diffeomorphic transformation of a
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
  grad[ Avg{ \\(s_i : i=1...n\\) } ] = Avg{ grad[\\(s_i\\)] : i=1...n }
  where \\(S_n = Avg{s_i}\\) and \\(s_i = f(x_i), x_i ~ p\\).
However, if p is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of non-reparameterized distributions.
(The non-differentiated result, `approx_expectation`, is the same regardless
of `use_reparametrization`.) In this circumstance using the Score-Gradient
trick results in an unbiased gradient, i.e.,
```none
grad[ E_p[f(X)] ]
= grad[ int dx p(x) f(x) ]
= int dx grad[ p(x) f(x) ]
= int dx [ p'(x) f(x) + p(x) f'(x) ]
= int dx p(x) [p'(x) / p(x) f(x) + f'(x) ]
= int dx p(x) grad[ f(x) p(x) / stop_grad[p(x)] ]
= E_p[ grad[ f(x) p(x) / stop_grad[p(x)] ] ]
```
  When p is reparameterized, it is usually preferable to set
  `use_reparametrization = True`.
Warning: users are responsible for verifying `p` is a "reparameterized"
distribution.
Example Use:
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Monte-Carlo approximation of a reparameterized distribution, e.g., Normal.
num_draws = int(1e5)
p = tfd.Normal(loc=0., scale=1.)
q = tfd.Normal(loc=1., scale=2.)
exact_kl_normal_normal = tfd.kl_divergence(p, q)
# ==> 0.44314718
approx_kl_normal_normal = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
== distribution.FULLY_REPARAMETERIZED))
# ==> 0.44632751
# Relative Error: <1%
# Monte-Carlo approximation of non-reparameterized distribution, e.g., Gamma.
num_draws = int(1e5)
  p = tfd.Gamma(concentration=1., rate=1.)
  q = tfd.Gamma(concentration=2., rate=3.)
exact_kl_gamma_gamma = tfd.kl_divergence(p, q)
# ==> 0.37999129
approx_kl_gamma_gamma = tfp.monte_carlo.expectation(
f=lambda x: p.log_prob(x) - q.log_prob(x),
samples=p.sample(num_draws, seed=42),
log_prob=p.log_prob,
use_reparametrization=(p.reparameterization_type
== distribution.FULLY_REPARAMETERIZED))
# ==> 0.37696719
# Relative Error: <1%
# For comparing the gradients, see `monte_carlo_test.py`.
```
Note: The above example is for illustration only. To compute approximate
KL-divergence, the following is preferred:
```python
approx_kl_p_q = tfp.vi.monte_carlo_csiszar_f_divergence(
f=bf.kl_reverse,
p_log_prob=q.log_prob,
q=p,
num_draws=num_draws)
```
Args:
f: Python callable which can return `f(samples)`.
samples: `Tensor` of samples used to form the Monte-Carlo approximation of
\\(E_p[f(X)]\\). A batch of samples should be indexed by `axis`
dimensions.
log_prob: Python callable which can return `log_prob(samples)`. Must
correspond to the natural-logarithm of the pdf/pmf of each sample. Only
required/used if `use_reparametrization=False`.
Default value: `None`.
use_reparametrization: Python `bool` indicating that the approximation
should use the fact that the gradient of samples is unbiased. Whether
`True` or `False`, this arg only affects the gradient of the resulting
`approx_expectation`.
Default value: `True`.
axis: The dimensions to average. If `None`, averages all
dimensions.
Default value: `0` (the left-most dimension).
keep_dims: If True, retains averaged dimensions using size `1`.
Default value: `False`.
name: A `name_scope` for operations created by this function.
Default value: `None` (which implies "expectation").
Returns:
approx_expectation: `Tensor` corresponding to the Monte-Carlo approximation
of \\(E_p[f(X)]\\).
Raises:
ValueError: if `f` is not a Python `callable`.
ValueError: if `use_reparametrization=False` and `log_prob` is not a Python
`callable`.
"""
with ops.name_scope(name, 'expectation', [samples]):
if not callable(f):
raise ValueError('`f` must be a callable function.')
if use_reparametrization:
return math_ops.reduce_mean(f(samples), axis=axis, keepdims=keep_dims)
else:
if not callable(log_prob):
raise ValueError('`log_prob` must be a callable function.')
stop = array_ops.stop_gradient # For readability.
x = stop(samples)
logpx = log_prob(x)
fx = f(x) # Call `f` once in case it has side-effects.
# We now rewrite f(x) so that:
# `grad[f(x)] := grad[f(x)] + f(x) * grad[logqx]`.
# To achieve this, we use a trick that
# `h(x) - stop(h(x)) == zeros_like(h(x))`
# but its gradient is grad[h(x)].
# Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence
# this trick loses no precision. For more discussion regarding the
# relevant portions of the IEEE754 standard, see the StackOverflow
# question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
fx += stop(fx) * (logpx - stop(logpx)) # Add zeros_like(logpx).
return math_ops.reduce_mean(fx, axis=axis, keepdims=keep_dims)
def _sample_mean(values):
"""Mean over sample indices. In this module this is always [0]."""
return math_ops.reduce_mean(values, axis=[0])
def _sample_max(values):
"""Max over sample indices. In this module this is always [0]."""
return math_ops.reduce_max(values, axis=[0])
def _get_samples(dist, z, n, seed):
"""Check args and return samples."""
with ops.name_scope('get_samples', values=[z, n]):
if (n is None) == (z is None):
raise ValueError(
'Must specify exactly one of arguments "n" and "z". Found: '
'n = %s, z = %s' % (n, z))
if n is not None:
return dist.sample(n, seed=seed)
else:
return ops.convert_to_tensor(z, name='z')
| apache-2.0 | -7,686,672,466,956,218,000 | 37.371658 | 80 | 0.630061 | false |
Shekharrajak/pydy | pydy/codegen/tests/test_cython_code.py | 4 | 5401 | #!/usr/bin/env python
import os
import numpy as np
import sympy as sm
from ...models import multi_mass_spring_damper
from ..cython_code import CythonMatrixGenerator
class TestCythonMatrixGenerator(object):
def setup(self):
self.prefix = 'boogly_bee'
sys = multi_mass_spring_damper(6, True, True)
self.matrices = (sys.eom_method.mass_matrix,
sys.eom_method.forcing)
self.arguments = (sys.constants_symbols,
sys.coordinates,
sys.speeds,
sys.specifieds_symbols)
self.generator = CythonMatrixGenerator(self.arguments,
self.matrices, self.prefix)
def test_generate_code_blocks(self):
expected = {}
expected['header_args'] = \
"""\
double* input_0,
double* input_1,
double* input_2,
double* input_3,
double* output_0,
double* output_1\
"""
expected['python_args'] = \
"""\
np.ndarray[np.double_t, ndim=1, mode='c'] input_0,
np.ndarray[np.double_t, ndim=1, mode='c'] input_1,
np.ndarray[np.double_t, ndim=1, mode='c'] input_2,
np.ndarray[np.double_t, ndim=1, mode='c'] input_3,
np.ndarray[np.double_t, ndim=1, mode='c'] output_0,
np.ndarray[np.double_t, ndim=1, mode='c'] output_1\
"""
expected['c_args'] = \
"""\
<double*> input_0.data,
<double*> input_1.data,
<double*> input_2.data,
<double*> input_3.data,
<double*> output_0.data,
<double*> output_1.data\
"""
expected['output'] = \
"""\
output_0.reshape(6, 6),
output_1\
"""
self.generator._generate_code_blocks()
for k, v in self.generator.code_blocks.items():
assert v == expected[k]
def test_doprint(self):
expected_pyx_source = \
"""\
import numpy as np
cimport numpy as np
cimport cython
cdef extern from "boogly_bee_c.h":
void evaluate(
double* input_0,
double* input_1,
double* input_2,
double* input_3,
double* output_0,
double* output_1
)
@cython.boundscheck(False)
@cython.wraparound(False)
def eval(
np.ndarray[np.double_t, ndim=1, mode='c'] input_0,
np.ndarray[np.double_t, ndim=1, mode='c'] input_1,
np.ndarray[np.double_t, ndim=1, mode='c'] input_2,
np.ndarray[np.double_t, ndim=1, mode='c'] input_3,
np.ndarray[np.double_t, ndim=1, mode='c'] output_0,
np.ndarray[np.double_t, ndim=1, mode='c'] output_1
):
evaluate(
<double*> input_0.data,
<double*> input_1.data,
<double*> input_2.data,
<double*> input_3.data,
<double*> output_0.data,
<double*> output_1.data
)
return (
output_0.reshape(6, 6),
output_1
)\
"""
expected_setup_py_source = """\
#!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
extension = Extension(name="boogly_bee",
sources=["boogly_bee.pyx",
"boogly_bee_c.c"],
include_dirs=[numpy.get_include()])
setup(name="boogly_bee",
ext_modules=cythonize([extension]))\
"""
setup, pyx, c_header, c_source = self.generator.doprint()
assert setup == expected_setup_py_source
assert pyx == expected_pyx_source
def test_write(self):
setup, pyx, c_header, c_source = self.generator.doprint()
self.generator.write()
with open(self.prefix + '_c.h') as f:
assert f.read() == c_header
with open(self.prefix + '_c.c') as f:
assert f.read() == c_source
with open(self.prefix + '_setup.py') as f:
assert f.read() == setup
with open(self.prefix + '.pyx') as f:
assert f.read() == pyx
def test_compile(self):
f = self.generator.compile()
subs = {}
args = []
for argset in self.arguments:
vals = np.random.random(len(argset))
args.append(vals)
for arg, val in zip(argset, vals):
subs[arg] = val
for matrix in self.matrices:
nr, nc = matrix.shape
args.append(np.empty(nr * nc, dtype=float))
for output, expected in zip(f(*args), self.matrices):
try:
expected = sm.matrix2numpy(expected.subs(subs),
dtype=float).squeeze()
except TypeError:
# dtype kwarg in not supported in earlier SymPy versions
expected = np.asarray(sm.matrix2numpy(expected.subs(subs)),
dtype=float).squeeze()
np.testing.assert_allclose(output, expected)
def teardown(self):
for suffix in ['_c.h', '_c.c', '_setup.py', '.pyx']:
filename = self.prefix + suffix
if os.path.isfile(filename):
os.remove(filename)
| bsd-3-clause | -6,750,143,707,180,863,000 | 26.840206 | 75 | 0.515275 | false |
johnmeade/helpy | curried.py | 1 | 3257 | 'Curried helpers for cleaner funcitonal style programming'
import sys
import helpy.functional as fnc
from functools import reduce as py_reduce
if sys.version_info.major >= 3 and sys.version_info.minor >= 3:
from inspect import signature
def __arg_count__(f):
return len( signature(f).parameters )
else:
from inspect import getargspec
def __arg_count__(f):
return len( getargspec(f).args )
py_map = map
py_filter = filter
def curry(f):
'''Curry simple functions, excluding any built-ins that are implemented in
C. Some C functions (like reduce) are provided in this module. To curry
other such built-in not in this module, you must wrap it in a python
function or lambda like so:
curried_max = curry(lambda x, y: max(x, y))
max9 = curried_max(9)
assert max9(8) == 9
assert max9(13) == 13
    Note that this implementation tries to be
    immutable, i.e. the following will not work:
foo = lambda x, y, z: x+y+z
f = curry( foo )
f(9)
f(8) # independent from the above line
f(7) # again, just creates a new instance
Instead, a new instance is returned at each evaluation, so for example the
following works:
foo = lambda x, y, z: x+y+z
f = curry( foo )(9)
g = f(8)
assert g(7) == 9+8+7
assert g(6) == 9+8+6
assert f(5)(2) == 9+5+2
'''
n = __arg_count__(f)
return __curry__(f, n-1, [])
def __curry__(f, n, args):
'''Curries the first n args of f, keeping track of all passed args along the
way. The final call will spread the arg list into the actual call to f.
'''
# Note that we need to make a copy of the args list to stop python from
# using the same one for repeated calls. For example, without this the
# following will fail on the last statement:
# `f=lambda x,y: x+y; g=curry(f)(1); g(2); g(3)`
if n == 0: return lambda x: f( *fnc.appended(args[:], x) )
else: return lambda x: __curry__( f, n-1, fnc.appended(args[:], x) )
def pipe(*fns):
'''Apply functions in pipe-order and curry the argument. Note that this
currently only works with functions that take one arg, so you may want
to wrap all args into a list.
Example:
f(g(h(x))) == pipe(h,g,f)(x)
'''
return lambda x: py_reduce(lambda acc, f: f(acc), fns, x)
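# Quick illustration (sketch): pipe applies left-to-right, so
#   pipe(str.strip, str.lower)("  ABC  ") == "abc"
# is the same as str.lower(str.strip("  ABC  ")).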
#
# Faster currying for common functions (and built-ins)
#
def eq(val): return lambda x: x == val
def not_eq(val): return lambda x: x != val
def lt(val): return lambda x: x < val
def lte(val): return lambda x: x <= val
def gt(val): return lambda x: x > val
def gte(val): return lambda x: x >= val
def startswith(s):
return lambda x: x.startswith(s)
def endswith(s):
return lambda x: x.endswith(s)
def map(f):
return lambda lst: py_map(f, lst)
def filter(f):
return lambda lst: py_filter(f, lst)
def reduce(f):
# can't use default args with lambdas, need full py function defs
def _list(lst):
def _init(init=None):
if init == None: return py_reduce(f, lst)
else: return py_reduce(f, lst, init)
return _init
return _list
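# Usage sketch for the double-call shape of this curried reduce:
#   reduce(lambda a, b: a + b)([1, 2, 3])()    # -> 6
#   reduce(lambda a, b: a + b)([1, 2, 3])(10)  # -> 16 (10 used as the initial value)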
def sort_by(f):
return lambda lst: sorted(lst, key=f)
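# e.g. (illustrative): sort_by(len)(["bb", "a"]) -> ["a", "bb"]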
def argsort_by(f):
return lambda lst: fnc.argsorted(lst, key=f)
| mit | 848,729,204,883,938,400 | 24.849206 | 80 | 0.632484 | false |
kx499/mhn | server/mhn/auth/views.py | 7 | 5357 | import hashlib
from datetime import datetime
from flask import Blueprint, request, jsonify
from flask.ext.mail import Message
from sqlalchemy.exc import IntegrityError
from flask_security.utils import (
login_user as login, verify_and_update_password,
encrypt_password, logout_user as logout)
from mhn import db, mail
from mhn import user_datastore
from mhn.common.utils import error_response
from mhn.auth.models import User, PasswdReset, ApiKey
from mhn.auth import errors
from mhn.auth import (
get_datastore, login_required, roles_accepted, current_user)
from mhn.api import errors as apierrors
import uuid
auth = Blueprint('auth', __name__, url_prefix='/auth')
@auth.route('/login/', methods=['POST'])
def login_user():
if 'email' not in request.json:
return error_response(errors.AUTH_EMAIL_MISSING, 400)
if 'password' not in request.json:
return error_response(errors.AUTH_PSSWD_MISSING, 400)
# email and password are in the posted data.
user = User.query.filter_by(
email=request.json.get('email')).first()
psswd_check = False
if user:
psswd_check = verify_and_update_password(
request.json.get('password'), user)
if user and psswd_check:
login(user, remember=True)
return jsonify(user.to_dict())
else:
return error_response(errors.AUTH_INCORRECT_CREDENTIALS, 401)
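# Example request against this endpoint (illustrative host and credentials, shown
# only to document the expected JSON body):
# curl -X POST http://localhost/auth/login/ \
#      -H 'Content-Type: application/json' \
#      -d '{"email": "[email protected]", "password": "secret"}'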
@auth.route('/logout/', methods=['GET'])
def logout_user():
logout()
return jsonify({})
@auth.route('/user/', methods=['POST'])
@auth.route('/register/', methods=['POST'])
@roles_accepted('admin')
def create_user():
missing = User.check_required(request.json)
if missing:
return error_response(
apierrors.API_FIELDS_MISSING.format(missing), 400)
else:
user = get_datastore().create_user(
email=request.json.get('email'),
password=encrypt_password(request.json.get('password')))
userrole = user_datastore.find_role('admin')
user_datastore.add_role_to_user(user, userrole)
try:
db.session.add(user)
db.session.flush()
apikey = ApiKey(user_id=user.id, api_key=str(uuid.uuid4()).replace("-", ""))
db.session.add(apikey)
db.session.commit()
except IntegrityError:
return error_response(errors.AUTH_USERNAME_EXISTS, 400)
else:
return jsonify(user.to_dict())
@auth.route('/user/<user_id>/', methods=['DELETE'])
@roles_accepted('admin')
def delete_user(user_id):
user = User.query.get(user_id)
if not user:
return error_response(errors.AUTH_NOT_FOUND.format(user_id), 404)
    user.active = False
db.session.add(user)
db.session.commit()
return jsonify({})
@auth.route('/resetrequest/', methods=['POST'])
def reset_passwd_request():
if 'email' not in request.json:
return error_response(errors.AUTH_EMAIL_MISSING, 400)
email = request.json['email']
user = User.query.filter_by(email=email).first()
if not user:
return error_response(errors.AUTH_NOT_FOUND.format(email), 404)
hashstr = hashlib.sha1(str(datetime.utcnow()) + user.email).hexdigest()
# Deactivate all other password resets for this user.
PasswdReset.query.filter_by(user=user).update({'active': False})
reset = PasswdReset(hashstr=hashstr, active=True, user=user)
db.session.add(reset)
db.session.commit()
# Send password reset email to user.
from mhn import mhn
msg = Message(
html=reset.email_body, subject='MHN Password reset',
recipients=[user.email], sender=mhn.config['DEFAULT_MAIL_SENDER'])
try:
mail.send(msg)
except:
return error_response(errors.AUTH_SMTP_ERROR, 500)
else:
return jsonify({})
@auth.route('/changepass/', methods=['POST'])
def change_passwd():
password = request.json.get('password')
password_repeat = request.json.get('password_repeat')
if not password or not password_repeat:
# Request body is not complete.
return error_response(errors.AUTH_RESET_MISSING, 400)
if password != password_repeat:
# Password do not match.
return error_response(errors.AUTH_PASSWD_MATCH, 400)
if current_user.is_authenticated():
# No need to check password hash object or email.
user = current_user
else:
email = request.json.get('email')
hashstr = request.json.get('hashstr')
if not email or not hashstr:
# Request body is not complete for not authenticated
# request, ie, uses password reset hash.
return error_response(errors.AUTH_RESET_MISSING, 400)
reset = db.session.query(PasswdReset).join(User).\
filter(User.email == email, PasswdReset.active == True).\
filter(PasswdReset.hashstr == hashstr).\
first()
if not reset:
return error_response(errors.AUTH_RESET_HASH, 404)
db.session.add(reset)
reset.active = False
user = reset.user
user.password = encrypt_password(password)
db.session.add(user)
db.session.commit()
return jsonify({})
@auth.route('/me/', methods=['GET'])
@login_required
def get_user():
return jsonify(current_user.to_dict())
| lgpl-2.1 | 2,123,904,715,376,590,300 | 33.56129 | 88 | 0.644951 | false |
allink/django-admin-sso | example/wsgi.py | 3 | 1132 | """
WSGI config for test2 project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test2.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | 94,997,185,504,878,600 | 39.428571 | 79 | 0.79947 | false |
deevarvar/myLab | baidu_code/bcoreapi/ticket/base.py | 1 | 27437 | #-*- coding=utf-8 -*-
'''
@description: Base class for test cases of the new core ticket-purchase API.
@author: miliang<[email protected]>
'''
import sys
import copy
import urllib
import urllib2
import json
import hashlib
import time
import MySQLdb
import random
import redis
import socket
from settings import SERVER,REDIS_CONF,MYSQL,SIGN_KEY, ODP_PAL_SERVER
import ticketlog
import logging.config
logging.config.dictConfig(ticketlog.LOGGING)
logger = logging.getLogger('ticket')
class Ticket_Base(object):
def __init__(self):
self.req_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/'
self.base_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/'
#self.base_url = 'http://dianying.baidu.com/ticket/'
self.sign_key = SIGN_KEY
self.req_dict = {}
self.page_dict = {}
self.redis = redis.Redis(REDIS_CONF['HOST'],REDIS_CONF['PORT'],REDIS_CONF['DB'])
def doAssert(self):
pass
def execute(self):
self.page_dict = self.doRequest(self.req_url,self.req_dict)
self.doAssert()
def genUrl(self,base_url,params):
sign = self.signGenerate(params)
params['sign'] = sign
query_string = urllib.urlencode(params)
url = base_url + '?' + query_string
return url
def doRequest(self,base_url,params):
sign = self.signGenerate(params)
params['sign'] = sign
query_string = urllib.urlencode(params)
url = base_url + '?' + query_string
logger.debug('::::Request url:::: ' + url)
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent }
req = urllib2.Request(url,'',headers)
        # timeout check
try:
response = urllib2.urlopen(req)
except socket.timeout:
logger.error('socket timeout')
result = {"erroring" : 2}
return result
the_page = response.read()
try:
#print json.loads(the_page)
return json.loads(the_page)
except:
logger.error('json decode error ' + the_page);
result = {"erroring":1}
return result
def doPostRequest(self,base_url,params):
query_string = urllib.urlencode(params)
url = base_url + '?' + query_string
#print '::::Request url:::: ' + url
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent }
data = urllib.urlencode(params)
req = urllib2.Request(url,data,headers)
response = urllib2.urlopen(req)
the_page = response.read()
logger.info(the_page)
try:
return json.loads(the_page)
except:
result = {"erroring":1}
return result
def signGenerate(self,params_origin):
params = copy.deepcopy(params_origin)
if params.has_key('log_id'):
del params['log_id']
if params.has_key('sign'):
del params['sign']
params = sorted(params.iteritems(), key=lambda d:d[0])
sign = hashlib.new("md5", self.sign_key + urllib.urlencode(params)).hexdigest()
sign = hashlib.new("md5", sign + self.sign_key).hexdigest()
return sign
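    # Signing sketch (illustrative key and params): with sign_key 'KEY' and
    # params {'b': '2', 'a': '1'}, 'log_id'/'sign' are dropped, the rest is sorted
    # by key and urlencoded to 'a=1&b=2', and the signature is
    # md5(md5('KEY' + 'a=1&b=2').hexdigest() + 'KEY').hexdigest().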
def getSchedule(self,third_from,counter=1):
'''
        @description: query schedules (showtimes)
        @notes:
        @input:
            third_from : partner (data source)
            counter : index of the matching schedule to pick
        @output:
            result : schedule info, including partner name, schedule number and the cinema's third-party id.
'''
mysql = MySQLdb.connect(host=MYSQL["HOST"],port=MYSQL["PORT"],user=MYSQL["USER"],passwd=MYSQL["PASSWD"],db=MYSQL["DB"],charset="utf8")
cursor = mysql.cursor()
cmd = "select * from t_movie_cache_odp where third_from='%s' and data!='' order by id DESC limit 0,100" % (third_from)
cursor.execute(cmd)
for item in cursor.fetchall():
third_id = item[1]
            # check the poi table for an associated cinema record; skip this cinema if there is none
logger.debug('try to get cinema_id is ' + str(third_id) + ', third_from is ' + str(third_from))
sql = "select cinemas_id from t_movie_poi where third_from='%s' and third_id='%s' limit 0,1" % (third_from,third_id)
cursor.execute(sql)
poi_data = cursor.fetchall()
#print poi_data[0][0]
#logger.debug('cinema id is ' + str(poi_data[0][0]))
if len(poi_data) == 0 or int(poi_data[0][0]) == 0:
continue
            # uncomment the lines below to pin the scan to a specific cinema
#if third_id != '32012101':
# continue
try:
time_table = json.loads(item[3])['time_table']
except:
continue
refer_dict = {}
if len(time_table) == 0 or type(time_table) == type(refer_dict):
continue
time_threshold = time.time()+2*60*60
for day in range(len(time_table)):
for seq in time_table[day]:
#logger.debug('time is '+seq['time'] + ', date is '+seq['date']);
if not seq['time'] or not seq['date']:
continue
seq_datetime = seq['date'] + ' ' + seq['time']
try:
seq_outbuy_time = time.mktime(time.strptime(seq_datetime, '%Y-%m-%d %H:%M'))
except:
seq_outbuy_time = time.mktime(time.strptime(seq_datetime, '%Y-%m-%d %H:%M:%S'))
if seq_outbuy_time < time_threshold:
continue
if not seq['src_info'][0]['seq_no']:
continue
counter -= 1
if counter != 0:
continue
result = {}
result['seq_no'] = seq['src_info'][0]['seq_no']
result['origin_price'] = seq['origin_price']
result['third_from'] = third_from
result['third_id'] = third_id
result['movie_id'] = seq['src_info'][0]['movie_id']
if seq['src_info'][0].has_key('price'):
result['price'] = seq['src_info'][0]['price']
                    # parameters required by the legacy fire seat-locking API
if seq['src_info'][0].has_key('hall_id'):
result['hall_id'] = seq['src_info'][0]['hall_id']
if seq['src_info'][0].has_key('section_id'):
result['section_id'] = seq['src_info'][0]['section_id']
if seq['src_info'][0].has_key('show_seq_no'):
result['show_seq_no'] = seq['src_info'][0]['show_seq_no']
#print seq
return result
def getSeat(self,third_from,cinema_index=1,num=1,seq_no=None,third_id=None,seat_index=1,mode=0):
'''
        @description: fetch seat information
        @notes:
        @input:
            third_from : partner (data source)
            cinema_index : index of the matching schedule to pick
            num : number of seats
            seq_no : schedule number, only used when mode=1;
            third_id : third-party cinema id, only used when mode=1;
            seat_index : seat index, i.e. return the n-th seat that satisfies the conditions, only used when mode=1;
            mode: 0 = search mode, the function scans the database and picks a suitable schedule automatically; 1 = explicit mode, the caller must supply seq_no, third_id and seat_index; 2 = probe mode, only checks whether the seat-map request succeeds and returns that verdict
        @output:
            result : seat info, including schedule number, the cinema's third-party id and seat numbers.
'''
result = {}
result['seat_no'] = []
        # error code: 0 = OK; 1001 = failed to fetch the seat map; 1002 = schedule sold out
result['error_no'] = 0
if mode == 0:
for i in range(10):
schedule = self.getSchedule(third_from,cinema_index+i)
if not schedule or not schedule['seq_no']:
logger.error('get schedule error for '+str(third_from))
continue
result['seq_no'] = schedule['seq_no']
result['third_id'] = schedule['third_id']
if schedule.has_key('price'):
result['price'] = schedule['price']
result['origin_price'] = schedule['origin_price']
result['movie_id'] = schedule['movie_id']
                # the legacy fire seat-locking API needs hall_id, section_id and show_seq_no
if schedule.has_key('hall_id'):
result['hall_id'] = schedule['hall_id']
if schedule.has_key('section_id'):
result['section_id'] = schedule['section_id']
if schedule.has_key('show_seq_no'):
result['show_seq_no'] = schedule['show_seq_no']
#base_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/' + 'seat/get'
base_url = self.base_url + 'seat/get'
params = {"third_from":schedule['third_from'],"seq_no":schedule['seq_no'],"third_id":schedule['third_id']}
#logger.info('choose seq_no is http://' + ODP_PAL_SERVER['HOST'] + ':' + ODP_PAL_SERVER['PORT'] + '/detail?qt=movie&act=select&from=pc&seq_no='
# +schedule['seq_no']+ '&cinema_id='+ schedule['third_id'] +'&third_from='+schedule['third_from']+'&sfrom=map')
while True:
seat_info = self.doRequest(base_url,params)
#logger.debug('seat_info is '+ str(seat_info));
if not seat_info.has_key('erroring'):
break
                # collect seat numbers that have not been booked
if not 'data' in seat_info or not seat_info['data']:
logger.error('get seat info error for '+str(params));
continue
else:
if not seq_no or not third_id:
return result
result['seq_no'] = seq_no
result['third_id'] = third_id
#base_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/' + 'seat/get'
base_url = self.base_url + 'seat/get'
params = {"third_from":third_from,"seq_no":seq_no,"third_id":third_id}
while True:
#base_url='http://dianying.baidu.com/ticket/seat/get' # debug
seat_info = self.doRequest(base_url,params)
if not seat_info.has_key('erroring'):
break
if not seat_info.has_key('data') or not seat_info['data']:
logger.error('get seat info error for '+str(params));
result['error_no'] = 1001
return result
if mode == 2:
return result
        # walk the seat map of the schedule and find free seats
seq_full = 1
for row in seat_info['data']:
for col in row['col']:
if col['sno'] and col['cid'] and col['st'] == 1:
seq_full = 0
seat_index -= 1
if seat_index <= 0:
result['seat_no'].append(col['sno'])
num -= 1
if num == 0:
break
if num == 0:
break
if seq_full == 1:
result['error_no'] = 1002
return result
def lockSeat(self,third_from,cinema_index=1,num=1,seq_no=None,third_id=None,seat_index=1,log_id=123456,phone_number='13892396551',mode=0):
'''
        @description: lock seats
        @notes:
        @input:
            third_from : partner (data source)
            cinema_index : index of the matching schedule to pick
            num : number of seats
            seq_no : schedule number, only used when mode=1;
            third_id : third-party cinema id, only used when mode=1;
            seat_index : seat index, only used when mode=1;
            mode : 0 = search mode, a suitable schedule is picked from the database automatically and locked; 1 = explicit mode, the given schedule and cinema are locked
        @output:
            result : seat info, including schedule number, the cinema's third-party id, seat numbers and the third-party order id.
'''
result = {}
result['error_no'] = 0
        # generate a random phone number
phone_number = self.randomPhoneNum()
result['phone_number'] = phone_number
if mode == 0:
for i in range(10):
                # fetch seats
seat_query = self.getSeat(third_from,cinema_index=1+i,num=num)
if seat_query['error_no'] != 0:
continue
seq_no = seat_query['seq_no']
third_id = seat_query['third_id']
seat_info = ''
for seat_no in seat_query['seat_no']:
seat_info += '|' + seat_no
seat_info = seat_info[1:]
params = {"third_from":third_from,"seq_no":seq_no,"third_id":third_id,"log_id":log_id,"phone_number":phone_number,"seat_info":seat_info}
#base_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/' + 'seat/lockseat'
base_url = self.base_url + 'seat/lockseat'
logger.info('lockseat seat info is '+ str(params) + ' , pc entry url is ' + \
'http://' + ODP_PAL_SERVER['HOST'] + ':' + ODP_PAL_SERVER['PORT'] + '/detail?qt=movie&act=select&from=pc&seq_no='\
+str(seq_no) + '&cinema_id='+ str(third_id) +'&third_from='+str(third_from)+'&sfrom=map');
                # retry loop
retry_max = 3
retry = 0
                # reason for leaving the retry loop: 0 = seats locked successfully, 1 = retry limit exceeded
break_reason = 0
while True:
if retry == retry_max:
break_reason = 1
break
while True:
seat_lock = self.doRequest(base_url,params)
if not seat_lock.has_key('erroring'):
break
#print seat_lock
if seat_lock['third_order_id']:
break_reason = 0
break
retry += 1
time.sleep(1.5)
if break_reason == 1:
logger.error('lockseat error')
continue
third_order_id = seat_lock['third_order_id']
result['seq_no'] = seq_no
result['third_id'] = third_id
result['seat_no'] = seat_info
result['third_order_id'] = third_order_id
return result
elif mode == 1:
seat_query = self.getSeat(third_from,seq_no=seq_no,third_id=third_id,seat_index=seat_index,mode=1)
if seat_query['error_no'] != 0:
result['error_no'] = seat_query['error_no']
return result
if seat_query['error_no'] == 0 and len(seat_query['seat_no']) == 0:
result['error_no'] = 2001
return result
seq_no = seat_query['seq_no']
third_id = seat_query['third_id']
seat_info = ''
for seat_no in seat_query['seat_no']:
seat_info += '|' + seat_no
seat_info = seat_info[1:]
result['seq_no'] = seq_no
result['third_id'] = third_id
result['seat_no'] = seat_info
params = {"third_from":third_from,"seq_no":seq_no,"third_id":third_id,"log_id":log_id,"phone_number":phone_number,"seat_info":seat_info}
#base_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/' + 'seat/lockseat'
base_url = self.base_url + 'seat/lockseat'
            # retry loop
retry_max = 5
retry = 0
while True:
if retry == retry_max:
return result
while True:
seat_lock = self.doRequest(base_url,params)
#print seat_lock
if not seat_lock.has_key('erroring'):
break
if seat_lock.has_key('third_order_id') and seat_lock['third_order_id']:
break
retry += 1
time.sleep(1.5)
third_order_id = seat_lock['third_order_id']
#result['seq_no'] = seq_no
#result['third_id'] = third_id
#result['seat_no'] = seat_info
result['third_order_id'] = third_order_id
return result
else:
return result
def getOrder(self,third_from):
'''
        @description: fetch order information
        @notes:
        @input:
            third_from : partner (data source)
        @output:
            result : order info, including the phone number used for booking.
'''
result = {}
mysql = MySQLdb.connect(host=MYSQL['HOST'],port=MYSQL['PORT'],db=MYSQL['DB'],user=MYSQL['USER'],passwd=MYSQL['PASSWD'],charset='utf8')
cursor = mysql.cursor()
sql = "select * from t_movie_border where third_from='%s' order by border_id desc limit 0,1" % third_from
#print sql
cursor.execute(sql)
for order in cursor.fetchall():
result['phone_number'] = order[11]
#print result['phone_number']
if result['phone_number']:
return result
def getSeatStatus(self,third_from,seq_no,third_id,seat_no):
'''
        @description: query the lock status of a seat
        @notes: currently only a single seat can be queried at a time
        @input:
            third_from : partner (data source)
            seq_no : schedule number
            third_id : third-party cinema id
            seat_no: seat number
        @output:
            result : lock_status: seat status, 0 = aisle, 1 = available, 2 = unavailable, 3 = seat not found
'''
result = {}
result['error_no'] = 0
if not seq_no or not third_id:
return result
#base_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/' + 'seat/get'
base_url = self.base_url + 'seat/get'
params = {"third_from":third_from,"seq_no":seq_no,"third_id":third_id}
while True:
seat_info = self.doRequest(base_url,params)
if not seat_info.has_key('erroring'):
break
if not seat_info.has_key('data') or not seat_info['data']:
logger.info(seat_info)
result['error_no'] = 1001
return result
        # walk the seat map of the schedule to find the specified seat
result['lock_status'] = 3
for row in seat_info['data']:
for col in row['col']:
#if col['sno'] and col['cid'] and col['st'] == 1:
if col['sno'] == seat_no:
result['lock_status'] = col['st']
return result
def randomPhoneNum(self):
'''
        @description: generate a random phone number
        @notes: every defence breeds a better workaround!
        @input:
        @output:
            phone_num: the randomly generated phone number
'''
phone_num = '13'
for i in range(9):
digit = random.randint(0,9)
phone_num += str(digit)
return phone_num
def getAllSchedules(self,third_from):
'''
        @description: fetch all schedules of a partner that start more than 2 hours from now
        @notes:
        @input:
            third_from : partner (data source)
        @output:
            result : schedule info, including partner name, schedule number and the cinema's third-party id.
'''
result = []
mysql = MySQLdb.connect(host=MYSQL["HOST"],port=MYSQL["PORT"],user=MYSQL["USER"],passwd=MYSQL["PASSWD"],db=MYSQL["DB"],charset="utf8")
cursor = mysql.cursor()
cmd = "select * from t_movie_cache_odp where third_from='%s' and data!='' order by id" % (third_from)
cursor.execute(cmd)
for item in cursor.fetchall():
third_id = item[1]
# debug for lanhai
#if third_id != '32012101':
# continue
try:
time_table = json.loads(item[3])['time_table']
except:
continue
refer_dict = {}
if len(time_table) == 0 or type(time_table) == type(refer_dict):
continue
time_threshold = time.time()+2*60*60
for day in range(len(time_table)):
for seq in time_table[day]:
if not seq['time'] or not seq['date']:
continue
seq_datetime = seq['date'] + ' ' + seq['time']
try:
seq_outbuy_time = time.mktime(time.strptime(seq_datetime, '%Y-%m-%d %H:%M'))
except:
seq_outbuy_time = time.mktime(time.strptime(seq_datetime, '%Y-%m-%d %H:%M:%S'))
if seq_outbuy_time < time_threshold:
continue
if not seq['src_info'][0]['seq_no']:
continue
seq_info = {}
seq_info['seq_no'] = seq['src_info'][0]['seq_no']
seq_info['origin_price'] = seq['origin_price']
seq_info['third_from'] = third_from
seq_info['third_id'] = third_id
seq_info['movie_id'] = seq['src_info'][0]['movie_id']
seq_info['seq_outbuy_time'] = seq_outbuy_time
if seq['src_info'][0].has_key('price'):
seq_info['price'] = seq['src_info'][0]['price']
                    # parameters required by the legacy fire seat-locking API
if seq['src_info'][0].has_key('hall_id'):
seq_info['hall_id'] = seq['src_info'][0]['hall_id']
if seq['src_info'][0].has_key('section_id'):
seq_info['section_id'] = seq['src_info'][0]['section_id']
if seq['src_info'][0].has_key('show_seq_no'):
seq_info['show_seq_no'] = seq['src_info'][0]['show_seq_no']
result.append(seq_info)
#print seq
return result
def lockAllSeat(self,third_from,seq_no=None,third_id=None,mode=1):
'''
        @description: lock every seat of a given schedule and tally the results
        @notes:
        @input:
            third_from : partner (data source)
            seq_no : schedule number, only used when mode=1;
            third_id : third-party cinema id, only used when mode=1;
            mode: 1 = explicit mode, the caller must supply seq_no and third_id
        @output:
            result : fail: number of failures; success: number of successes.
'''
result = {}
result['seat_no'] = []
        # error code: 0 = OK; 1001 = failed to fetch the seat map; 1002 = schedule sold out
result['error_no'] = 0
result['fail'] = 0
result['success'] = 0
if mode == 1:
if not seq_no or not third_id:
return result
result['seq_no'] = seq_no
result['third_id'] = third_id
#base_url = 'http://' + SERVER['HOST'] + ':' + SERVER['PORT'] + '/ticket/' + 'seat/get'
base_url = self.base_url + 'seat/get'
params = {"third_from":third_from,"seq_no":seq_no,"third_id":third_id}
while True:
#base_url='http://dianying.baidu.com/ticket/seat/get' # debug
seat_info = self.doRequest(base_url,params)
if not seat_info.has_key('erroring'):
break
if not seat_info.has_key('data') or not seat_info['data']:
logger.info(seat_info)
result['error_no'] = 1001
return result
else:
return result
        # walk the seat map of the schedule and find free seats
seq_full = 1
for row in seat_info['data']:
for col in row['col']:
if col['sno'] and col['cid'] and col['st'] == 1:
seq_full = 0
                    # perform the seat lock
params = {"third_from":third_from,"seq_no":seq_no,"third_id":third_id,"log_id":'123456',"phone_number":self.randomPhoneNum(),"seat_info":col['sno']}
base_url = self.base_url + 'seat/lockseat'
seat_lock = self.doRequest(base_url,params)
if seat_lock.has_key('third_order_id') and seat_lock['third_order_id']:
result['success'] += 1
self.redis.incr(REDIS_CONF['SUC_NO']+'_lockseat')
elif seat_lock.has_key('erroring') and seat_lock['erroring'] == 2:
logger.error("::::time out!!!::::")
result['fail'] += 1
self.redis.incr(REDIS_CONF['FAIL_NO']+'_lockseat')
else:
logger.error("::::fail!!!::::")
result['fail'] += 1
self.redis.incr(REDIS_CONF['FAIL_NO']+'_lockseat')
if seq_full == 1:
result['error_no'] = 1002
return result
def setTimeout(self,time_out_seconds):
socket.setdefaulttimeout(time_out_seconds)
if __name__ == '__main__':
base = Ticket_Base()
#print base.getSchedule('newvista')
print base.getSeat('xingmei')
#print base.getSeat(sys.argv[1],seq_no=sys.argv[2],third_id=sys.argv[3],seat_index=45,mode=1)
#print base.getSeat('newvista',seq_no='0000000000005476',third_id='dadi0076',mode=2)
#print base.lockSeat(sys.argv[1],seq_no=sys.argv[2],third_id=sys.argv[3],seat_index=int(sys.argv[4]),mode=1)
#print base.lockSeat(sys.argv[1])
#print base.lockSeat('lanhai',seq_no='151000693975',third_id='32012101',seat_index=1,mode=1)
#print base.getSeatStatus('wangpiao',seq_no='16538746',third_id='1032',seat_no='11690721$11690721_1_14')
#print base.signGenerate({"orderId":"txpcBDMO000023478","status":"PAY_SUCCESS","reduction":"0","requestId":"1234567","totalAmount":"1","paidAmount":"1"})
#print base.lockSeat('shiguang')
#print base.randomPhoneNum()
#print base.lockAllSeat('newvista','0000000000004878','dadi0201')
#print len(base.getAllSchedules('newvista'))
| mit | -1,332,399,588,829,917,200 | 40.604613 | 168 | 0.477437 | false |
mugizico/smappPy | smappPy/tools/extract_user_data.py | 2 | 4506 | """
Compiles basic user data by day (followers, friends, name, screen name, ID, langauge)
@auth pablo, dpb
@date 04/02/2013
"""
import os
import csv
import argparse
import simplejson as json
from collections import defaultdict
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
def extract_user_data(collection, outfile, start_date=None, end_date=None, update=10000):
"""
Extracts user aggregate information from the given collection OF TWEETS, prints basic
data and outputs a CSV. Fields: ScreenName,Name,UserId,Lang,FriendsCount,FollowersCount,
Location,NumTweets.
Takes optional date ranges to constrain query (gte start, lte end. ie, inclusive).
If only one term specified, take everything before end or after start.
"""
csv_header = ["ScreenName", "Name", "UserId", "Lang", "FriendsCount", "FollowersCount", "Location", "NumTweets"]
if start_date and not end_date:
tweets = collection.find({"timestamp": {"$gte": start_date}})
elif not start_date and end_date:
tweets = collection.find({"timestamp": {"$lte": end_date}})
elif start_date and end_date:
tweets = collection.find({"timestamp": {"$gte": start_date, "$lte": end_date}})
else:
tweets = collection.find()
user_tweet_count = defaultdict(int)
user_data = {}
counter = 1
num_tweets = tweets.count()
print "Total collection tweets: {0}".format(collection.count())
print "Total tweets considered: {0}".format(num_tweets)
print "Compiling user data..."
for tweet in tweets:
        if counter % update == 0:
print ".. Progress: {0:.2%}\r".format(float(counter)/num_tweets),
counter += 1
if "user" not in tweet or "id_str" not in tweet["user"]:
continue
uid = tweet["user"]["id_str"]
user_tweet_count[uid] += 1
user_data[uid] = [tweet['user']['screen_name'].encode("utf8"),
tweet['user']['name'].replace(",","").replace("\n","").encode("utf8"),
tweet['user']['id_str'],
tweet['user']['lang'],
tweet['user']['friends_count'],
tweet['user']['followers_count'],
tweet['user']['location'].replace(",","").replace("\n","").encode("utf8"),
user_tweet_count[uid]]
print "Writing aggregate data to file: '{0}'".format(outfile)
with open(outfile, "wb") as out_handle:
csv_handle = csv.writer(out_handle)
csv_handle.writerow(csv_header)
for uid, udata in user_data.items():
csv_handle.writerow(udata)
print "Complete"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Extract and compile user information")
parser.add_argument("-s", "--host", action="store", dest="host", default="localhost",
help="Database server host (default localhost)")
parser.add_argument("-p", "--port", action="store", type=int, dest="port", default=27017,
help="Database server port (default 27017)")
parser.add_argument("-u", "--user", action="store", dest="user", default=None,
help="Database username (default None, ok when username + pass not required")
parser.add_argument("-w", "--password", action="store", dest="password", default=None,
help="Database password (default None, ok when username + pass not required")
parser.add_argument("-d", "--db", action="store", dest="database", required=True,
help="Database containing tweet collection")
parser.add_argument("-c", "--collection", action="store", dest="collection", required=True,
help="Collection of tweet data to iterate over")
parser.add_argument("-o", "--outfile", action="store", dest="outfile", required=True,
help="File to store CSV user data to")
parser.add_argument("--update", action="store", type=int, dest="update", default=10000,
help="Update counter for print output (progress indicator)")
args = parser.parse_args()
mc = MongoClient(args.host, args.port)
db = mc[args.database]
if args.user and args.password:
if not db.authenticate(args.user, args.password):
raise ConnectionFailure(
"Mongo DB Authentication for User {0}, DB {1} failed".format(args.user, args.database))
collection = db[args.collection]
extract_user_data(collection, args.outfile, update=args.update)
| gpl-2.0 | 4,085,438,130,280,368,600 | 42.747573 | 116 | 0.626276 | false |
Cr4sh/ThinkPwn | scan_thinkpwn.py | 1 | 10620 | #!/usr/bin/python
'''
#############################################################################
THINKPWN SCANNER
This program is used to scan UEFI drivers extracted from firmware image
for ThinkPwn vulnerability in vendor/model agnostic way.
    For more information about this vulnerability check the following links:
https://github.com/Cr4sh/ThinkPwn
http://blog.cr4.sh/2016/06/exploring-and-exploiting-lenovo.html
AUTHORS:
@d_olex (aka Cr4sh) -- initial Vivisect based version of the program;
@trufae (aka pankake) -- radare2 based version (this one);
To check the binary for ThinkPwn vulnerability we have to find a vulnerable
System Management Mode (SMM) callback that usually has the following look:
=------------------------------=
| push rbx |
| sub rsp, 0x20 |
| mov rax, qword [rdx + 0x20] |
| mov rbx, rdx |
| test rax, rax |
| je 0xa5c |
=------------------------------=
f t
.----------' '----------------.
| |
| |
=-------------------------------= |
| mov rcx, qword [rax] | |
| lea r8, [rdx + 0x18] | |
| mov rdx, qword [rip + 0x5f4] | |
| call qword [rax + 8] | |
| and qword [rbx + 0x20], 0 | |
=-------------------------------= |
v |
'---------------. .-----------'
| |
| |
=--------------------=
| xor eax, eax |
| add rsp, 0x20 |
| pop rbx |
| ret |
=--------------------=
And decompiled C code of this function:
EFI_STATUS __fastcall sub_AD3AFA54(
EFI_HANDLE SmmImageHandle, VOID *CommunicationBuffer, UINTN *SourceSize)
{
VOID *v3; // rax@1
VOID *v4; // rbx@1
// get some structure pointer from EFI_SMM_COMMUNICATE_HEADER.Data
v3 = *(VOID **)(CommunicationBuffer + 0x20);
v4 = CommunicationBuffer;
if (v3)
{
/*
Vulnarability is here:
this code calls some function by address from obtained v3 structure field.
*/
*(v3 + 0x8)(*(VOID **)v3, &dword_AD002290, CommunicationBuffer + 0x18);
// set zero value to indicate successful operation
*(VOID **)(v4 + 0x20) = 0;
}
return 0;
}
    To match the vulnerable function shown above the program uses a simple binary heuristic
    that checks the number of basic blocks, instructions, global variable usage, etc.
See match_func() subroutine for more details.
USAGE:
1) Install radare2 and r2pipe for Python:
https://radare.org/
https://pypi.python.org/pypi/r2pipe
2) Unpack UEFI firmware image from your computer using UEFIExtract, it's a part
of UEFITool (https://github.com/LongSoft/UEFITool):
# UEFIExtract firmware_image.bin all
3) Run scan_thinkpwn.py with path to the extracted firmware image contents as argument:
# python scan_thinkpwn.py firmware_image.bin.dump
4) At the end of the scan you will see the list of vulnerable SMM callbacks and UEFI
drivers where they're located.
Example of program output on vulnerable firmware from ThinkPad T450s:
http://www.everfall.com/paste/id.php?cztv0fmo03gv
#############################################################################
'''
import os, sys, errno
from threading import Thread
from Queue import Queue
import r2pipe
# Do not load r2 plugins to speedup startup times
os.environ['R2_NOPLUGINS'] = '1'
# you might want to change these parameters to tune the heuristics
BB_COUNT = 3
MAX_INSN = 10
MIN_INSN = 3
GUID_LEN = 0x10
# scan only EFI drivers that contains these GUIDs
GUID_LIST = \
[
# SMM base protocol GUID
'\x4D\x95\x90\x13\x95\xDA\x27\x42\x93\x28\x72\x82\xC2\x17\xDA\xA8',
# SMM communication protocol GUID
'\xE2\xD8\x8E\xC6\xC6\x9D\xBD\x4C\x9D\x94\xDB\x65\xAC\xC5\xC3\x32',
# SMM communicate header GUID
'\x6C\xE3\x28\xF3\xB6\x23\x95\x4A\x85\x4B\x32\xE1\x95\x34\xCD\x75'
]
WORKERS = 4
q, results = Queue(), []
def has_guid(file_path, guid_list, find_any = False):
with open(file_path, 'rb') as fd:
data, guid_found = fd.read(), []
        # look for one or all of the specified GUIDs inside the file contents
for guid in guid_list:
if data.find(guid) != -1:
if find_any: return True
if not guid in guid_found: guid_found.append(guid)
return len(guid_found) == len(guid_list)
def is_valid_file(file_path):
with open(file_path, 'rb') as fd:
# check for DOS header signature
if fd.read(2) != 'MZ': return False
# check if image contains needed GUIDs
return has_guid(file_path, GUID_LIST, find_any = True)
def insn_uses_global(op):
if op['type'] == 'mov':
# get global variable information if MOV instruction is using it
return ( op['esil'].find('rip,+,[8]') != -1, op['esil'].find('=[') != -1 )
# not a MOV instruction
return (0, 0)
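# Illustrative ESIL forms (assumed typical radare2 output, shown only to explain the
# two substring checks above): a RIP-relative read such as
#   mov rax, qword [rip + 0x5f4]   ->  "0x5f4,rip,+,[8],rax,="
# contains 'rip,+,[8]', while a memory write such as
#   mov qword [rbx + 0x20], 0      ->  "0,0x20,rbx,+,=[8]"
# contains '=['.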
class BasicBlock(object):
def __init__(self, r2, addr, size, insn_num):
self.addr, self.size = addr, size
self.insn_num = insn_num
self.calls_total, self.calls_matched = 0, 0
self.glob_reads, self.glob_writes = 0, 0
# disassemble basic block
r2ops = r2.cmdj('aoj %d @ 0x%x' % (insn_num, addr))
# update instructions information
for op in r2ops:
# check for the CALL instruction
self.check_call(op)
# check for the MOV instruction with global variable as operand
self.check_glob(op)
def check_call(self, op):
if op['type'] == 'call':
            # regular function call
self.calls_total += 1
elif op['type'] == 'ucall' and op['opcode'].find('[') != -1:
# call function by pointer
self.calls_total += 1
self.calls_matched += 1
def check_glob(self, op):
# check if instruction reads or writes some global variable
r, w = insn_uses_global(op)
if r: self.glob_reads += 1
if w: self.glob_writes += 1
def match_func(r2, addr):
bb_all = []
# obtain list of basic blocks for given function
bb_list = r2.cmdj('afbj %s' % addr)
if len(bb_list) != BB_COUNT: return False
for bb in bb_list:
insn_num = bb['ninstr']
        # check that the basic block has an acceptable number of instructions
if insn_num > MAX_INSN or insn_num < MIN_INSN:
return False
# analyze basic block
bb = BasicBlock(r2, bb['addr'], bb['size'], insn_num)
bb_all.append(bb)
#
# check calls and global variables usage for each basic block
#
if bb_all[0].calls_total != 0 or bb_all[0].calls_matched != 0: return False
if bb_all[0].glob_reads != 0 or bb_all[0].glob_writes != 0: return False
if bb_all[1].calls_total != 1 or bb_all[1].calls_matched != 1: return False
if bb_all[1].glob_reads != 1 or bb_all[1].glob_writes != 0: return False
if bb_all[2].calls_total != 0 or bb_all[2].calls_matched != 0: return False
if bb_all[2].glob_reads != 0 or bb_all[2].glob_writes != 0: return False
# vulnerable function was matched!
return True
class Watcher:
''' This class solves two problems with multithreaded
programs in Python, (1) a signal might be delivered
to any thread (which is just a malfeature) and (2) if
the thread that gets the signal is waiting, the signal
is ignored (which is a bug). '''
def __init__(self):
''' Creates a child thread, which returns. The parent
thread waits for a KeyboardInterrupt and then kills
the child thread. '''
self.child = os.fork()
if self.child == 0: return
else: self.watch()
def watch(self):
try:
os.wait()
except KeyboardInterrupt:
print('\nEXIT')
self.kill()
sys.exit(errno.ECANCELED)
def kill(self):
try: os.kill(self.child, signal.SIGKILL)
except OSError: pass
def scan_file(file_path):
ret = []
print('Scanning \"%s\"...' % file_path)
# start radare instance
r2 = r2pipe.open(file_path)
# perform initial analysis
r2.cmd('aa;aad')
# enumerate available functions
for addr in r2.cmdj('aflqj'):
# check for vulnerable function
if match_func(r2, addr):
print('VULNERABLE FUNCTION: %s' % addr)
ret.append(addr)
# close radare instance
r2.quit()
return ret
def worker():
global q, results
while True:
file_path = q.get()
# scan single file
procs = scan_file(file_path)
if len(procs) > 0:
# save scan results
results.append(( file_path, procs ))
q.task_done()
def scan_dir(dir_path):
for file_name in os.listdir(dir_path):
file_path = os.path.join(dir_path, file_name)
if os.path.isfile(file_path) and is_valid_file(file_path):
# queue scanning of the single file
q.put(file_path)
elif os.path.isdir(file_path):
scan_dir(file_path)
def main():
global q, results
if len(sys.argv) < 2:
print('USAGE: scan_thinkpwn.py <unpacked_firmware_dir>')
return -1
# ctrl+C handling stuff
if sys.platform != 'win32': Watcher()
# run worker threads
for i in range(WORKERS):
t = Thread(target = worker)
t.daemon = True
t.start()
# scan files in target directory
scan_dir(sys.argv[1])
q.join()
print('**************************************')
print('SCAN RESULTS:')
# print scan results
for file_path, matched in results:
print('\n' + file_path + '\n')
for addr in matched:
print(' * %s' % addr)
print('')
return 0
if __name__ == '__main__':
exit(main())
#
# EoF
#
| gpl-3.0 | -4,115,384,228,921,796,600 | 25.287129 | 91 | 0.531262 | false |
kursitet/edx-platform | common/lib/xmodule/xmodule/tests/test_export.py | 28 | 13552 | """
Tests of XML export
"""
import ddt
import lxml.etree
import mock
import os
import pytz
import shutil
import tarfile
import unittest
import uuid
from datetime import datetime, timedelta, tzinfo
from fs.osfs import OSFS
from path import Path as path
from tempfile import mkdtemp
from textwrap import dedent
from xblock.core import XBlock
from xblock.fields import String, Scope, Integer
from xblock.test.tools import blocks_are_equivalent
from opaque_keys.edx.locations import Location
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore.xml_exporter import (
convert_between_versions, get_version
)
from xmodule.tests import DATA_DIR
from xmodule.tests.helpers import directories_equal
from xmodule.x_module import XModuleMixin
def strip_filenames(descriptor):
"""
Recursively strips 'filename' from all children's definitions.
"""
print "strip filename from {desc}".format(desc=descriptor.location.to_deprecated_string())
if descriptor._field_data.has(descriptor, 'filename'):
descriptor._field_data.delete(descriptor, 'filename')
if hasattr(descriptor, 'xml_attributes'):
if 'filename' in descriptor.xml_attributes:
del descriptor.xml_attributes['filename']
for child in descriptor.get_children():
strip_filenames(child)
descriptor.save()
class PureXBlock(XBlock):
"""Class for testing pure XBlocks."""
has_children = True
field1 = String(default="something", scope=Scope.user_state)
field2 = Integer(scope=Scope.user_state)
@ddt.ddt
class RoundTripTestCase(unittest.TestCase):
"""
Check that our test courses roundtrip properly.
Same course imported , than exported, then imported again.
And we compare original import with second import (after export).
Thus we make sure that export and import work properly.
"""
def setUp(self):
super(RoundTripTestCase, self).setUp()
self.maxDiff = None
self.temp_dir = mkdtemp()
self.addCleanup(shutil.rmtree, self.temp_dir)
@mock.patch('xmodule.course_module.requests.get')
@ddt.data(
"toy",
"simple",
"conditional_and_poll",
"conditional",
"self_assessment",
"graphic_slider_tool",
"test_exam_registration",
"word_cloud",
"pure_xblock",
)
@XBlock.register_temp_plugin(PureXBlock, 'pure')
def test_export_roundtrip(self, course_dir, mock_get):
# Patch network calls to retrieve the textbook TOC
mock_get.return_value.text = dedent("""
<?xml version="1.0"?><table_of_contents>
<entry page="5" page_label="ii" name="Table of Contents"/>
</table_of_contents>
""").strip()
root_dir = path(self.temp_dir)
print "Copying test course to temp dir {0}".format(root_dir)
data_dir = path(DATA_DIR)
shutil.copytree(data_dir / course_dir, root_dir / course_dir)
print "Starting import"
initial_import = XMLModuleStore(root_dir, source_dirs=[course_dir], xblock_mixins=(XModuleMixin,))
courses = initial_import.get_courses()
self.assertEquals(len(courses), 1)
initial_course = courses[0]
# export to the same directory--that way things like the custom_tags/ folder
# will still be there.
print "Starting export"
file_system = OSFS(root_dir)
initial_course.runtime.export_fs = file_system.makeopendir(course_dir)
root = lxml.etree.Element('root')
initial_course.add_xml_to_node(root)
with initial_course.runtime.export_fs.open('course.xml', 'w') as course_xml:
lxml.etree.ElementTree(root).write(course_xml)
print "Starting second import"
second_import = XMLModuleStore(root_dir, source_dirs=[course_dir], xblock_mixins=(XModuleMixin,))
courses2 = second_import.get_courses()
self.assertEquals(len(courses2), 1)
exported_course = courses2[0]
print "Checking course equality"
# HACK: filenames change when changing file formats
# during imports from old-style courses. Ignore them.
strip_filenames(initial_course)
strip_filenames(exported_course)
self.assertTrue(blocks_are_equivalent(initial_course, exported_course))
self.assertEquals(initial_course.id, exported_course.id)
course_id = initial_course.id
print "Checking key equality"
self.assertItemsEqual(
initial_import.modules[course_id].keys(),
second_import.modules[course_id].keys()
)
print "Checking module equality"
for location in initial_import.modules[course_id].keys():
print("Checking", location)
self.assertTrue(blocks_are_equivalent(
initial_import.modules[course_id][location],
second_import.modules[course_id][location]
))
class TestEdxJsonEncoder(unittest.TestCase):
"""
Tests for xml_exporter.EdxJSONEncoder
"""
def setUp(self):
super(TestEdxJsonEncoder, self).setUp()
self.encoder = EdxJSONEncoder()
class OffsetTZ(tzinfo):
"""A timezone with non-None utcoffset"""
def utcoffset(self, _dt):
return timedelta(hours=4)
self.offset_tz = OffsetTZ()
class NullTZ(tzinfo):
"""A timezone with None as its utcoffset"""
def utcoffset(self, _dt):
return None
self.null_utc_tz = NullTZ()
def test_encode_location(self):
loc = Location('org', 'course', 'run', 'category', 'name', None)
self.assertEqual(loc.to_deprecated_string(), self.encoder.default(loc))
loc = Location('org', 'course', 'run', 'category', 'name', 'version')
self.assertEqual(loc.to_deprecated_string(), self.encoder.default(loc))
def test_encode_naive_datetime(self):
self.assertEqual(
"2013-05-03T10:20:30.000100",
self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 100))
)
self.assertEqual(
"2013-05-03T10:20:30",
self.encoder.default(datetime(2013, 5, 3, 10, 20, 30))
)
def test_encode_utc_datetime(self):
self.assertEqual(
"2013-05-03T10:20:30+00:00",
self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 0, pytz.UTC))
)
self.assertEqual(
"2013-05-03T10:20:30+04:00",
self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 0, self.offset_tz))
)
self.assertEqual(
"2013-05-03T10:20:30Z",
self.encoder.default(datetime(2013, 5, 3, 10, 20, 30, 0, self.null_utc_tz))
)
def test_fallthrough(self):
with self.assertRaises(TypeError):
self.encoder.default(None)
with self.assertRaises(TypeError):
self.encoder.default({})
class ConvertExportFormat(unittest.TestCase):
"""
Tests converting between export formats.
"""
def setUp(self):
""" Common setup. """
super(ConvertExportFormat, self).setUp()
# Directory for expanding all the test archives
self.temp_dir = mkdtemp()
self.addCleanup(shutil.rmtree, self.temp_dir)
# Directory where new archive will be created
self.result_dir = path(self.temp_dir) / uuid.uuid4().hex
os.mkdir(self.result_dir)
# Expand all the test archives and store their paths.
self.data_dir = path(__file__).realpath().parent / 'data'
self._version0_nodrafts = None
self._version1_nodrafts = None
self._version0_drafts = None
self._version1_drafts = None
self._version1_drafts_extra_branch = None
self._no_version = None
@property
def version0_nodrafts(self):
"lazily expand this"
if self._version0_nodrafts is None:
self._version0_nodrafts = self._expand_archive('Version0_nodrafts.tar.gz')
return self._version0_nodrafts
@property
def version1_nodrafts(self):
"lazily expand this"
if self._version1_nodrafts is None:
self._version1_nodrafts = self._expand_archive('Version1_nodrafts.tar.gz')
return self._version1_nodrafts
@property
def version0_drafts(self):
"lazily expand this"
if self._version0_drafts is None:
self._version0_drafts = self._expand_archive('Version0_drafts.tar.gz')
return self._version0_drafts
@property
def version1_drafts(self):
"lazily expand this"
if self._version1_drafts is None:
self._version1_drafts = self._expand_archive('Version1_drafts.tar.gz')
return self._version1_drafts
@property
def version1_drafts_extra_branch(self):
"lazily expand this"
if self._version1_drafts_extra_branch is None:
self._version1_drafts_extra_branch = self._expand_archive('Version1_drafts_extra_branch.tar.gz')
return self._version1_drafts_extra_branch
@property
def no_version(self):
"lazily expand this"
if self._no_version is None:
self._no_version = self._expand_archive('NoVersionNumber.tar.gz')
return self._no_version
def _expand_archive(self, name):
""" Expand archive into a directory and return the directory. """
target = path(self.temp_dir) / uuid.uuid4().hex
os.mkdir(target)
with tarfile.open(self.data_dir / name) as tar_file:
tar_file.extractall(path=target)
return target
def test_no_version(self):
""" Test error condition of no version number specified. """
errstring = "unknown version"
with self.assertRaisesRegexp(ValueError, errstring):
convert_between_versions(self.no_version, self.result_dir)
def test_no_published(self):
""" Test error condition of a version 1 archive with no published branch. """
errstring = "version 1 archive must contain a published branch"
no_published = self._expand_archive('Version1_nopublished.tar.gz')
with self.assertRaisesRegexp(ValueError, errstring):
convert_between_versions(no_published, self.result_dir)
def test_empty_course(self):
""" Test error condition of a version 1 archive with no published branch. """
errstring = "source archive does not have single course directory at top level"
empty_course = self._expand_archive('EmptyCourse.tar.gz')
with self.assertRaisesRegexp(ValueError, errstring):
convert_between_versions(empty_course, self.result_dir)
def test_convert_to_1_nodrafts(self):
"""
Test for converting from version 0 of export format to version 1 in a course with no drafts.
"""
self._verify_conversion(self.version0_nodrafts, self.version1_nodrafts)
def test_convert_to_1_drafts(self):
"""
Test for converting from version 0 of export format to version 1 in a course with drafts.
"""
self._verify_conversion(self.version0_drafts, self.version1_drafts)
def test_convert_to_0_nodrafts(self):
"""
Test for converting from version 1 of export format to version 0 in a course with no drafts.
"""
self._verify_conversion(self.version1_nodrafts, self.version0_nodrafts)
def test_convert_to_0_drafts(self):
"""
Test for converting from version 1 of export format to version 0 in a course with drafts.
"""
self._verify_conversion(self.version1_drafts, self.version0_drafts)
def test_convert_to_0_extra_branch(self):
"""
Test for converting from version 1 of export format to version 0 in a course
with drafts and an extra branch.
"""
self._verify_conversion(self.version1_drafts_extra_branch, self.version0_drafts)
def test_equality_function(self):
"""
Check equality function returns False for unequal directories.
"""
self.assertFalse(directories_equal(self.version1_nodrafts, self.version0_nodrafts))
self.assertFalse(directories_equal(self.version1_drafts_extra_branch, self.version1_drafts))
def test_version_0(self):
"""
Check that get_version correctly identifies a version 0 archive (old format).
"""
self.assertEqual(0, self._version_test(self.version0_nodrafts))
def test_version_1(self):
"""
Check that get_version correctly identifies a version 1 archive (new format).
"""
self.assertEqual(1, self._version_test(self.version1_nodrafts))
def test_version_missing(self):
"""
Check that get_version returns None if no version number is specified,
and the archive is not version 0.
"""
self.assertIsNone(self._version_test(self.no_version))
def _version_test(self, archive_dir):
"""
Helper function for version tests.
"""
root = os.listdir(archive_dir)
course_directory = archive_dir / root[0]
return get_version(course_directory)
def _verify_conversion(self, source_archive, comparison_archive):
"""
Helper function for conversion tests.
"""
convert_between_versions(source_archive, self.result_dir)
self.assertTrue(directories_equal(self.result_dir, comparison_archive))
| agpl-3.0 | -5,525,782,953,217,438,000 | 34.018088 | 108 | 0.642857 | false |
archatas/whoosh | whoosh/lang/lovins.py | 1 | 12657 | """This module implements the Lovins stemming algorithm. Use the ``stem()``
function::
stemmed_word = stem(word)
"""
from whoosh.util.collections2 import defaultdict
# Conditions
def A(base):
# A No restrictions on stem
return True
def B(base):
# B Minimum stem length = 3
return len(base) > 2
def C(base):
# C Minimum stem length = 4
return len(base) > 3
def D(base):
# D Minimum stem length = 5
return len(base) > 4
def E(base):
# E Do not remove ending after e
return base[-1] != "e"
def F(base):
# F Minimum stem length = 3 and do not remove ending after e
return len(base) > 2 and base[-1] != "e"
def G(base):
# G Minimum stem length = 3 and remove ending only after f
return len(base) > 2 and base[-1] == "f"
def H(base):
# H Remove ending only after t or ll
c1, c2 = base[-2:]
return c2 == "t" or (c2 == "l" and c1 == "l")
def I(base):
# I Do not remove ending after o or e
c = base[-1]
return c != "o" and c != "e"
def J(base):
# J Do not remove ending after a or e
c = base[-1]
return c != "a" and c != "e"
def K(base):
# K Minimum stem length = 3 and remove ending only after l, i or u*e
c = base[-1]
cc = base[-3]
return len(base) > 2 and (c == "l" or c == "i" or (c == "e" and cc == "u"))
def L(base):
# L Do not remove ending after u, x or s, unless s follows o
c1, c2 = base[-2:]
return c2 != "u" and c2 != "x" and (c2 != "s" or c1 == "o")
def M(base):
# M Do not remove ending after a, c, e or m
c = base[-1]
return c != "a" and c!= "c" and c != "e" and c != "m"
def N(base):
# N Minimum stem length = 4 after s**, elsewhere = 3
return len(base) > 3 or (len(base) == 3 and base[-1] != "s")
def O(base):
# O Remove ending only after l or i
c = base[-1]
return c == "l" or c == "i"
def P(base):
# P Do not remove ending after c
return base[-1] != "c"
def Q(base):
# Q Minimum stem length = 3 and do not remove ending after l or n
c = base[-1]
return len(base) > 2 and (c != "l" and c != "n")
def R(base):
# R Remove ending only after n or r
c = base[-1]
return c == "n" or c == "r"
def S(base):
# S Remove ending only after dr or t, unless t follows t
    l2 = base[-2:]
    return l2 == "dr" or (base[-1] == "t" and l2 != "tt")
def T(base):
# T Remove ending only after s or t, unless t follows o
c1, c2 = base[-2:]
return c2 == "s" or (c2 == "t" and c1 != "o")
def U(base):
# U Remove ending only after l, m, n or r
c = base[-1]
return c == "l" or c == "m" or c == "n" or c == "r"
def V(base):
# V Remove ending only after c
return base[-1] == "c"
def W(base):
# W Do not remove ending after s or u
c = base[-1]
return c != "s" and c != "u"
def X(base):
# X Remove ending only after l, i or u*e
c = base[-1]
cc = base[-3]
return c == "l" or c == "i" or (c == "e" and cc == "u")
def Y(base):
# Y Remove ending only after in
return base[-2:] == "in"
def Z(base):
# Z Do not remove ending after f
return base[-1] != "f"
def a(base):
# a Remove ending only after d, f, ph, th, l, er, or, es or t
c = base[-1]
l2 = base[-2:]
return (c == "d" or c == "f" or l2 == "ph" or l2 == "th" or c == "l"
or l2 == "er" or l2 == "or" or l2 == "es" or c == "t")
def b(base):
# b Minimum stem length = 3 and do not remove ending after met or ryst
return len(base) > 2 and not (base.endswith("met")
or base.endswith("ryst"))
def c(base):
# c Remove ending only after l
return base[-1] == "l"
# Endings
m = [None] * 12
m[11] = dict((
("alistically", B),
("arizability", A),
("izationally", B)))
m[10] = dict((
("antialness", A),
("arisations", A),
("arizations", A),
("entialness", A)))
m[9] = dict((
("allically", C),
("antaneous", A),
("antiality", A),
("arisation", A),
("arization", A),
("ationally", B),
("ativeness", A),
("eableness", E),
("entations", A),
("entiality", A),
("entialize", A),
("entiation", A),
("ionalness", A),
("istically", A),
("itousness", A),
("izability", A),
("izational", A)))
m[8] = dict((
("ableness", A),
("arizable", A),
("entation", A),
("entially", A),
("eousness", A),
("ibleness", A),
("icalness", A),
("ionalism", A),
("ionality", A),
("ionalize", A),
("iousness", A),
("izations", A),
("lessness", A)))
m[7] = dict((
("ability", A),
("aically", A),
("alistic", B),
("alities", A),
("ariness", E),
("aristic", A),
("arizing", A),
("ateness", A),
("atingly", A),
("ational", B),
("atively", A),
("ativism", A),
("elihood", E),
("encible", A),
("entally", A),
("entials", A),
("entiate", A),
("entness", A),
("fulness", A),
("ibility", A),
("icalism", A),
("icalist", A),
("icality", A),
("icalize", A),
("ication", G),
("icianry", A),
("ination", A),
("ingness", A),
("ionally", A),
("isation", A),
("ishness", A),
("istical", A),
("iteness", A),
("iveness", A),
("ivistic", A),
("ivities", A),
("ization", F),
("izement", A),
("oidally", A),
("ousness", A)))
m[6] = dict((
("aceous", A),
("acious", B),
("action", G),
("alness", A),
("ancial", A),
("ancies", A),
("ancing", B),
("ariser", A),
("arized", A),
("arizer", A),
("atable", A),
("ations", B),
("atives", A),
("eature", Z),
("efully", A),
("encies", A),
("encing", A),
("ential", A),
("enting", C),
("entist", A),
("eously", A),
("ialist", A),
("iality", A),
("ialize", A),
("ically", A),
("icance", A),
("icians", A),
("icists", A),
("ifully", A),
("ionals", A),
("ionate", D),
("ioning", A),
("ionist", A),
("iously", A),
("istics", A),
("izable", E),
("lessly", A),
("nesses", A),
("oidism", A)))
m[5] = dict((
("acies", A),
("acity", A),
("aging", B),
("aical", A),
("alist", A),
("alism", B),
("ality", A),
("alize", A),
("allic", b),
("anced", B),
("ances", B),
("antic", C),
("arial", A),
("aries", A),
("arily", A),
("arity", B),
("arize", A),
("aroid", A),
("ately", A),
("ating", I),
("ation", B),
("ative", A),
("ators", A),
("atory", A),
("ature", E),
("early", Y),
("ehood", A),
("eless", A),
("elily", A),
("ement", A),
("enced", A),
("ences", A),
("eness", E),
("ening", E),
("ental", A),
("ented", C),
("ently", A),
("fully", A),
("ially", A),
("icant", A),
("ician", A),
("icide", A),
("icism", A),
("icist", A),
("icity", A),
("idine", I),
("iedly", A),
("ihood", A),
("inate", A),
("iness", A),
("ingly", B),
("inism", J),
("inity", c),
("ional", A),
("ioned", A),
("ished", A),
("istic", A),
("ities", A),
("itous", A),
("ively", A),
("ivity", A),
("izers", F),
("izing", F),
("oidal", A),
("oides", A),
("otide", A),
("ously", A)))
m[4] = dict((
("able", A),
("ably", A),
("ages", B),
("ally", B),
("ance", B),
("ancy", B),
("ants", B),
("aric", A),
("arly", K),
("ated", I),
("ates", A),
("atic", B),
("ator", A),
("ealy", Y),
("edly", E),
("eful", A),
("eity", A),
("ence", A),
("ency", A),
("ened", E),
("enly", E),
("eous", A),
("hood", A),
("ials", A),
("ians", A),
("ible", A),
("ibly", A),
("ical", A),
("ides", L),
("iers", A),
("iful", A),
("ines", M),
("ings", N),
("ions", B),
("ious", A),
("isms", B),
("ists", A),
("itic", H),
("ized", F),
("izer", F),
("less", A),
("lily", A),
("ness", A),
("ogen", A),
("ward", A),
("wise", A),
("ying", B),
("yish", A)))
m[3] = dict((
("acy", A),
("age", B),
("aic", A),
("als", b),
("ant", B),
("ars", O),
("ary", F),
("ata", A),
("ate", A),
("eal", Y),
("ear", Y),
("ely", E),
("ene", E),
("ent", C),
("ery", E),
("ese", A),
("ful", A),
("ial", A),
("ian", A),
("ics", A),
("ide", L),
("ied", A),
("ier", A),
("ies", P),
("ily", A),
("ine", M),
("ing", N),
("ion", Q),
("ish", C),
("ism", B),
("ist", A),
("ite", a),
("ity", A),
("ium", A),
("ive", A),
("ize", F),
("oid", A),
("one", R),
("ous", A)))
m[2] = dict((
("ae", A),
("al", b),
("ar", X),
("as", B),
("ed", E),
("en", F),
("es", E),
("ia", A),
("ic", A),
("is", A),
("ly", B),
("on", S),
("or", T),
("um", U),
("us", V),
("yl", R),
("s'", A),
("'s", A)))
m[1] = dict((
("a", A),
("e", A),
("i", A),
("o", A),
("s", W),
("y", B)))
def remove_ending(word):
length = len(word)
el = 11
while el > 0:
if length - el > 1:
ending = word[length-el:]
cond = m[el].get(ending)
if cond:
base = word[:length-el]
if cond(base):
return base
el -= 1
return word
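# Recoding rules applied to the stripped stem: (target_ending, replacement)
# pairs, optionally followed by a string of letters; when the matched ending
# is immediately preceded by one of those letters the rule is skipped.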
_endings = (("iev", "ief"),
("uct", "uc"),
("iev", "ief"),
("uct", "uc"),
("umpt", "um"),
("rpt", "rb"),
("urs", "ur"),
("istr", "ister"),
("metr", "meter"),
("olv", "olut"),
("ul", "l", "aoi"),
("bex", "bic"),
("dex", "dic"),
("pex", "pic"),
("tex", "tic"),
("ax", "ac"),
("ex", "ec"),
("ix", "ic"),
("lux", "luc"),
("uad", "uas"),
("vad", "vas"),
("cid", "cis"),
("lid", "lis"),
("erid", "eris"),
("pand", "pans"),
("end", "ens", "s"),
("ond", "ons"),
("lud", "lus"),
("rud", "rus"),
("her", "hes", "pt"),
("mit", "mis"),
("ent", "ens", "m"),
("ert", "ers"),
("et", "es", "n"),
("yt", "ys"),
("yz", "ys"))
# Hash the ending rules by the last letter of the target ending
_endingrules = defaultdict(list)
for rule in _endings:
_endingrules[rule[0][-1]].append(rule)
_doubles = frozenset(("dd", "gg", "ll", "mm", "nn", "pp", "rr", "ss", "tt"))
def fix_ending(word):
if word[-2:] in _doubles:
word = word[:-1]
for endingrule in _endingrules[word[-1]]:
target, newend = endingrule[:2]
if word.endswith(target):
if len(endingrule) > 2:
exceptafter = endingrule[2]
c = word[0-(len(target)+1)]
if c in exceptafter: return word
return word[:0-len(target)] + newend
return word
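# Illustrative example: stem("sitting") -> remove_ending() strips "ing"
# (condition N holds for the stem "sitt"), then fix_ending() undoubles the
# trailing "tt", giving "sit".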
def stem(word):
"""Returns the stemmed version of the argument string.
"""
return fix_ending(remove_ending(word))
| apache-2.0 | 895,048,491,556,534,700 | 22.352399 | 79 | 0.372521 | false |
johnchronis/exareme | exareme-tools/madis/src/functions/vtable/mergeunion.py | 3 | 2587 | import setpath
import vtbase
import functions
import heapq
### Classic stream iterator
registered=True
class MergeUnion(vtbase.VT):
def BestIndex(self, constraints, orderbys):
return (None, 0, None, True, 1000)
def VTiter(self, *parsedArgs,**envars):
largs, dictargs = self.full_parse(parsedArgs)
if len(largs) < 1:
            raise functions.OperatorError(__name__.rsplit('.')[-1], "No union tables defined")
streams = str(largs[0]).split(",")
if len(streams) < 2:
            raise functions.OperatorError(__name__.rsplit('.')[-1], "At least two union tables are required")
cursors = []
execs = []
for stream in streams:
cursors.append(envars['db'].cursor())
execs.append(cursors[-1].execute("select * from " + str(stream) + ";"))
comparedcursor = str(cursors[0].getdescriptionsafe())
# for cursor in cursors:
# if str(cursor.getdescriptionsafe()) != comparedcursor:
# raise functions.OperatorError(__name__.rsplit('.')[-1],"Union tables with different schemas ")
if 'cols' in dictargs:
try:
cols = int(dictargs['cols'])
except ValueError:
try:
cols = [y[0] for y in cursors[0].getdescriptionsafe()].index(dictargs['cols'])
except ValueError:
                    raise functions.OperatorError(__name__.rsplit('.')[-1], "Column name does not exist")
else:
cols=0
if cols >= len(cursors[0].getdescriptionsafe()):
            raise functions.OperatorError(__name__.rsplit('.')[-1], "Column position does not exist")
for x in range(0, len(streams)):
execs[x] = ((v[cols], v) for v in execs[x])
try:
yield list(cursors[0].getdescriptionsafe())
except StopIteration:
try:
raise
finally:
try:
for cur in cursors:
cur.close()
except:
pass
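        # k-way merge of the per-table cursors: each stream yields (key, row)
        # pairs, and heapq.merge assumes every input is already sorted on the
        # chosen key column.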
for _, v in heapq.merge(*execs):
yield v
def Source():
return vtbase.VTGenerator(MergeUnion)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| mit | -5,264,859,311,742,906,000 | 30.168675 | 112 | 0.545033 | false |
astropy/astropy | astropy/visualization/scripts/fits2bitmap.py | 8 | 7364 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy.visualization.mpl_normalize import simple_norm
from astropy import log
from astropy.io.fits import getdata
def fits2bitmap(filename, ext=0, out_fn=None, stretch='linear',
power=1.0, asinh_a=0.1, min_cut=None, max_cut=None,
min_percent=None, max_percent=None, percent=None,
cmap='Greys_r'):
"""
Create a bitmap file from a FITS image, applying a stretching
transform between minimum and maximum cut levels and a matplotlib
colormap.
Parameters
----------
filename : str
The filename of the FITS file.
ext : int
FITS extension name or number of the image to convert. The
default is 0.
out_fn : str
The filename of the output bitmap image. The type of bitmap
is determined by the filename extension (e.g. '.jpg', '.png').
The default is a PNG file with the same name as the FITS file.
stretch : {'linear', 'sqrt', 'power', log', 'asinh'}
The stretching function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
min_cut : float, optional
The pixel value of the minimum cut level. Data values less than
``min_cut`` will set to ``min_cut`` before stretching the image.
The default is the image minimum. ``min_cut`` overrides
``min_percent``.
max_cut : float, optional
The pixel value of the maximum cut level. Data values greater
than ``min_cut`` will set to ``min_cut`` before stretching the
image. The default is the image maximum. ``max_cut`` overrides
``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
level will set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
cmap : str
The matplotlib color map name. The default is 'Greys_r'.
"""
import matplotlib
import matplotlib.cm as cm
import matplotlib.image as mimg
# __main__ gives ext as a string
try:
ext = int(ext)
except ValueError:
pass
try:
image = getdata(filename, ext)
except Exception as e:
log.critical(e)
return 1
if image.ndim != 2:
        log.critical(f'data in FITS extension {ext} is not a 2D array')
        return 1
if out_fn is None:
out_fn = os.path.splitext(filename)[0]
if out_fn.endswith('.fits'):
out_fn = os.path.splitext(out_fn)[0]
out_fn += '.png'
# explicitly define the output format
out_format = os.path.splitext(out_fn)[1][1:]
try:
cm.get_cmap(cmap)
except ValueError:
log.critical(f'{cmap} is not a valid matplotlib colormap name.')
return 1
norm = simple_norm(image, stretch=stretch, power=power, asinh_a=asinh_a,
min_cut=min_cut, max_cut=max_cut,
min_percent=min_percent, max_percent=max_percent,
percent=percent)
mimg.imsave(out_fn, norm(image), cmap=cmap, origin='lower',
format=out_format)
log.info(f'Saved file to {out_fn}.')
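# Illustrative use (assumed filenames): fits2bitmap('image.fits', stretch='log',
# percent=99.0) would write 'image.png' next to the FITS file using the default
# Greys_r colormap.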
def main(args=None):
import argparse
parser = argparse.ArgumentParser(
description='Create a bitmap file from a FITS image.')
parser.add_argument('-e', '--ext', metavar='hdu', default=0,
help='Specify the HDU extension number or name '
'(Default is 0).')
parser.add_argument('-o', metavar='filename', type=str, default=None,
help='Filename for the output image (Default is a '
'PNG file with the same name as the FITS file).')
parser.add_argument('--stretch', type=str, default='linear',
help='Type of image stretching ("linear", "sqrt", '
'"power", "log", or "asinh") (Default is "linear").')
parser.add_argument('--power', type=float, default=1.0,
help='Power index for "power" stretching (Default is '
'1.0).')
parser.add_argument('--asinh_a', type=float, default=0.1,
help='The value in normalized image where the asinh '
'curve transitions from linear to logarithmic '
'behavior (used only for "asinh" stretch) '
'(Default is 0.1).')
parser.add_argument('--min_cut', type=float, default=None,
help='The pixel value of the minimum cut level '
'(Default is the image minimum).')
parser.add_argument('--max_cut', type=float, default=None,
help='The pixel value of the maximum cut level '
'(Default is the image maximum).')
parser.add_argument('--min_percent', type=float, default=None,
help='The percentile value used to determine the '
'minimum cut level (Default is 0).')
parser.add_argument('--max_percent', type=float, default=None,
help='The percentile value used to determine the '
'maximum cut level (Default is 100).')
parser.add_argument('--percent', type=float, default=None,
help='The percentage of the image values used to '
'determine the pixel values of the minimum and '
'maximum cut levels (Default is 100).')
parser.add_argument('--cmap', metavar='colormap_name', type=str,
default='Greys_r', help='matplotlib color map name '
'(Default is "Greys_r").')
parser.add_argument('filename', nargs='+',
help='Path to one or more FITS files to convert')
args = parser.parse_args(args)
for filename in args.filename:
fits2bitmap(filename, ext=args.ext, out_fn=args.o,
stretch=args.stretch, min_cut=args.min_cut,
max_cut=args.max_cut, min_percent=args.min_percent,
max_percent=args.max_percent, percent=args.percent,
power=args.power, asinh_a=args.asinh_a, cmap=args.cmap)
| bsd-3-clause | -626,384,243,583,890,300 | 43.902439 | 78 | 0.580255 | false |
dragorosson/heat | heat/engine/resources/openstack/heat/structured_config.py | 3 | 8968 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import functools
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.heat import software_config as sc
from heat.engine.resources.openstack.heat import software_deployment as sd
from heat.engine import rsrc_defn
from heat.engine import support
class StructuredConfig(sc.SoftwareConfig):
"""A resource which has same logic with OS::Heat::SoftwareConfig.
This resource is like OS::Heat::SoftwareConfig except that the config
property is represented by a Map rather than a String.
This is useful for configuration tools which use YAML or JSON as their
configuration syntax. The resulting configuration is transferred,
stored and returned by the software_configs API as parsed JSON.
"""
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
GROUP,
CONFIG,
OPTIONS,
INPUTS,
OUTPUTS
) = (
sc.SoftwareConfig.GROUP,
sc.SoftwareConfig.CONFIG,
sc.SoftwareConfig.OPTIONS,
sc.SoftwareConfig.INPUTS,
sc.SoftwareConfig.OUTPUTS
)
properties_schema = {
GROUP: sc.SoftwareConfig.properties_schema[GROUP],
OPTIONS: sc.SoftwareConfig.properties_schema[OPTIONS],
INPUTS: sc.SoftwareConfig.properties_schema[INPUTS],
OUTPUTS: sc.SoftwareConfig.properties_schema[OUTPUTS],
CONFIG: properties.Schema(
properties.Schema.MAP,
_('Map representing the configuration data structure which will '
'be serialized to JSON format.')
)
}
class StructuredDeployment(sd.SoftwareDeployment):
"""A resource which has same logic with OS::Heat::SoftwareDeployment.
A deployment resource like OS::Heat::SoftwareDeployment, but which
performs input value substitution on the config defined by a
OS::Heat::StructuredConfig resource.
Some configuration tools have no concept of inputs, so the input value
substitution needs to occur in the deployment resource. An example of this
is the JSON metadata consumed by the cfn-init tool.
Where the config contains {get_input: input_name} this will be substituted
with the value of input_name in this resource's input_values. If get_input
needs to be passed through to the substituted configuration then a
different input_key property value can be specified.
"""
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
CONFIG,
SERVER,
INPUT_VALUES,
DEPLOY_ACTIONS,
NAME,
SIGNAL_TRANSPORT,
INPUT_KEY,
INPUT_VALUES_VALIDATE
) = (
sd.SoftwareDeployment.CONFIG,
sd.SoftwareDeployment.SERVER,
sd.SoftwareDeployment.INPUT_VALUES,
sd.SoftwareDeployment.DEPLOY_ACTIONS,
sd.SoftwareDeployment.NAME,
sd.SoftwareDeployment.SIGNAL_TRANSPORT,
'input_key',
'input_values_validate'
)
_sd_ps = sd.SoftwareDeployment.properties_schema
properties_schema = {
CONFIG: _sd_ps[CONFIG],
SERVER: _sd_ps[SERVER],
INPUT_VALUES: _sd_ps[INPUT_VALUES],
DEPLOY_ACTIONS: _sd_ps[DEPLOY_ACTIONS],
SIGNAL_TRANSPORT: _sd_ps[SIGNAL_TRANSPORT],
NAME: _sd_ps[NAME],
INPUT_KEY: properties.Schema(
properties.Schema.STRING,
_('Name of key to use for substituting inputs during deployment'),
default='get_input',
),
INPUT_VALUES_VALIDATE: properties.Schema(
properties.Schema.STRING,
_('Perform a check on the input values passed to verify that '
'each required input has a corresponding value. '
'When the property is set to STRICT and no value is passed, '
'an exception is raised.'),
default='LAX',
constraints=[
constraints.AllowedValues(['LAX', 'STRICT']),
],
)
}
def empty_config(self):
return {}
def _build_derived_config(self, action, source,
derived_inputs, derived_options):
cfg = source.get(sc.SoftwareConfig.CONFIG)
input_key = self.properties[self.INPUT_KEY]
check_input_val = self.properties[self.INPUT_VALUES_VALIDATE]
inputs = dict((i['name'], i['value']) for i in derived_inputs)
return self.parse(inputs, input_key, cfg, check_input_val)
@staticmethod
def get_input_key_arg(snippet, input_key):
if len(snippet) != 1:
return None
fn_name, fn_arg = next(six.iteritems(snippet))
if (fn_name == input_key and isinstance(fn_arg, six.string_types)):
return fn_arg
@staticmethod
def get_input_key_value(fn_arg, inputs, check_input_val='LAX'):
if check_input_val == 'STRICT' and fn_arg not in inputs:
raise exception.UserParameterMissing(key=fn_arg)
return inputs.get(fn_arg)
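    # parse() walks the config structure recursively: any mapping of the form
    # {input_key: "<name>"} (by default {"get_input": "<name>"}) is replaced by
    # the corresponding value from the deployment inputs; other mappings and
    # sequences are traversed, and scalars are returned unchanged.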
@staticmethod
def parse(inputs, input_key, snippet, check_input_val='LAX'):
parse = functools.partial(
StructuredDeployment.parse,
inputs,
input_key,
check_input_val=check_input_val)
if isinstance(snippet, collections.Mapping):
fn_arg = StructuredDeployment.get_input_key_arg(snippet, input_key)
if fn_arg is not None:
return StructuredDeployment.get_input_key_value(fn_arg, inputs,
check_input_val
)
return dict((k, parse(v)) for k, v in six.iteritems(snippet))
elif (not isinstance(snippet, six.string_types) and
isinstance(snippet, collections.Iterable)):
return [parse(v) for v in snippet]
else:
return snippet
class StructuredDeploymentGroup(sd.SoftwareDeploymentGroup):
PROPERTIES = (
SERVERS,
CONFIG,
INPUT_VALUES,
DEPLOY_ACTIONS,
NAME,
SIGNAL_TRANSPORT,
INPUT_KEY,
INPUT_VALUES_VALIDATE,
) = (
sd.SoftwareDeploymentGroup.SERVERS,
sd.SoftwareDeploymentGroup.CONFIG,
sd.SoftwareDeploymentGroup.INPUT_VALUES,
sd.SoftwareDeploymentGroup.DEPLOY_ACTIONS,
sd.SoftwareDeploymentGroup.NAME,
sd.SoftwareDeploymentGroup.SIGNAL_TRANSPORT,
StructuredDeployment.INPUT_KEY,
StructuredDeployment.INPUT_VALUES_VALIDATE
)
_sds_ps = sd.SoftwareDeploymentGroup.properties_schema
properties_schema = {
SERVERS: _sds_ps[SERVERS],
CONFIG: _sds_ps[CONFIG],
INPUT_VALUES: _sds_ps[INPUT_VALUES],
DEPLOY_ACTIONS: _sds_ps[DEPLOY_ACTIONS],
SIGNAL_TRANSPORT: _sds_ps[SIGNAL_TRANSPORT],
NAME: _sds_ps[NAME],
INPUT_KEY: StructuredDeployment.properties_schema[INPUT_KEY],
INPUT_VALUES_VALIDATE:
StructuredDeployment.properties_schema[INPUT_VALUES_VALIDATE],
}
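    # Each member of the group becomes its own StructuredDeployment resource,
    # with the per-server entry popped out of the SERVERS map.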
def build_resource_definition(self, res_name, res_defn):
props = copy.deepcopy(res_defn)
servers = props.pop(self.SERVERS)
props[StructuredDeployment.SERVER] = servers.get(res_name)
return rsrc_defn.ResourceDefinition(res_name,
'OS::Heat::StructuredDeployment',
props, None)
class StructuredDeployments(StructuredDeploymentGroup):
deprecation_msg = _('The resource OS::Heat::StructuredDeployments is '
'deprecated and usage is discouraged. Please use '
'resource OS::Heat::StructuredDeploymentGroup '
'instead.')
support_status = support.SupportStatus(status=support.DEPRECATED,
message=deprecation_msg,
version='2014.2')
def resource_mapping():
return {
'OS::Heat::StructuredConfig': StructuredConfig,
'OS::Heat::StructuredDeployment': StructuredDeployment,
'OS::Heat::StructuredDeploymentGroup': StructuredDeploymentGroup,
'OS::Heat::StructuredDeployments': StructuredDeployments,
}
| apache-2.0 | -8,884,169,042,803,992,000 | 35.16129 | 79 | 0.634478 | false |
vmendez/DIRAC | Resources/Storage/DIPStorage.py | 1 | 17425 | """ DIPStorage class is the client of the DIRAC Storage Element.
The following methods are available in the Service interface
getMetadata()
get()
getDir()
put()
putDir()
remove()
"""
__RCSID__ = "$Id$"
import os
import random
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Core.DISET.TransferClient import TransferClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.File import getSize
class DIPStorage( StorageBase ):
def __init__( self, storageName, parameters ):
"""
"""
StorageBase.__init__( self, storageName, parameters )
self.pluginName = 'DIP'
self.log = gLogger.getSubLogger( "DIPStorage", True )
# Several ports can be specified as comma separated list, choose
# randomly one of those ports
ports = self.protocolParameters['Port'].split( ',' )
random.shuffle( ports )
self.protocolParameters['Port'] = ports[0]
pathDict = dict( self.protocolParameters )
pathDict['Path'] = self.basePath
result = pfnunparse( pathDict )
if result['OK']:
self.url = result['Value']
self.checkSum = "CheckSum"
self.isok = True
def setParameters( self, parameters ):
""" Applying extra storage parameters
"""
StorageBase.setParameters( self, parameters )
if "CheckSum" in parameters and parameters['CheckSum'].lower() in ['0', 'no', 'false', 'off']:
self.checkSum = "NoCheckSum"
return S_OK()
#############################################################
#
# These are the methods for file manipulation
#
def exists( self, path ):
""" Check if the given path exists. The 'path' variable can be a string or a list of strings.
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
serviceClient = RPCClient( self.url )
for url in urls:
gLogger.debug( "DIPStorage.exists: Determining existence of %s." % url )
res = serviceClient.exists( url )
if res['OK']:
successful[url] = res['Value']
else:
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def putFile( self, path, sourceSize = 0 ):
"""Put a file to the physical storage
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
for dest_url, src_file in urls.items():
gLogger.debug( "DIPStorage.putFile: Executing transfer of %s to %s" % ( src_file, dest_url ) )
res = self.__putFile( src_file, dest_url )
if res['OK']:
successful[dest_url] = res['Value']
else:
failed[dest_url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __putFile( self, src_file, dest_url ):
res = pfnparse( src_file )
if not res['OK']:
return res
localCache = False
srcDict = res['Value']
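    # If the source is itself a DIP URL, first stage the file into a local
    # cache via the source storage's TransferClient, then upload that copy and
    # remove it afterwards.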
if srcDict['Protocol'] in ['dips', 'dip']:
localCache = True
srcSEURL = srcDict['Protocol'] + '://' + srcDict['Host'] + ':' + srcDict['Port'] + srcDict['WSUrl']
transferClient = TransferClient( srcSEURL )
res = transferClient.receiveFile( srcDict['FileName'], os.path.join( srcDict['Path'], srcDict['FileName'] ) )
if not res['OK']:
return res
src_file = srcDict['FileName']
if not os.path.exists( src_file ):
errStr = "DIPStorage.__putFile: The source local file does not exist."
gLogger.error( errStr, src_file )
return S_ERROR( errStr )
sourceSize = getSize( src_file )
if sourceSize == -1:
errStr = "DIPStorage.__putFile: Failed to get file size."
gLogger.error( errStr, src_file )
return S_ERROR( errStr )
transferClient = TransferClient( self.url )
res = transferClient.sendFile( src_file, dest_url, token = self.checkSum )
if localCache:
os.unlink( src_file )
if res['OK']:
return S_OK( sourceSize )
else:
return res
def getFile( self, path, localPath = False ):
"""Get a local copy in the current directory of a physical file specified by its path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
for src_url in urls:
fileName = os.path.basename( src_url )
if localPath:
dest_file = "%s/%s" % ( localPath, fileName )
else:
dest_file = "%s/%s" % ( os.getcwd(), fileName )
gLogger.debug( "DIPStorage.getFile: Executing transfer of %s to %s" % ( src_url, dest_file ) )
res = self.__getFile( src_url, dest_file )
if res['OK']:
successful[src_url] = res['Value']
else:
failed[src_url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def __getFile( self, src_url, dest_file ):
transferClient = TransferClient( self.url )
res = transferClient.receiveFile( dest_file, src_url, token = self.checkSum )
if not res['OK']:
return res
if not os.path.exists( dest_file ):
errStr = "DIPStorage.__getFile: The destination local file does not exist."
gLogger.error( errStr, dest_file )
return S_ERROR( errStr )
destSize = getSize( dest_file )
if destSize == -1:
errStr = "DIPStorage.__getFile: Failed to get the local file size."
gLogger.error( errStr, dest_file )
return S_ERROR( errStr )
return S_OK( destSize )
def removeFile( self, path ):
"""Remove physically the file specified by its path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
if not len( urls ) > 0:
return S_ERROR( "DIPStorage.removeFile: No surls supplied." )
successful = {}
failed = {}
serviceClient = RPCClient( self.url )
for url in urls:
gLogger.debug( "DIPStorage.removeFile: Attempting to remove %s." % url )
res = serviceClient.remove( url, '' )
if res['OK']:
successful[url] = True
else:
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def isFile( self, path ):
""" Determine whether the path is a directory
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.isFile: Attempting to determine whether %s paths are files." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
res = serviceClient.getMetadata( url )
if res['OK']:
if res['Value']['Exists']:
if res['Value']['Type'] == 'File':
gLogger.debug( "DIPStorage.isFile: Successfully obtained metadata for %s." % url )
successful[url] = True
else:
successful[url] = False
else:
failed[url] = 'File does not exist'
else:
gLogger.error( "DIPStorage.isFile: Failed to get metadata for url",
"%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def getFileSize( self, path ):
""" Get size of supplied files
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.getFileSize: Attempting to obtain size for %s files." % len( urls ) )
res = self.getFileMetadata( urls )
if not res['OK']:
return res
for url, urlDict in res['Value']['Successful'].items():
if urlDict['Exists']:
successful[url] = urlDict['Size']
else:
failed[url] = 'File does not exist'
for url, error in res['Value']['Failed'].items():
failed[url] = error
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def getFileMetadata( self, path ):
""" Get metadata associated to the file
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.getFileMetadata: Attempting to obtain metadata for %s files." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
pfn = url
if url.find( self.url ) == 0:
pfn = url[ ( len( self.url ) ):]
res = serviceClient.getMetadata( pfn )
if res['OK']:
if res['Value']['Exists']:
if res['Value']['Type'] == 'File':
gLogger.debug( "DIPStorage.getFileMetadata: Successfully obtained metadata for %s." % url )
successful[url] = res['Value']
else:
failed[url] = 'Supplied path is not a file'
else:
failed[url] = 'File does not exist'
else:
gLogger.error( "DIPStorage.getFileMetadata: Failed to get metadata for url",
"%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
#############################################################
#
# These are the methods for directory manipulation
#
def listDirectory( self, path ):
""" List the contents of the directory
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.listDirectory: Attempting to list %s directories." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
res = serviceClient.listDirectory( url, 'l' )
if not res['OK']:
failed[url] = res['Message']
else:
files = {}
subDirs = {}
for subPath, pathDict in res['Value'].items():
if pathDict['Type'] == 'File':
files[subPath] = pathDict
elif pathDict['Type'] == 'Directory':
subDirs[subPath] = pathDict
successful[url] = {}
successful[url]['SubDirs'] = subDirs
successful[url]['Files'] = files
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def isDirectory( self, path ):
""" Determine whether the path is a directory
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.isDirectory: Attempting to determine whether %s paths are directories." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
res = serviceClient.getMetadata( url )
if res['OK']:
if res['Value']['Exists']:
if res['Value']['Type'] == 'Directory':
gLogger.debug( "DIPStorage.isDirectory: Successfully obtained metadata for %s." % url )
successful[url] = True
else:
successful[url] = False
else:
failed[url] = 'Path does not exist'
else:
gLogger.error( "DIPStorage.isDirectory: Failed to get metadata for url",
"%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def getDirectorySize( self, path ):
""" Get the size of the contents of the directory
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.isDirectory: Attempting to determine whether %s paths are directories." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
res = serviceClient.getDirectorySize( url )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = {'Files':0, 'Size':res['Value'], 'SubDirs':0}
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def getDirectoryMetadata( self, path ):
""" Get metadata associated to the directory
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.getFileMetadata: Attempting to obtain metadata for %s directories." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
res = serviceClient.getMetadata( url )
if res['OK']:
if res['Value']['Exists']:
if res['Value']['Type'] == 'Directory':
res['Value']['Directory'] = True
gLogger.debug( "DIPStorage.getFileMetadata: Successfully obtained metadata for %s." % url )
successful[url] = res['Value']
else:
failed[url] = 'Supplied path is not a directory'
else:
failed[url] = 'Directory does not exist'
else:
gLogger.error( "DIPStorage.getFileMetadata: Failed to get metadata for url",
"%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def createDirectory( self, path ):
""" Create the remote directory
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.createDirectory: Attempting to create %s directories." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
res = serviceClient.createDirectory( url )
if res['OK']:
gLogger.debug( "DIPStorage.createDirectory: Successfully created directory on storage: %s" % url )
successful[url] = True
else:
gLogger.error( "DIPStorage.createDirectory: Failed to create directory on storage.", "%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def putDirectory( self, path ):
""" Put a local directory to the physical storage together with all its files and subdirectories.
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
transferClient = TransferClient( self.url )
for destDir, sourceDir in urls.items():
tmpList = os.listdir( sourceDir )
sourceFiles = [ "%s/%s" % ( sourceDir, x ) for x in tmpList ]
res = transferClient.sendBulk( sourceFiles, destDir )
if res['OK']:
successful[destDir] = {'Files':0, 'Size':0}
else:
failed[destDir] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def removeDirectory( self, path, recursive = False ):
""" Remove a directory from the storage together with all its files and subdirectories.
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
gLogger.debug( "DIPStorage.removeDirectory: Attempting to remove %s directories." % len( urls ) )
serviceClient = RPCClient( self.url )
for url in urls:
res = serviceClient.removeDirectory( url, '' )
if res['OK']:
gLogger.debug( "DIPStorage.removeDirectory: Successfully removed directory on storage: %s" % url )
successful[url] = {'FilesRemoved':0, 'SizeRemoved':0}
else:
gLogger.error( "DIPStorage.removeDirectory: Failed to remove directory from storage.", "%s: %s" % ( url, res['Message'] ) )
failed[url] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
def getDirectory( self, path, localPath = False ):
""" Get a local copy in the current directory of a physical file specified by its path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
gLogger.debug( "DIPStorage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
transferClient = TransferClient( self.url )
for src_dir in urls:
if localPath:
dest_dir = localPath
else:
dest_dir = os.getcwd()
if not os.path.exists( dest_dir ):
os.mkdir( dest_dir )
res = transferClient.receiveBulk( dest_dir, src_dir )
if res['OK']:
gLogger.debug( "DIPStorage.getDirectory: Successfully got local copy of %s" % src_dir )
successful[src_dir] = {'Files':0, 'Size':0}
else:
gLogger.error( "DIPStorage.getDirectory: Failed to get entire directory.", src_dir )
failed[src_dir] = res['Message']
resDict = {'Failed':failed, 'Successful':successful}
return S_OK( resDict )
| gpl-3.0 | -1,757,257,447,987,831,000 | 34.488798 | 131 | 0.596671 | false |
ctuning/ck-env | soft/tool.scons/customize.py | 3 | 1322 | #
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net
#
import os
##############################################################################
# customize directories to automatically find and register software
def dirs(i):
return {'return':0}
##############################################################################
# parse software version
def parse_version(i):
lst=i['output']
ver=''
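    # Look for the line containing 'engine: v<version>' in the tool's version
    # output and drop any trailing '.rel' suffix.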
for x in lst:
j=x.find('engine: v')
if j>=0:
ver=x[j+9:].strip()
j=ver.find('.rel')
if j>0:
ver=ver[:j]
break
return {'return':0, 'version':ver}
##############################################################################
# setup environment
def setup(i):
s=''
cus=i['customize']
env=i['env']
host_d=i.get('host_os_dict',{})
target_d=i.get('host_os_dict',{})
tbits=target_d.get('bits','')
winh=host_d.get('windows_base','')
# Get paths
fp=cus['full_path']
ep=cus['env_prefix']
pb=os.path.dirname(fp)
p2=os.path.dirname(pb)
env[ep]=p2
env[ep+'_BIN']=pb
return {'return':0, 'bat':s}
| bsd-3-clause | -6,188,456,156,614,589,000 | 19.030303 | 78 | 0.474281 | false |
kakkojc/DIE | DIE/Lib/DataParser.py | 9 | 8086 |
from DIE.Lib.DataPluginBase import DataPluginBase
import logging
import idaapi
import idc
import sys
try:
# TODO: Is singleton really required here? python modules are basically singleton by design
from yapsy.PluginManager import PluginManagerSingleton
except ImportError, err:
idaapi.msg("Yapsy not installed (please use 'pip install yapsy' or equivalent : %s\n", err)
# TODO: does this not kill IDA? Instead, the error should be propagated to the plugin initialization.
sys.exit(1)
# TODO: better use new style classes
class DataParser():
"""
Data parser is a class for parsing raw runtime values.
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
# type_parsers is a dictionary that maps type names to the parsers that support them.
# this is done in order to speedup parser lookups and avoid iterating the entire parser list
self.type_parsers = {}
self.pManager = PluginManagerSingleton.get() # Plugin manager
def set_plugin_path(self, plugin_path):
"""
Set the data parser plugin path
@param plugin_path: full path of data-parser root directory
@return:
"""
self.pluginLocation = plugin_path
self.pManager.setPluginPlaces([self.pluginLocation]) # Set plugin directory
self.pManager.setCategoriesFilter({"ParserPlugins": DataPluginBase})
self.logger.info("Plugin path is set to %s", plugin_path)
def loadPlugins(self):
"""
Load\Reload all plugins found in the plugin location.
"""
self.logger.info("Loading Plugins from %s", self.pluginLocation)
self.pManager.collectPlugins()
all_plugins = self.pManager.getAllPlugins()
if len(all_plugins) == 0:
idaapi.msg("Warning - No Plugins were loaded!\n")
self.logger.error("No plugins were loaded")
for pluginInfo in all_plugins:
# TODO: Validate plugins!
self.logger.info("Loading plugin %s", pluginInfo.name)
if pluginInfo.name == "headers":
# headers is an illegal plugin name (see get_parser_list)
continue
# Set a type name normalizing function
pluginInfo.plugin_object.initPlugin(self.typeName_norm)
self.pManager.activatePluginByName(pluginInfo.name)
# Add type to type_parser dict for quick lookups
suported_types = pluginInfo.plugin_object.getSupportedTypes()
if suported_types is not None:
self.addTypeParser(suported_types, pluginInfo.plugin_object)
def deactivatePlugin(self, pluginInfo):
"""
Deactivate a plugin
@param pluginInfo: deactivated plugin plugininfo object
@return:
"""
# Deactivate plugin
self.pManager.deactivatePluginByName(pluginInfo.name)
# Remove from type_parsers
for stype in self.type_parsers:
if pluginInfo.plugin_object in self.type_parsers[stype]:
self.type_parsers[stype].remove(pluginInfo.plugin_object)
def activatePlugin(self, pluginInfo):
"""
Activate a plugin
@param pluginInfo: activated plugin plugininfo object
@return:
"""
# Run plugin initialization
pluginInfo.plugin_object.initPlugin(self.typeName_norm)
# Activate Plugin
self.pManager.activatePluginByName(pluginInfo.name)
def get_parser_list(self):
"""
Query available parsers
@return: Returns a dictionary of all available parsers and their data.
The dictionary key is the parser name, and value is a list of available data in the following format:
Plugin1 -> [Plugin1 Description, Plugin1 Version,
Plugin2 -> [Plugin2 Description, Plugin2 Version, ...]
A special key named "headers" represents the type names of the returned columns
"""
parser_list = {}
# TODO: use classes or named tuples
parser_list["headers"] = ["Description", "Version", "State", "Author"]
for plugin in self.pManager.getAllPlugins():
parser_list[plugin.name] = [plugin.description, plugin.version, plugin.is_activated, plugin.author]
return parser_list
def addTypeParser(self, supported_types, parser_plugin):
"""
Add an entry to the type_parser dictionary
@param supported_types: a list of supported type strings
@param parser_plugin: parser plugin object
"""
for stype, sparams in supported_types:
if stype in self.type_parsers:
self.type_parsers[stype].append(parser_plugin)
else:
self.type_parsers[stype] = [parser_plugin]
def ParseData(self, rawData, type=None, loc=None, custom_parser=None):
"""
Parse Data
@param rawData: The raw data to be parsed
@param type: The data type (If unknown should be None)
@param loc: raw value (memory) location
@param custom_parser: A custom parser to use.
@return: A list of ParsedValue objects (containing the guessed\exact parsed values)
"""
parsedValues = []
try:
# If custom parser was defined
if custom_parser is not None:
custom_parser.run(rawData, type, match_override=True)
ret_vals = custom_parser.getParsedValues()
parsedValues.extend(ret_vals)
return parsedValues
# if type is known, try to look it up in the parser_type dict
if type is not None:
type_name = idaapi.print_tinfo('', 0, 0, idaapi.PRTYPE_1LINE, type, '', '')
type_name = self.typeName_norm(type_name)
if type_name in self.type_parsers:
for parser_plugin in self.type_parsers[type_name]:
parser_plugin.run(rawData, type)
ret_vals = parser_plugin.getParsedValues()
parsedValues.extend(ret_vals)
return parsedValues
# Otherwise, the entire plugin list has to be iterated
for pluginInfo in self.pManager.getAllPlugins():
if pluginInfo.is_activated:
pluginInfo.plugin_object.run(rawData, type)
ret_vals = pluginInfo.plugin_object.getParsedValues()
parsedValues.extend(ret_vals)
return parsedValues
except Exception as ex:
self.logger.exception("Error while parsing data: %s", ex)
def typeName_norm(self, type_name):
"""
Builds and returns a normalized type string.
Normalization deletes all space characters and changes to uppercase.
@param type_name: Type name string (e.g "CONST CHAR *")
@return: a normalized type name
"""
if not type_name:
return None
type_name = type_name.upper()
type_name = type_name.replace(" ", "")
return type_name
### a global dataParser object.
### This should basically be enough in order to create a singleton object, since of the way Python modules are
### loaded (reloading of a module should never be preformed..)
# TODO: Read from configuration file
#config = DieConfig.get_config()
idaapi.msg("[2] Loading data parsers\n")
#_dataParser = DataParser("C:\Users\yanivb\Desktop\Workspace\Projects\DIE\Plugins\DataParsers")
#_dataParser = DataParser(config.data_parser_path)
_dataParser = DataParser()
# Just in case this will someday be a full singleton implementation
def getParser():
"""
Get a parser instance
@return: DataParser instance
"""
return _dataParser
| mit | -7,869,885,376,413,775,000 | 34.423423 | 118 | 0.608583 | false |
doubi-workshop/grpc | src/python/grpcio/grpc/beta/_connectivity_channel.py | 34 | 6379 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Affords a connectivity-state-listenable channel."""
import threading
import time
from grpc._adapter import _low
from grpc._adapter import _types
from grpc.beta import interfaces
from grpc.framework.foundation import callable_util
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
_LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
state: connectivity for state, connectivity in zip(
_types.ConnectivityState, interfaces.ChannelConnectivity)
}
class ConnectivityChannel(object):
def __init__(self, low_channel):
self._lock = threading.Lock()
self._low_channel = low_channel
self._polling = False
self._connectivity = None
self._try_to_connect = False
self._callbacks_and_connectivities = []
self._delivering = False
def _deliveries(self, connectivity):
callbacks_needing_update = []
for callback_and_connectivity in self._callbacks_and_connectivities:
callback, callback_connectivity = callback_and_connectivity
if callback_connectivity is not connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = connectivity
return callbacks_needing_update
def _deliver(self, initial_connectivity, initial_callbacks):
connectivity = initial_connectivity
callbacks = initial_callbacks
while True:
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
connectivity)
with self._lock:
callbacks = self._deliveries(self._connectivity)
if callbacks:
connectivity = self._connectivity
else:
self._delivering = False
return
def _spawn_delivery(self, connectivity, callbacks):
delivering_thread = threading.Thread(
target=self._deliver, args=(connectivity, callbacks,))
delivering_thread.start()
self._delivering = True
# TODO(issue 3064): Don't poll.
def _poll_connectivity(self, low_channel, initial_try_to_connect):
try_to_connect = initial_try_to_connect
low_connectivity = low_channel.check_connectivity_state(try_to_connect)
with self._lock:
self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
low_connectivity]
callbacks = tuple(
callback for callback, unused_but_known_to_be_none_connectivity
in self._callbacks_and_connectivities)
for callback_and_connectivity in self._callbacks_and_connectivities:
callback_and_connectivity[1] = self._connectivity
if callbacks:
self._spawn_delivery(self._connectivity, callbacks)
completion_queue = _low.CompletionQueue()
while True:
low_channel.watch_connectivity_state(
low_connectivity, time.time() + 0.2, completion_queue, None)
event = completion_queue.next()
with self._lock:
if not self._callbacks_and_connectivities and not self._try_to_connect:
self._polling = False
self._connectivity = None
completion_queue.shutdown()
break
try_to_connect = self._try_to_connect
self._try_to_connect = False
if event.success or try_to_connect:
low_connectivity = low_channel.check_connectivity_state(try_to_connect)
with self._lock:
self._connectivity = _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
low_connectivity]
if not self._delivering:
callbacks = self._deliveries(self._connectivity)
if callbacks:
self._spawn_delivery(self._connectivity, callbacks)
def subscribe(self, callback, try_to_connect):
with self._lock:
if not self._callbacks_and_connectivities and not self._polling:
polling_thread = threading.Thread(
target=self._poll_connectivity,
args=(self._low_channel, bool(try_to_connect)))
polling_thread.start()
self._polling = True
self._callbacks_and_connectivities.append([callback, None])
elif not self._delivering and self._connectivity is not None:
self._spawn_delivery(self._connectivity, (callback,))
self._try_to_connect |= bool(try_to_connect)
self._callbacks_and_connectivities.append(
[callback, self._connectivity])
else:
self._try_to_connect |= bool(try_to_connect)
self._callbacks_and_connectivities.append([callback, None])
def unsubscribe(self, callback):
with self._lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
self._callbacks_and_connectivities):
if callback == subscribed_callback:
self._callbacks_and_connectivities.pop(index)
break
def low_channel(self):
return self._low_channel
| bsd-3-clause | 7,152,827,984,435,939,000 | 39.891026 | 79 | 0.701991 | false |
orgito/ansible | lib/ansible/modules/cloud/google/gcp_container_cluster_facts.py | 2 | 16573 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_container_cluster_facts
description:
- Gather facts for GCP Cluster
short_description: Gather facts for GCP Cluster
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
zone:
description:
- The zone where the cluster is deployed.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a cluster facts
gcp_container_cluster_facts:
zone: us-central1-a
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- The name of this cluster. The name must be unique within this project and
zone, and can be up to 40 characters. Must be Lowercase letters, numbers,
and hyphens only. Must start with a letter. Must end with a number or a letter.
returned: success
type: str
description:
description:
- An optional description of this cluster.
returned: success
type: str
initialNodeCount:
description:
- The number of nodes to create in this cluster. You must ensure that your Compute
Engine resource quota is sufficient for this number of instances. You must
also have available firewall and routes quota. For requests, this field should
only be used in lieu of a "nodePool" object, since this configuration (along
with the "nodeConfig") will be used to create a "NodePool" object with an
auto-generated name. Do not use this and a nodePool at the same time.
returned: success
type: int
nodeConfig:
description:
- Parameters used in creating the cluster's nodes.
- For requests, this field should only be used in lieu of a "nodePool" object,
since this configuration (along with the "initialNodeCount") will be used
to create a "NodePool" object with an auto-generated name. Do not use this
and a nodePool at the same time. For responses, this field will be populated
with the node configuration of the first node pool. If unspecified, the defaults
are used.
returned: success
type: complex
contains:
machineType:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
returned: success
type: str
diskSizeGb:
description:
- Size of the disk attached to each node, specified in GB. The smallest
allowed disk size is 10GB. If unspecified, the default disk size is 100GB.
returned: success
type: int
oauthScopes:
description:
- The set of Google API scopes to be made available on all of the node VMs
under the "default" service account.
- 'The following scopes are recommended, but not required, and by default
are not included: U(https://www.googleapis.com/auth/compute) is required
for mounting persistent storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for
communicating with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring
are enabled, in which case their required scopes will be added.
returned: success
type: list
serviceAccount:
description:
- The Google Cloud Platform Service Account to be used by the node VMs.
If no Service Account is specified, the "default" service account is used.
returned: success
type: str
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the four reserved keys: "instance-template",
"kube-env", "startup-script", and "user-data" Values are free-form strings,
and only have meaning as interpreted by the image running in the instance.
The only restriction placed on them is that each value''s size must be
less than or equal to 32 KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
imageType:
description:
- The image type to use for this node. Note that for a given image type,
the latest version of it will be used.
returned: success
type: str
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each
node. These will added in addition to any default label(s) that Kubernetes
may apply to the node. In case of conflict in label keys, the applied
set may differ depending on the Kubernetes version -- it''s best to assume
the behavior is undefined and conflicts should be avoided. For more information,
including usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html)
An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
localSsdCount:
description:
- The number of local SSD disks to be attached to the node.
          - 'The limit for this value is dependent upon the maximum number of disks
available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits)
for more information.'
returned: success
type: int
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls and are specified by the
client during cluster or node pool creation. Each tag within the list
must comply with RFC1035.
returned: success
type: list
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
            for more information about preemptible VM instances.'
returned: success
type: bool
masterAuth:
description:
- The authentication information for accessing the master endpoint.
returned: success
type: complex
contains:
username:
description:
- The username to use for HTTP basic authentication to the master endpoint.
returned: success
type: str
password:
description:
- The password to use for HTTP basic authentication to the master endpoint.
Because the master endpoint is open to the Internet, you should create
a strong password.
returned: success
type: str
clusterCaCertificate:
description:
- Base64-encoded public certificate that is the root of trust for the cluster.
returned: success
type: str
clientCertificate:
description:
- Base64-encoded public certificate used by clients to authenticate to the
cluster endpoint.
returned: success
type: str
clientKey:
description:
- Base64-encoded private key used by clients to authenticate to the cluster
endpoint.
returned: success
type: str
loggingService:
description:
- 'The logging service the cluster should use to write logs. Currently available
options: logging.googleapis.com - the Google Cloud Logging service.'
- none - no logs will be exported from the cluster.
- if left as an empty string,logging.googleapis.com will be used.
returned: success
type: str
monitoringService:
description:
- The monitoring service the cluster should use to write metrics.
- 'Currently available options: monitoring.googleapis.com - the Google Cloud
Monitoring service.'
- none - no metrics will be exported from the cluster.
- if left as an empty string, monitoring.googleapis.com will be used.
returned: success
type: str
network:
description:
- The name of the Google Compute Engine network to which the cluster is connected.
If left unspecified, the default network will be used.
      - To ensure it exists and is operational, configure the network using 'gcompute_network'
resource.
returned: success
type: str
clusterIpv4Cidr:
description:
- The IP address range of the container pods in this cluster, in CIDR notation
(e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify
a /14 block in 10.0.0.0/8.
returned: success
type: str
addonsConfig:
description:
- Configurations for the various addons available to run in the cluster.
returned: success
type: complex
contains:
httpLoadBalancing:
description:
- Configuration for the HTTP (L7) load balancing controller addon, which
makes it easy to set up HTTP load balancers for services in a cluster.
returned: success
type: complex
contains:
disabled:
description:
- Whether the HTTP Load Balancing controller is enabled in the cluster.
When enabled, it runs a small pod in the cluster that manages the
load balancers.
returned: success
type: bool
horizontalPodAutoscaling:
description:
- Configuration for the horizontal pod autoscaling feature, which increases
or decreases the number of replica pods a replication controller has based
on the resource usage of the existing pods.
returned: success
type: complex
contains:
disabled:
description:
- Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
When enabled, it ensures that a Heapster pod is running in the cluster,
which is also used by the Cloud Monitoring service.
returned: success
type: bool
subnetwork:
description:
- The name of the Google Compute Engine subnetwork to which the cluster is connected.
returned: success
type: str
location:
description:
- The list of Google Compute Engine locations in which the cluster's nodes should
be located.
returned: success
type: list
endpoint:
description:
- The IP address of this cluster's master endpoint.
- The endpoint can be accessed from the internet at https://username:password@endpoint/
See the masterAuth property of this resource for username and password information.
returned: success
type: str
initialClusterVersion:
description:
- The software version of the master endpoint and kubelets used in the cluster
when it was first created. The version can be upgraded over time.
returned: success
type: str
currentMasterVersion:
description:
- The current software version of the master endpoint.
returned: success
type: str
currentNodeVersion:
description:
- The current version of the node software components. If they are currently
at multiple versions because they're in the process of being upgraded, this
reflects the minimum version of all nodes.
returned: success
type: str
createTime:
description:
- The time the cluster was created, in RFC3339 text format.
returned: success
type: str
nodeIpv4CidrSize:
description:
- The size of the address space on each node for hosting containers.
- This is provisioned from within the container_ipv4_cidr range.
returned: success
type: int
servicesIpv4Cidr:
description:
- The IP address range of the Kubernetes services in this cluster, in CIDR notation
(e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from
the container CIDR.
returned: success
type: str
currentNodeCount:
description:
- The number of nodes currently in the cluster.
returned: success
type: int
expireTime:
description:
- The time the cluster will be automatically deleted in RFC3339 text format.
returned: success
type: str
zone:
description:
- The zone where the cluster is deployed.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(
argument_spec=dict(
zone=dict(required=True, type='str')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
items = fetch_list(module, collection(module))
if items.get('clusters'):
items = items.get('clusters')
else:
items = []
return_value = {
'items': items
}
module.exit_json(**return_value)
def collection(module):
return "https://container.googleapis.com/v1/projects/{project}/zones/{zone}/clusters".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'container')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | 7,647,188,523,714,107,000 | 38.087264 | 139 | 0.613649 | false |
cernanalysispreservation/analysis-preservation.cern.ch | cap/modules/auth/views.py | 2 | 5687 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
#
"""Authentication views for CAP."""
from flask import Blueprint, url_for, current_app, jsonify, \
request, redirect, session, abort
from flask_login import current_user
from invenio_db import db
from invenio_userprofiles.models import UserProfile
from sqlalchemy.orm.attributes import flag_modified
from .config import OAUTH_SERVICES, USER_PROFILE
from .models import OAuth2Token
from .proxies import current_auth
from .utils import _create_or_update_token
from cap.modules.access.utils import login_required
blueprint = Blueprint(
'cap_auth',
__name__,
url_prefix='/auth',
)
@blueprint.route('/connect/<name>')
@login_required
def connect(name):
next_url = request.args.get('next')
ui_flag = request.args.get('ui')
session.update({
'next': next_url,
'ui': ui_flag
})
client = current_auth.create_client(name)
redirect_uri = url_for('cap_auth.authorize', name=name, _external=True)
if not current_app.config['DEBUG']:
redirect_uri = redirect_uri.replace(
"/auth/authorize/",
"/api/auth/authorize/")
# DEV FIX for 'CERN Gitlab' to work locally since you can't register
# 'localhost' redirect_uri for testing
#
# redirect_uri = redirect_uri.replace(":5000", '')
# redirect_uri = redirect_uri.replace("http", 'https')
# redirect_uri = redirect_uri.replace("cern.ch/", 'cern.ch/api/')
return client.authorize_redirect(redirect_uri)
@blueprint.route('/disconnect/<name>')
@login_required
def disconnect(name):
_profile = UserProfile.get_by_userid(current_user.id)
_token = OAuth2Token.get(name=name, user_id=current_user.id)
if _profile and _token:
del _profile.extra_data['services'][name]
flag_modified(_profile, "extra_data")
db.session.delete(_token)
db.session.commit()
return jsonify({'message': 'Disconnected from {} '
'successfully.'.format(name)}), 200
else:
abort(403, "Unable to disconnect from {} service.".format(name))
@blueprint.route('/authorize/<name>')
@login_required
def authorize(name):
ui_flag = session.pop('ui', None)
client = current_auth.create_client(name)
token = client.authorize_access_token()
configs = OAUTH_SERVICES.get(name.upper(), {})
extra_data_method = configs.get('extra_data_method')
# TOFIX Add error handlers for reject, auth errors, etc
extra_data = {}
if extra_data_method:
extra_data = extra_data_method(client, token)
_token = _create_or_update_token(name, token)
_token.extra_data = extra_data
db.session.add(_token)
# Add extra data to user profile.
# If user profile doesn't exist yet, it creates one.
_profile = UserProfile.get_by_userid(current_user.id)
if not _profile:
_profile = UserProfile(user_id=current_user.id)
db.session.add(_profile)
profile_data = get_oauth_profile(name, token=_token, client=client)
if _profile.extra_data:
profile_services = _profile.extra_data.get("services", {})
else:
profile_services = {}
profile_services[name] = profile_data
_profile.extra_data = {"services": profile_services}
flag_modified(_profile, "extra_data")
db.session.commit()
if ui_flag:
if current_app.config['DEBUG']:
redirect_url = "http://localhost:3000/settings/auth/connect"
else:
redirect_url = "/settings/auth/connect"
return redirect(redirect_url)
else:
return jsonify({
"message": "Authorization to {} succeeded".format(name)
}), 200
@blueprint.route('/profile/<name>')
@login_required
def profile(name):
_profile = get_oauth_profile(name)
return jsonify(_profile)
def get_oauth_profile(name, token=None, client=None):
_token = token if token \
else OAuth2Token.get(name=name, user_id=current_user.id)
if not _token:
abort(403, "Your account is not connected to the service")
extra_data = _token.extra_data
_client = client if client else current_auth.create_client(name)
if name == 'orcid':
orcid = extra_data.get('orcid_id')
resp = None
if orcid:
resp = _client.get('/{}/record'.format(orcid),
headers={'Accept': 'application/json'})
else:
resp = _client.get(USER_PROFILE[name]['path'])
try:
res_json = USER_PROFILE[name]['serializer'].dump(resp.json()).data
except AttributeError:
res_json = {}
return res_json
| gpl-2.0 | 1,710,167,918,217,202,000 | 30.949438 | 78 | 0.663443 | false |
qiankunshe/sky_engine | sky/tools/webkitpy/common/net/buildbot/chromiumbuildbot_unittest.py | 24 | 2258 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.buildbot.chromiumbuildbot import ChromiumBuildBot
class ChromiumBuilderTest(unittest.TestCase):
def test_results_url(self):
builder = ChromiumBuildBot().builder_with_name('WebKit Mac10.8 (dbg)')
self.assertEqual(builder.results_url(),
'https://storage.googleapis.com/chromium-layout-test-archives/WebKit_Mac10_8__dbg_')
def test_accumulated_results_url(self):
builder = ChromiumBuildBot().builder_with_name('WebKit Mac10.8 (dbg)')
self.assertEqual(builder.accumulated_results_url(),
'https://storage.googleapis.com/chromium-layout-test-archives/WebKit_Mac10_8__dbg_/results/layout-test-results')
| bsd-3-clause | 1,381,318,044,048,205,600 | 51.511628 | 137 | 0.756422 | false |
jgillis/casadi | experimental/joel/scpgen_chain_mass.py | 1 | 4034 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import numpy as NP
import matplotlib.pyplot as plt
"""
Optimal control of a chain mass model
L. Wirsching, H.G. Bock, M. Diehl, Fast NMPC of a chain of masses connected by springs
Computer Aided Control System Design, 2006 IEEE International Conference on Control Applications, 2006
@author Joel Andersson, Milan Vukov, K.U. Leuven 2013
"""
# Start of the chain
pStart = [0., 0., 0.]
# End of the chain in steady state
pEnd = [1., 0., 0.]
# Model parameters
g = 9.81 # Gravitational acceleration [N/kg]
L = 0.033 # Rest length of the springs [m]
D = 1.0 # Spring constant [N/m] (0.1 in Wirsching2006)
m = 0.03 # Ball mass [kg]
# Number of balls
N = 9
# Position and velocities of the masses
p = ssym("p", 3,N+1)
pdot = ssym("pdot", 3,N)
# Initial guess (uniformly distributed and in rest)
pInit = DMatrix.zeros(3,N+1)
pInit[2,:] = linspace(0,1.0,N+1).T
pdotInit = DMatrix.zeros(3,N)
# Control vector (velocity of the last ball)
u = ssym("u",3)
# Forces between all the balls
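# (Sketch of the model as implemented below: the spring between balls i-1 and i,
#  stretched to length ||d||, pulls with force D*(1 - L/||d||)*d; gravity is
#  added to the z component of the acceleration further down.)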
F = SXMatrix.zeros(3,N+1)
for i in range(N+1):
# Spring vector
if i==0:
d = p[:,0] - pStart
else:
d = p[:,i] - p[:,i-1]
# Length of the spring (add small constant to avoid square root of zero)
L_d = sqrt(inner_prod(d,d) + 1e-10)
# Force acting on ball i+1
F[:,i] = D*(1.0 - L/L_d) * d
# Acceleration of the masses
a = (F[:,1:]-F[:,0:-1])/m
# Add gravity
a[2,:] -= g
# State vector with initial guess as well as differential equations
x = SXMatrix.zeros(0,1)
xInit = DMatrix.zeros(0,1)
f = SXMatrix.zeros(0,1)
for i in range(N):
# Position
f.append(pdot[:,i])
x.append(p[:,i])
xInit.append(pInit[:,i])
# Velocity
f.append(a[:,i])
x.append(pdot[:,i])
xInit.append(pdotInit[:,i])
# Last ball is controlled
f.append(u)
x.append(p[:,N])
xInit.append(pInit[:,N])
# Define the optimal control problem
nx = x.size()
nu = u.size()
# Weighting factors in the objective
alpha = 25 # 1/s2
beta = 1
gamma = 0.01
# Deviation from the end point
dpEnd = p[:,-1]-pEnd
# Cost function
L = alpha * inner_prod(dpEnd,dpEnd) + beta * inner_prod(vec(p),vec(p)) + gamma * inner_prod(u,u)
# Number of shooting intervals
nk = 20
# Time horizon
T = 8.0
# Control bounds
u_max = 1e-3 # 1/s
# ODE function
ffcn = SXFunction(daeIn(x=x,p=u),daeOut(ode=f,quad=L))
# Create an integrator
Ffcn = CVodesIntegrator(ffcn)
Ffcn.setOption("abstol",1e-10) # tolerance
Ffcn.setOption("reltol",1e-10) # tolerance
Ffcn.setOption("tf",T/nk) # final time
Ffcn.init()
# All controls
U = msym("U",3*nk)
# The initial state
X = xInit
# Const function
J = 0
# Build a graph of integrator calls
for k in range(nk):
X,Q,_,_ = Ffcn.call(integratorIn(x0=X,p=U[3*k:3*(k+1)]))
J += Q
# Objective function
F = MXFunction([U],[J])
# Allocate an NLP solver
solver = IpoptSolver(F)
solver.init()
# Set bounds and initial guess
solver.setInput(-u_max, "lbx")
solver.setInput( u_max, "ubx")
solver.setInput( 0., "x0")
# Solve the problem
solver.solve()
# Retrieve the solution
u_opt = NP.array(solver.output("x"))
| lgpl-3.0 | 8,160,932,959,147,517,000 | 23.155689 | 102 | 0.665097 | false |
harish2rb/pyGeoNet | pygeonet_V2_1/pygeonet_slope_curvature.py | 1 | 5910 | import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
import prepare_pygeonet_defaults as defaults
import prepare_pygeonet_inputs as parameters
import pygeonet_rasterio as pyg_rio
import pygeonet_plot as pyg_plt
def compute_dem_slope(filteredDemArray, pixelDemScale):
"""
Compute the slope of the input filtered DEM
:param filteredDemArray: the input filtered dem array
:param pixelDemScale: the dem resolution
:return: the slope array
"""
slopeXArray,slopeYArray = np.gradient(filteredDemArray, pixelDemScale)
slopeDemArray = np.sqrt(slopeXArray**2 + slopeYArray**2)
slopeMagnitudeDemArrayQ = slopeDemArray
slopeMagnitudeDemArrayQ = np.reshape(slopeMagnitudeDemArrayQ,
np.size(slopeMagnitudeDemArrayQ))
slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayQ[~np.isnan(slopeMagnitudeDemArrayQ)]
# Computation of statistics of slope
print('slope statistics')
print('angle min: {}'.format(np.arctan(np.percentile(slopeMagnitudeDemArrayQ,
0.1))*180/np.pi))
print('angle max: {}'.format(np.arctan(np.percentile(slopeMagnitudeDemArrayQ,
99.9))*180/np.pi))
print('mean slope: {}'.format(np.nanmean(slopeDemArray[:])))
print('stdev slope: {}'.format(np.nanstd(slopeDemArray[:])))
return slopeDemArray
def compute_dem_curvature(demArray, pixelDemScale, curvatureCalcMethod):
"""
    Compute the dem curvature using a specified curvature method
:param demArray: the input dem array
:param pixelDemScale: the dem resolution
:param curvatureCalcMethod: the curvature method to be used for computing curvature
    :return: the curvature dem array, the mean and standard deviation of the curvature
"""
gradXArray, gradYArray = np.gradient(demArray, pixelDemScale)
slopeArrayT = np.sqrt(gradXArray**2 + gradYArray**2)
if curvatureCalcMethod == 'geometric':
#Geometric curvature
print(' using geometric curvature')
gradXArrayT = np.divide(gradXArray,slopeArrayT)
gradYArrayT = np.divide(gradYArray,slopeArrayT)
elif curvatureCalcMethod=='laplacian':
# do nothing..
print(' using laplacian curvature')
gradXArrayT = gradXArray
gradYArrayT = gradYArray
gradGradXArray,tmpy = np.gradient(gradXArrayT,pixelDemScale)
tmpx,gradGradYArray = np.gradient(gradYArrayT,pixelDemScale)
curvatureDemArray = gradGradXArray + gradGradYArray
curvatureDemArray[np.isnan(curvatureDemArray)] = 0
del tmpy, tmpx
# Computation of statistics of curvature
print(' curvature statistics')
tt = curvatureDemArray[~np.isnan(curvatureDemArray[:])]
print(' non-nan curvature cell number: {}'.format(tt.shape[0]))
finiteCurvatureDemList = curvatureDemArray[np.isfinite(curvatureDemArray[:])]
print(' non-nan finite curvature cell number: {}'.format(finiteCurvatureDemList.shape[0]))
curvatureDemMean = np.nanmean(finiteCurvatureDemList)
curvatureDemStdDevn = np.nanstd(finiteCurvatureDemList)
print(' mean curvature: {}'.format(curvatureDemMean))
print(' standard deviation: {}'.format(curvatureDemStdDevn))
return curvatureDemArray, curvatureDemMean, curvatureDemStdDevn
def compute_quantile_quantile_curve(x):
"""
Compute the quantile quantile plot
:param x: the x list
:return: the quantile quantile plot
"""
    print('getting qqplot estimate')
if not hasattr(defaults, 'figureNumber'):
defaults.figureNumber = 0
defaults.figureNumber = defaults.figureNumber + 1
plt.figure(defaults.figureNumber)
res = stats.probplot(x, plot=plt)
res1 = sm.ProbPlot(x, stats.t, fit=True)
    print(res1)
return res
def perform_slope_curvature_computations():
filteredDemArray = pyg_rio.read_geotif_filteredDEM()
# Computing slope
print('computing slope')
slopeDemArray = compute_dem_slope(filteredDemArray, parameters.demPixelScale)
slopeDemArray[np.isnan(filteredDemArray)] = np.nan
# Writing the curvature array
outfilepath = parameters.geonetResultsDir
demName = parameters.demFileName.split('.')[0]
outfilename = demName +'_slope.tif'
pyg_rio.write_geotif_generic(slopeDemArray, outfilepath, outfilename)
# Computing curvature
print('computing curvature')
curvatureDemArray, curvatureDemMean, \
curvatureDemStdDevn = compute_dem_curvature(filteredDemArray,
parameters.demPixelScale,
defaults.curvatureCalcMethod)
curvatureDemArray[np.isnan(filteredDemArray)] = np.nan
# Writing the curvature array
outfilename = demName +'_curvature.tif'
pyg_rio.write_geotif_generic(curvatureDemArray, outfilepath, outfilename)
# plotting the curvature image
if defaults.doPlot == 1:
pyg_plt.raster_plot(curvatureDemArray, 'Curvature DEM')
#*************************************************
# TODO have to add method to automatically compute the threshold
# Compute curvature quantile-quantile curve
# This seems to take a long time ... is commented for now
#print 'computing curvature quantile-quantile curve'
#finiteCurvatureDemList = curvatureDemArray[np.isfinite(curvatureDemArray[:])]
#osm,osr = compute_quantile_quantile_curve(finiteCurvatureDemList)
#print osm[0]
#print osr[0]
thresholdCurvatureQQxx = 1
    # have to add method to automatically compute the threshold
# .....
# .....
#*************************************************
| gpl-3.0 | 8,037,581,099,626,046,000 | 42.104478 | 96 | 0.665144 | false |
agconti/njode | env/lib/python2.7/site-packages/oauthlib/common.py | 34 | 13310 | # -*- coding: utf-8 -*-
"""
oauthlib.common
~~~~~~~~~~~~~~
This module provides data structures and utilities common
to all implementations of OAuth.
"""
from __future__ import absolute_import, unicode_literals
import collections
import datetime
import logging
import random
import re
import sys
import time
try:
from urllib import quote as _quote
from urllib import unquote as _unquote
from urllib import urlencode as _urlencode
except ImportError:
from urllib.parse import quote as _quote
from urllib.parse import unquote as _unquote
from urllib.parse import urlencode as _urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789')
CLIENT_ID_CHARACTER_SET = (r' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMN'
'OPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}')
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '_.-')
log = logging.getLogger('oauthlib')
PY3 = sys.version_info[0] == 3
if PY3:
unicode_type = str
bytes_type = bytes
else:
unicode_type = unicode
bytes_type = str
# 'safe' must be bytes (Python 2.6 requires bytes, other versions allow either)
def quote(s, safe=b'/'):
s = s.encode('utf-8') if isinstance(s, unicode_type) else s
s = _quote(s, safe)
# PY3 always returns unicode. PY2 may return either, depending on whether
# it had to modify the string.
if isinstance(s, bytes_type):
s = s.decode('utf-8')
return s
def unquote(s):
s = _unquote(s)
# PY3 always returns unicode. PY2 seems to always return what you give it,
# which differs from quote's behavior. Just to be safe, make sure it is
# unicode before we return.
if isinstance(s, bytes_type):
s = s.decode('utf-8')
return s
def urlencode(params):
utf8_params = encode_params_utf8(params)
urlencoded = _urlencode(utf8_params)
if isinstance(urlencoded, unicode_type): # PY3 returns unicode
return urlencoded
else:
return urlencoded.decode("utf-8")
def encode_params_utf8(params):
"""Ensures that all parameters in a list of 2-element tuples are encoded to
bytestrings using UTF-8
"""
encoded = []
for k, v in params:
encoded.append((
k.encode('utf-8') if isinstance(k, unicode_type) else k,
v.encode('utf-8') if isinstance(v, unicode_type) else v))
return encoded
def decode_params_utf8(params):
"""Ensures that all parameters in a list of 2-element tuples are decoded to
unicode using UTF-8.
"""
decoded = []
for k, v in params:
decoded.append((
k.decode('utf-8') if isinstance(k, bytes_type) else k,
v.decode('utf-8') if isinstance(v, bytes_type) else v))
return decoded
urlencoded = set(always_safe) | set('=&;%+~,*@')
def urldecode(query):
"""Decode a query string in x-www-form-urlencoded format into a sequence
of two-element tuples.
Unlike urlparse.parse_qsl(..., strict_parsing=True) urldecode will enforce
correct formatting of the query string by validation. If validation fails
a ValueError will be raised. urllib.parse_qsl will only raise errors if
any of name-value pairs omits the equals sign.
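    Illustrative behaviour (inferred from the validation rules above):
        urldecode('a=1&b=%20') -> [('a', '1'), ('b', ' ')]
        urldecode('a=<b>') raises ValueError because '<' and '>' are not
        valid x-www-form-urlencoded characters.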
"""
# Check if query contains invalid characters
if query and not set(query) <= urlencoded:
error = ("Error trying to decode a non urlencoded string. "
"Found invalid characters: %s "
"in the string: '%s'. "
"Please ensure the request/response body is "
"x-www-form-urlencoded.")
raise ValueError(error % (set(query) - urlencoded, query))
# Check for correctly hex encoded values using a regular expression
# All encoded values begin with % followed by two hex characters
# correct = %00, %A0, %0A, %FF
# invalid = %G0, %5H, %PO
invalid_hex = '%[^0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]'
if len(re.findall(invalid_hex, query)):
raise ValueError('Invalid hex encoding in query string.')
# We encode to utf-8 prior to parsing because parse_qsl behaves
# differently on unicode input in python 2 and 3.
# Python 2.7
# >>> urlparse.parse_qsl(u'%E5%95%A6%E5%95%A6')
# u'\xe5\x95\xa6\xe5\x95\xa6'
# Python 2.7, non unicode input gives the same
# >>> urlparse.parse_qsl('%E5%95%A6%E5%95%A6')
# '\xe5\x95\xa6\xe5\x95\xa6'
# but now we can decode it to unicode
# >>> urlparse.parse_qsl('%E5%95%A6%E5%95%A6').decode('utf-8')
# u'\u5566\u5566'
# Python 3.3 however
# >>> urllib.parse.parse_qsl(u'%E5%95%A6%E5%95%A6')
# u'\u5566\u5566'
query = query.encode(
'utf-8') if not PY3 and isinstance(query, unicode_type) else query
# We want to allow queries such as "c2" whereas urlparse.parse_qsl
# with the strict_parsing flag will not.
params = urlparse.parse_qsl(query, keep_blank_values=True)
# unicode all the things
return decode_params_utf8(params)
def extract_params(raw):
"""Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None.
"""
if isinstance(raw, bytes_type) or isinstance(raw, unicode_type):
try:
params = urldecode(raw)
except ValueError:
params = None
elif hasattr(raw, '__iter__'):
try:
dict(raw)
except ValueError:
params = None
except TypeError:
params = None
else:
params = list(raw.items() if isinstance(raw, dict) else raw)
params = decode_params_utf8(params)
else:
params = None
return params
def generate_nonce():
"""Generate pseudorandom nonce that is unlikely to repeat.
Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
Per `section 3.2.1`_ of the MAC Access Authentication spec.
A random 64-bit number is appended to the epoch timestamp for both
randomness and to decrease the likelihood of collisions.
.. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
.. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
"""
return unicode_type(unicode_type(random.getrandbits(64)) + generate_timestamp())
def generate_timestamp():
"""Get seconds since epoch (UTC).
Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
Per `section 3.2.1`_ of the MAC Access Authentication spec.
.. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
.. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
"""
return unicode_type(int(time.time()))
def generate_token(length=30, chars=UNICODE_ASCII_CHARACTER_SET):
"""Generates a non-guessable OAuth token
OAuth (1 and 2) does not specify the format of tokens except that they
should be strings of random characters. Tokens should not be guessable
and entropy when generating the random characters is important. Which is
why SystemRandom is used instead of the default random.choice method.
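    For illustration only (not part of the original documentation):
    generate_token() returns a 30 character alphanumeric string, while
    generate_token(16, '0123456789') would return a 16 digit string.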
"""
rand = random.SystemRandom()
return ''.join(rand.choice(chars) for x in range(length))
def generate_signed_token(private_pem, request):
import Crypto.PublicKey.RSA as RSA
import jwt
private_key = RSA.importKey(private_pem)
now = datetime.datetime.utcnow()
claims = {
'scope': request.scope,
'exp': now + datetime.timedelta(seconds=request.expires_in)
}
claims.update(request.claims)
token = jwt.encode(claims, private_key, 'RS256')
token = to_unicode(token, "UTF-8")
return token
def verify_signed_token(private_pem, token):
import Crypto.PublicKey.RSA as RSA
import jwt
public_key = RSA.importKey(private_pem).publickey()
try:
# return jwt.verify_jwt(token.encode(), public_key)
return jwt.decode(token, public_key)
except:
raise Exception
def generate_client_id(length=30, chars=CLIENT_ID_CHARACTER_SET):
"""Generates an OAuth client_id
OAuth 2 specify the format of client_id in
http://tools.ietf.org/html/rfc6749#appendix-A.
"""
return generate_token(length, chars)
def add_params_to_qs(query, params):
"""Extend a query with a list of two-tuples."""
if isinstance(params, dict):
params = params.items()
queryparams = urlparse.parse_qsl(query, keep_blank_values=True)
queryparams.extend(params)
return urlencode(queryparams)
def add_params_to_uri(uri, params, fragment=False):
"""Add a list of two-tuples to the uri query components."""
sch, net, path, par, query, fra = urlparse.urlparse(uri)
if fragment:
fra = add_params_to_qs(fra, params)
else:
query = add_params_to_qs(query, params)
return urlparse.urlunparse((sch, net, path, par, query, fra))
def safe_string_equals(a, b):
""" Near-constant time string comparison.
Used in order to avoid timing attacks on sensitive information such
as secret keys during request verification (`rootLabs`_).
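    A hypothetical use: check a received signature against the expected one
    with safe_string_equals(expected_sig, received_sig) instead of ``==``.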
.. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/
"""
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def to_unicode(data, encoding='UTF-8'):
"""Convert a number of different types of objects to unicode."""
if isinstance(data, unicode_type):
return data
if isinstance(data, bytes_type):
return unicode_type(data, encoding=encoding)
if hasattr(data, '__iter__'):
try:
dict(data)
except TypeError:
pass
except ValueError:
# Assume it's a one dimensional data structure
return (to_unicode(i, encoding) for i in data)
else:
# We support 2.6 which lacks dict comprehensions
if hasattr(data, 'items'):
data = data.items()
return dict(((to_unicode(k, encoding), to_unicode(v, encoding)) for k, v in data))
return data
class CaseInsensitiveDict(dict):
"""Basic case insensitive dict with strings only keys."""
proxy = {}
def __init__(self, data):
self.proxy = dict((k.lower(), k) for k in data)
for k in data:
self[k] = data[k]
def __contains__(self, k):
return k.lower() in self.proxy
def __delitem__(self, k):
key = self.proxy[k.lower()]
super(CaseInsensitiveDict, self).__delitem__(key)
del self.proxy[k.lower()]
def __getitem__(self, k):
key = self.proxy[k.lower()]
return super(CaseInsensitiveDict, self).__getitem__(key)
def get(self, k, default=None):
return self[k] if k in self else default
def __setitem__(self, k, v):
super(CaseInsensitiveDict, self).__setitem__(k, v)
self.proxy[k.lower()] = k
class Request(object):
"""A malleable representation of a signable HTTP request.
Body argument may contain any data, but parameters will only be decoded if
they are one of:
* urlencoded query string
* dict
* list of 2-tuples
Anything else will be treated as raw body data to be passed through
unmolested.
"""
def __init__(self, uri, http_method='GET', body=None, headers=None,
encoding='utf-8'):
# Convert to unicode using encoding if given, else assume unicode
encode = lambda x: to_unicode(x, encoding) if encoding else x
self.uri = encode(uri)
self.http_method = encode(http_method)
self.headers = CaseInsensitiveDict(encode(headers or {}))
self.body = encode(body)
self.decoded_body = extract_params(encode(body))
self.oauth_params = []
self._params = {}
self._params.update(dict(urldecode(self.uri_query)))
self._params.update(dict(self.decoded_body or []))
self._params.update(self.headers)
def __getattr__(self, name):
return self._params.get(name, None)
def __repr__(self):
return '<oauthlib.Request url="%s", http_method="%s", headers="%s", body="%s">' % (
self.uri, self.http_method, self.headers, self.body)
@property
def uri_query(self):
return urlparse.urlparse(self.uri).query
@property
def uri_query_params(self):
if not self.uri_query:
return []
return urlparse.parse_qsl(self.uri_query, keep_blank_values=True,
strict_parsing=True)
@property
def duplicate_params(self):
seen_keys = collections.defaultdict(int)
all_keys = (p[0]
for p in (self.decoded_body or []) + self.uri_query_params)
for k in all_keys:
seen_keys[k] += 1
return [k for k, c in seen_keys.items() if c > 1]
| bsd-3-clause | 754,211,775,766,494,000 | 30.391509 | 97 | 0.632231 | false |
endlessm/chromium-browser | tools/cygprofile/cluster.py | 10 | 16117 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Clustering for function call-graph.
See the Clustering class for a detailed description.
"""
import collections
import itertools
import logging
Neighbor = collections.namedtuple('Neighbor', ('src', 'dst', 'dist'))
CalleeInfo = collections.namedtuple('CalleeInfo',
('index', 'callee_symbol',
'misses', 'caller_and_count'))
CallerInfo = collections.namedtuple('CallerInfo', ('caller_symbol', 'count'))
class Clustering(object):
"""Cluster symbols.
We are given a list of the first function calls, ordered by
time. There are multiple lists: different benchmarks run multiple
times, as well as list from startup and then a second list after
startup (5 seconds) that runs until the benchmark memory dump.
We have evidence (see below) that this simple ordering of code from a
single profiling run (a load of a website) improves performance,
presumably by improving code locality. To reconstruct this ordering
using profiling information from multiple files, we cluster. Doing
this clustering over multiple runs on the speedometer benchmark
recovered speedometer performance compared with the legacy benchmark.
For each offset list, we record the distances between each symbol and
its neighborhood of the following k symbols (k=19, chosen
arbitrarily). For example, if we have an offset list of symbols
'abcdef', we add the neighbors (a->b, 1), (a->c, 2), (b->c, 1), (b->e,
3), etc. Then we average distances of a given neighbor pair over all
seen symbol lists. If we see an inversion (for example, (b->a, 3), we
use this as a distance of -3). For each file that a given pair does
not appear, that is, if the pair does not appear in that file or they
are separated by 20 symbols, we use a large distance D (D=1000). The
distances are then averages over all files. If the average is
negative, the neighbor pair is inverted and the distance flipped. The
idea is that if two symbols appear near each other in all profiling
runs, there is high confidence that they are usually called
together. If they don't appear near in some runs, there is less
confidence that they should be colocated. Symbol distances are taken
only as following distances to avoid confusing double-counting
possibilities as well as to give a clear ordering to combining
clusters.
Neighbors are sorted, and starting with the shortest distance, symbols
are coalesced into clusters. If the neighbor pair is (a->b), the
clusters containing a and b are combined in that order. If a and b are
already in the same cluster, nothing happens. After processing all
neighbors there is usually only one cluster; if there are multiple
clusters they are combined in order from largest to smallest (although
that choice may not matter).
Cluster merging may optionally be halted if they get above the size
of an android page. As of November 2018 this slightly reduces
performance and should not be used (1.7% decline in speedometer2,
450K native library memory regression).
"""
NEIGHBOR_DISTANCE = 20
FAR_DISTANCE = 1000
MAX_CLUSTER_SIZE = 4096 # 4k pages on android.
class _Cluster(object):
def __init__(self, syms, size):
assert len(set(syms)) == len(syms), 'Duplicated symbols in cluster'
self._syms = syms
self._size = size
@property
def syms(self):
return self._syms
@property
def binary_size(self):
return self._size
@classmethod
def ClusteredSymbolLists(cls, sym_lists, size_map):
c = cls()
c.AddSymbolLists(sym_lists)
return c.ClusterToList(size_map)
@classmethod
def ClusterSymbolCallGraph(cls, call_graph, whitelist):
c = cls()
c.AddSymbolCallGraph(call_graph, whitelist)
return c.ClusterToList()
def __init__(self):
self._num_lists = None
self._neighbors = None
self._cluster_map = {}
self._symbol_size = lambda _: 0 # Maps a symbol to a size.
def _MakeCluster(self, syms):
c = self._Cluster(syms, sum(self._symbol_size(s) for s in syms))
for s in syms:
self._cluster_map[s] = c
return c
def ClusterOf(self, s):
if isinstance(s, self._Cluster):
assert self._cluster_map[s.syms[0]] == s
return s
if s in self._cluster_map:
return self._cluster_map[s]
return self._MakeCluster([s])
def Combine(self, a, b):
"""Combine clusters.
Args:
a, b: Clusters or str. The canonical cluster (ClusterOf) will be
used to do the combining.
Returns:
A merged cluster from a and b, or None if a and b are in the same cluster.
"""
canonical_a = self.ClusterOf(a)
canonical_b = self.ClusterOf(b)
if canonical_a == canonical_b:
return None
return self._MakeCluster(canonical_a._syms + canonical_b._syms)
def AddSymbolLists(self, sym_lists):
self._num_lists = len(sym_lists)
self._neighbors = self._CoalesceNeighbors(
self._ConstructNeighbors(sym_lists))
def AddSymbolCallGraph(self, call_graph, whitelist):
self._num_lists = len(call_graph)
self._neighbors = self._ConstructNeighborsFromGraph(call_graph, whitelist)
def _ConstructNeighborsFromGraph(self, call_graph, whitelist):
neighbors = []
pairs = collections.defaultdict()
# Each list item is a list of dict.
for process_items in call_graph:
for callee_info in process_items:
callee = callee_info.callee_symbol
for caller_info in callee_info.caller_and_count:
caller = caller_info.caller_symbol
if caller in whitelist or callee == caller:
continue
# Multiply by -1, the bigger the count the smaller the distance
# should be.
dist = caller_info.count * -1
if (caller, callee) in pairs:
pairs[(caller, callee)] += dist
elif (callee, caller) in pairs:
pairs[(callee, caller)] += dist
else:
pairs[(caller, callee)] = dist
for (s, t) in pairs:
assert s != t and (t, s) not in pairs, ('Unexpected shuffled pair:'
' ({}, {})'.format(s, t))
neighbors.append(Neighbor(s, t, pairs[(s, t)]))
return neighbors
def _ConstructNeighbors(self, sym_lists):
neighbors = []
for sym_list in sym_lists:
for i, s in enumerate(sym_list):
for j in xrange(i + 1, min(i + self.NEIGHBOR_DISTANCE, len(sym_list))):
if s == sym_list[j]:
# Free functions that are static inline seem to be the only
# source of these duplicates.
continue
neighbors.append(Neighbor(s, sym_list[j], j - i))
logging.info('Constructed %s symbol neighbors', len(neighbors))
return neighbors
def _CoalesceNeighbors(self, neighbors):
pairs = collections.defaultdict(list)
for n in neighbors:
pairs[(n.src, n.dst)].append(n.dist)
coalesced = []
logging.info('Will coalesce over %s neighbor pairs', len(pairs))
count = 0
for (s, t) in pairs:
assert s != t, '{} != {}'.format(s, t)
if (t, s) in pairs and t < s:
# Only process each unordered pair once.
continue
count += 1
if not (count % 1e6):
logging.info('tick')
distances = []
if (s, t) in pairs:
distances.extend(pairs[(s, t)])
if (t, s) in pairs:
distances.extend(-d for d in pairs[(t, s)])
if distances:
num_missing = self._num_lists - len(distances)
avg_distance = (float(sum(distances)) +
self.FAR_DISTANCE * num_missing) / self._num_lists
if avg_distance > 0:
coalesced.append(Neighbor(s, t, avg_distance))
else:
coalesced.append(Neighbor(t, s, avg_distance))
return coalesced
def ClusterToList(self, size_map=None):
"""Merge the clusters with the smallest distances.
Args:
size_map ({symbol: size} or None): Map symbol names to their size. Cluster
growth will be stopped at MAX_CLUSTER_SIZE. If None, sizes are taken to
be zero and cluster growth is not stopped.
Returns:
An ordered list of symbols from AddSymbolLists, appropriately clustered.
"""
if size_map:
self._symbol_size = lambda s: size_map[s]
if not self._num_lists or not self._neighbors:
# Some sort of trivial set of symbol lists, such as all being
# length 1. Return an empty ordering.
return []
logging.info('Sorting %s neighbors', len(self._neighbors))
self._neighbors.sort(key=lambda n: (-n.dist, n.src, n.dst))
logging.info('Clustering...')
count = 0
while self._neighbors:
count += 1
if not (count % 1e6):
logging.info('tock')
neighbor = self._neighbors.pop()
src = self.ClusterOf(neighbor.src)
dst = self.ClusterOf(neighbor.dst)
if (src == dst or
src.binary_size + dst.binary_size > self.MAX_CLUSTER_SIZE):
continue
self.Combine(src, dst)
if size_map:
clusters_by_size = sorted(list(set(self._cluster_map.values())),
key=lambda c: -c.binary_size)
else:
clusters_by_size = sorted(list(set(self._cluster_map.values())),
key=lambda c: -len(c.syms))
logging.info('Produced %s clusters', len(clusters_by_size))
logging.info('Top sizes: %s', ['{}/{}'.format(len(c.syms), c.binary_size)
for c in clusters_by_size[:4]])
logging.info('Bottom sizes: %s', ['{}/{}'.format(len(c.syms), c.binary_size)
for c in clusters_by_size[-4:]])
ordered_syms = []
for c in clusters_by_size:
ordered_syms.extend(c.syms)
assert len(ordered_syms) == len(set(ordered_syms)), 'Duplicated symbols!'
return ordered_syms
def _GetOffsetSymbolName(processor, dump_offset):
dump_offset_to_symbol_info = \
processor.GetDumpOffsetToSymboInfolIncludingWhitelist()
offset_to_primary = processor.OffsetToPrimaryMap()
idx = dump_offset / 2
assert dump_offset >= 0 and idx < len(dump_offset_to_symbol_info), (
'Dump offset out of binary range')
symbol_info = dump_offset_to_symbol_info[idx]
assert symbol_info, ('A return address (offset = 0x{:08x}) does not map '
'to any symbol'.format(dump_offset))
assert symbol_info.offset in offset_to_primary, (
'Offset not found in primary map!')
return offset_to_primary[symbol_info.offset].name
def _GetSymbolsCallGraph(profiles, processor):
"""Maps each offset in the call graph to the corresponding symbol name.
Args:
profiles (ProfileManager) Manager of the profile dump files.
processor (SymbolOffsetProcessor) Symbol table processor for the dumps.
Returns:
A dict that maps each process type (ex: browser, renderer, etc.) to a list
    of processes of that type. Each process is a list representing its call
    graph, where each item is a CalleeInfo holding the callee symbol, its
    (caller, count) pairs and the number of misses.
"""
  offsets_graph = profiles.GetProcessOffsetGraph()
process_symbols_graph = collections.defaultdict(list)
  # |process_type| can be: browser, renderer, gpu-process, etc.
for process_type in offsets_graph:
for process in offsets_graph[process_type]:
process = sorted(process, key=lambda k: long(k['index']))
graph_list = []
for el in process:
index = long(el['index'])
callee_symbol = _GetOffsetSymbolName(processor,
long(el['callee_offset']))
misses = 0
caller_and_count = []
for bucket in el['caller_and_count']:
caller_offset = long(bucket['caller_offset'])
count = long(bucket['count'])
if caller_offset == 0:
misses += count
continue
caller_symbol_name = _GetOffsetSymbolName(processor, caller_offset)
caller_info = CallerInfo(caller_symbol=caller_symbol_name,
count=count)
caller_and_count.append(caller_info)
callee_info = CalleeInfo(index=index,
callee_symbol=callee_symbol,
misses=misses,
caller_and_count=caller_and_count)
graph_list.append(callee_info)
process_symbols_graph[process_type].append(graph_list)
return process_symbols_graph
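# Editor's illustration of the structure returned above (symbol names are
# made up):
#   {'browser': [[CalleeInfo(index=0, callee_symbol='foo', misses=1,
#                            caller_and_count=[CallerInfo(caller_symbol='bar',
#                                                         count=3)])]],
#    'renderer': [...]}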
def _ClusterOffsetsFromCallGraph(profiles, processor):
symbols_call_graph = _GetSymbolsCallGraph(profiles, processor)
# Process names from the profile dumps that are treated specially.
_RENDERER = 'renderer'
_BROWSER = 'browser'
assert _RENDERER in symbols_call_graph
assert _BROWSER in symbols_call_graph
whitelist = processor.GetWhitelistSymbols()
renderer_clustering = Clustering.ClusterSymbolCallGraph(
symbols_call_graph[_RENDERER], whitelist)
browser_clustering = Clustering.ClusterSymbolCallGraph(
symbols_call_graph[_BROWSER], whitelist)
other_lists = []
for process in symbols_call_graph:
if process not in (_RENDERER, _BROWSER):
other_lists.extend(symbols_call_graph[process])
if other_lists:
other_clustering = Clustering.ClusterSymbolCallGraph(other_lists, whitelist)
else:
other_clustering = []
# Start with the renderer cluster to favor rendering performance.
final_ordering = [s for s in renderer_clustering]
seen = set(final_ordering)
final_ordering.extend(s for s in browser_clustering if s not in seen)
seen |= set(browser_clustering)
final_ordering.extend(s for s in other_clustering if s not in seen)
return final_ordering
def _ClusterOffsetsLists(profiles, processor, limit_cluster_size=False):
raw_offsets = profiles.GetProcessOffsetLists()
process_symbols = collections.defaultdict(list)
seen_symbols = set()
for p in raw_offsets:
for offsets in raw_offsets[p]:
symbol_names = processor.GetOrderedSymbols(
processor.GetReachedOffsetsFromDump(offsets))
process_symbols[p].append(symbol_names)
seen_symbols |= set(symbol_names)
if limit_cluster_size:
name_map = processor.NameToSymbolMap()
size_map = {name: name_map[name].size for name in seen_symbols}
else:
size_map = None
# Process names from the profile dumps that are treated specially.
_RENDERER = 'renderer'
_BROWSER = 'browser'
assert _RENDERER in process_symbols
assert _BROWSER in process_symbols
renderer_clustering = Clustering.ClusteredSymbolLists(
process_symbols[_RENDERER], size_map)
browser_clustering = Clustering.ClusteredSymbolLists(
process_symbols[_BROWSER], size_map)
other_lists = []
for process, syms in process_symbols.items():
if process not in (_RENDERER, _BROWSER):
other_lists.extend(syms)
if other_lists:
other_clustering = Clustering.ClusteredSymbolLists(other_lists, size_map)
else:
other_clustering = []
# Start with the renderer cluster to favor rendering performance.
final_ordering = [s for s in renderer_clustering]
seen = set(final_ordering)
final_ordering.extend(s for s in browser_clustering if s not in seen)
seen |= set(browser_clustering)
final_ordering.extend(s for s in other_clustering if s not in seen)
return final_ordering
def ClusterOffsets(profiles, processor, limit_cluster_size=False,
call_graph=False):
"""Cluster profile offsets.
Args:
profiles (ProfileManager) Manager of the profile dump files.
processor (SymbolOffsetProcessor) Symbol table processor for the dumps.
call_graph (bool) whether the call graph instrumentation was used.
Returns:
A list of clustered symbol offsets.
"""
if not call_graph:
return _ClusterOffsetsLists(profiles, processor, limit_cluster_size)
else:
return _ClusterOffsetsFromCallGraph(profiles, processor)
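# Editor's sketch of the intended call (minimal and illustrative only; the
# ProfileManager and SymbolOffsetProcessor instances come from elsewhere in
# the tool):
#   ordering = ClusterOffsets(profiles, processor, limit_cluster_size=True)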
| bsd-3-clause | -1,921,120,404,200,820,000 | 37.742788 | 80 | 0.664019 | false |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/core/backends/android_app_backend.py | 5 | 3603 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.core import android_process
from telemetry.core.backends import android_browser_backend_settings
from telemetry.core.backends import android_command_line_backend
from telemetry.core.backends import app_backend
from telemetry.core import util
class AndroidAppBackend(app_backend.AppBackend):
def __init__(self, android_platform_backend, start_intent,
is_app_ready_predicate=None):
super(AndroidAppBackend, self).__init__(
start_intent.package, android_platform_backend)
self._default_process_name = start_intent.package
self._start_intent = start_intent
self._is_app_ready_predicate = is_app_ready_predicate
self._is_running = False
self._existing_processes_by_pid = {}
@property
def _adb(self):
return self.platform_backend.adb
def _IsAppReady(self):
if self._is_app_ready_predicate is None:
return True
return self._is_app_ready_predicate(self.app)
def Start(self):
"""Start an Android app and wait for it to finish launching.
AppStory derivations can customize the wait-for-ready-state to wait
for a more specific event if needed.
"""
webview_startup_args = self.GetWebviewStartupArgs()
backend_settings = android_browser_backend_settings.WebviewBackendSettings(
'android-webview')
with android_command_line_backend.SetUpCommandLineFlags(
self._adb, backend_settings, webview_startup_args):
# TODO(slamm): check if can use "blocking=True" instead of needing to
# sleep. If "blocking=True" does not work, switch sleep to "ps" check.
self._adb.device().StartActivity(self._start_intent, blocking=False)
util.WaitFor(self._IsAppReady, timeout=60)
self._is_running = True
def Close(self):
self._is_running = False
self.platform_backend.KillApplication(self._start_intent.package)
def IsAppRunning(self):
return self._is_running
def GetStandardOutput(self):
raise NotImplementedError
def GetStackTrace(self):
raise NotImplementedError
def GetProcesses(self, process_filter=None):
if process_filter is None:
process_filter = lambda n: re.match('^' + self._default_process_name, n)
processes = set()
ps_output = self.platform_backend.GetPsOutput(['pid', 'name'])
for pid, name in ps_output:
if not process_filter(name):
continue
if pid not in self._existing_processes_by_pid:
self._existing_processes_by_pid[pid] = android_process.AndroidProcess(
self, pid, name)
processes.add(self._existing_processes_by_pid[pid])
return processes
def GetProcess(self, subprocess_name):
assert subprocess_name.startswith(':')
process_name = self._default_process_name + subprocess_name
return self.GetProcesses(lambda n: n == process_name).pop()
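  # Editor's note (illustrative, made-up package name): for a start intent
  # package 'com.example.app', GetProcess(':sandboxed_process0') looks up the
  # Android subprocess named 'com.example.app:sandboxed_process0', while
  # GetProcesses() with no filter matches every process whose name starts
  # with 'com.example.app'.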
def GetWebViews(self):
webviews = set()
for process in self.GetProcesses():
webviews.update(process.GetWebViews())
return webviews
def GetWebviewStartupArgs(self):
args = []
# Turn on GPU benchmarking extension for all runs. The only side effect of
# the extension being on is that render stats are tracked. This is believed
# to be effectively free. And, by doing so here, it avoids us having to
# programmatically inspect a pageset's actions in order to determine if it
# might eventually scroll.
args.append('--enable-gpu-benchmarking')
return args
| bsd-3-clause | 182,121,754,240,124,130 | 34.323529 | 79 | 0.710797 | false |
antoan2/incubator-mxnet | example/fcn-xs/init_fcnxs.py | 52 | 4639 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
import numpy as np
import sys
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# make a bilinear interpolation kernel, return a numpy.ndarray
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1.0
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
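# Editor's note (illustrative, not part of the original example): the kernel
# is a separable bilinear window that peaks at 1.0 in the centre, e.g.
#   upsample_filt(3) ->
#     [[0.25, 0.5, 0.25],
#      [0.5 , 1. , 0.5 ],
#      [0.25, 0.5, 0.25]]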
def init_from_vgg16(ctx, fcnxs_symbol, vgg16fc_args, vgg16fc_auxs):
fcnxs_args = vgg16fc_args.copy()
fcnxs_auxs = vgg16fc_auxs.copy()
for k,v in fcnxs_args.items():
if(v.context != ctx):
fcnxs_args[k] = mx.nd.zeros(v.shape, ctx)
v.copyto(fcnxs_args[k])
for k,v in fcnxs_auxs.items():
if(v.context != ctx):
fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx)
v.copyto(fcnxs_auxs[k])
data_shape=(1,3,500,500)
arg_names = fcnxs_symbol.list_arguments()
arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape)
rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)
if x[0] in ['score_weight', 'score_bias', 'score_pool4_weight', 'score_pool4_bias', \
'score_pool3_weight', 'score_pool3_bias']])
fcnxs_args.update(rest_params)
deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes)
if x[0] in ["bigscore_weight", 'score2_weight', 'score4_weight']])
for k, v in deconv_params.items():
filt = upsample_filt(v[3])
initw = np.zeros(v)
        initw[range(v[0]), range(v[1]), :, :] = filt  # careful: fancy indexing assigns the kernel to each (i, i) channel pair
fcnxs_args[k] = mx.nd.array(initw, ctx)
return fcnxs_args, fcnxs_auxs
def init_from_fcnxs(ctx, fcnxs_symbol, fcnxs_args_from, fcnxs_auxs_from):
""" use zero initialization for better convergence, because it tends to oputut 0,
and the label 0 stands for background, which may occupy most size of one image.
"""
fcnxs_args = fcnxs_args_from.copy()
fcnxs_auxs = fcnxs_auxs_from.copy()
for k,v in fcnxs_args.items():
if(v.context != ctx):
fcnxs_args[k] = mx.nd.zeros(v.shape, ctx)
v.copyto(fcnxs_args[k])
for k,v in fcnxs_auxs.items():
if(v.context != ctx):
fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx)
v.copyto(fcnxs_auxs[k])
data_shape=(1,3,500,500)
arg_names = fcnxs_symbol.list_arguments()
arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape)
rest_params = {}
deconv_params = {}
# this is fcn8s init from fcn16s
if 'score_pool3_weight' in arg_names:
rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)
if x[0] in ['score_pool3_bias', 'score_pool3_weight']])
deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \
in ["bigscore_weight", 'score4_weight']])
# this is fcn16s init from fcn32s
elif 'score_pool4_weight' in arg_names:
rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)
if x[0] in ['score_pool4_weight', 'score_pool4_bias']])
deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \
in ["bigscore_weight", 'score2_weight']])
# this is fcn32s init
else:
logging.error("you are init the fcn32s model, so you should use init_from_vgg16()")
sys.exit()
fcnxs_args.update(rest_params)
for k, v in deconv_params.items():
filt = upsample_filt(v[3])
initw = np.zeros(v)
        initw[range(v[0]), range(v[1]), :, :] = filt  # careful: fancy indexing assigns the kernel to each (i, i) channel pair
fcnxs_args[k] = mx.nd.array(initw, ctx)
return fcnxs_args, fcnxs_auxs
| apache-2.0 | 8,981,062,588,885,491,000 | 42.764151 | 97 | 0.624488 | false |
Sonicbids/django | django/contrib/gis/db/models/fields.py | 5 | 13852 | from django.db.models.fields import Field
from django.db.models.expressions import ExpressionNode
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.lookups import gis_lookups
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.utils import six
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
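# Editor's note (illustrative): the cached tuple has the shape
# (units, units_name, spheroid); e.g. a geographic SRID such as 4326 would
# typically report a units name of 'degree', but the exact values come from
# the backend's spatial_ref_sys table, so none are hardcoded here.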
class GeoSelectFormatMixin(object):
def select_format(self, compiler, sql, params):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
connection = compiler.connection
srid = compiler.query.get_context('transformed_srid')
if srid:
sel_fmt = '%s(%%s, %s)' % (connection.ops.transform, srid)
else:
sel_fmt = '%s'
if connection.ops.select:
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = connection.ops.select % sel_fmt
return sel_fmt % sql, params
class GeometryField(GeoSelectFormatMixin, Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
form_class = forms.GeometryField
# Geodetic units.
geodetic_units = ('decimal degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GeometryField, self).deconstruct()
# Always include SRID for less fragility; include others if they're
# not the default values.
kwargs['srid'] = self.srid
if self.dim != 2:
kwargs['dim'] = self.dim
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
if self.geography is not False:
kwargs['geography'] = self.geography
return name, path, args, kwargs
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection).lower() in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
value = super(GeometryField, self).get_prep_value(value)
if isinstance(value, ExpressionNode):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def from_db_value(self, value, connection, context):
if value and not isinstance(value, Geometry):
value = Geometry(value)
return value
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
### Routines overloaded from Field ###
def contribute_to_class(self, cls, name, **kwargs):
super(GeometryField, self).contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, GeometryProxy(Geometry, self))
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def formfield(self, **kwargs):
defaults = {'form_class': self.form_class,
'geom_type': self.geom_type,
'srid': self.srid,
}
defaults.update(kwargs)
if (self.dim > 2 and 'widget' not in kwargs and
not getattr(defaults['form_class'].widget, 'supports_3d', False)):
defaults['widget'] = forms.Textarea
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
# special case for isnull lookup
if lookup_type == 'isnull':
return []
elif lookup_type in self.class_lookups:
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if self.class_lookups[lookup_type].distance:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, ExpressionNode):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'isnull':
return bool(value)
else:
return self.get_prep_value(value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if not value:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
def get_placeholder(self, value, compiler, connection):
"""
Returns the placeholder for the geometry column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value, compiler)
for klass in gis_lookups.values():
GeometryField.register_lookup(klass)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
form_class = forms.PointField
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
form_class = forms.LineStringField
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
form_class = forms.PolygonField
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
form_class = forms.MultiPointField
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
form_class = forms.MultiLineStringField
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
form_class = forms.MultiPolygonField
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
class ExtentField(GeoSelectFormatMixin, Field):
"Used as a return value from an extent aggregate"
description = _("Extent Aggregate Field")
def get_internal_type(self):
return "ExtentField"
| bsd-3-clause | -1,494,347,700,590,514,200 | 36.539295 | 117 | 0.632039 | false |
jeffdwyatt/taiga-back | tests/integration/test_custom_attributes_user_stories.py | 20 | 7254 | # Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from .. import factories as f
import pytest
pytestmark = pytest.mark.django_db
#########################################################
# User Story Custom Attributes
#########################################################
def test_userstory_custom_attribute_duplicate_name_error_on_create(client):
custom_attr_1 = f.UserStoryCustomAttributeFactory()
member = f.MembershipFactory(user=custom_attr_1.project.owner,
project=custom_attr_1.project,
is_owner=True)
url = reverse("userstory-custom-attributes-list")
data = {"name": custom_attr_1.name,
"project": custom_attr_1.project.pk}
client.login(member.user)
response = client.json.post(url, json.dumps(data))
assert response.status_code == 400
def test_userstory_custom_attribute_duplicate_name_error_on_update(client):
custom_attr_1 = f.UserStoryCustomAttributeFactory()
custom_attr_2 = f.UserStoryCustomAttributeFactory(project=custom_attr_1.project)
member = f.MembershipFactory(user=custom_attr_1.project.owner,
project=custom_attr_1.project,
is_owner=True)
url = reverse("userstory-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk})
data = {"name": custom_attr_1.name}
client.login(member.user)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400
def test_userstory_custom_attribute_duplicate_name_error_on_move_between_projects(client):
custom_attr_1 = f.UserStoryCustomAttributeFactory()
custom_attr_2 = f.UserStoryCustomAttributeFactory(name=custom_attr_1.name)
member = f.MembershipFactory(user=custom_attr_1.project.owner,
project=custom_attr_1.project,
is_owner=True)
f.MembershipFactory(user=custom_attr_1.project.owner,
project=custom_attr_2.project,
is_owner=True)
url = reverse("userstory-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk})
data = {"project": custom_attr_1.project.pk}
client.login(member.user)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400
#########################################################
# User Story Custom Attributes Values
#########################################################
def test_userstory_custom_attributes_values_when_create_us(client):
user_story = f.UserStoryFactory()
assert user_story.custom_attributes_values.attributes_values == {}
def test_userstory_custom_attributes_values_update(client):
user_story = f.UserStoryFactory()
member = f.MembershipFactory(user=user_story.project.owner,
project=user_story.project,
is_owner=True)
custom_attr_1 = f.UserStoryCustomAttributeFactory(project=user_story.project)
ct1_id = "{}".format(custom_attr_1.id)
custom_attr_2 = f.UserStoryCustomAttributeFactory(project=user_story.project)
ct2_id = "{}".format(custom_attr_2.id)
custom_attrs_val = user_story.custom_attributes_values
url = reverse("userstory-custom-attributes-values-detail", args=[user_story.id])
data = {
"attributes_values": {
ct1_id: "test_1_updated",
ct2_id: "test_2_updated"
},
"version": custom_attrs_val.version
}
assert user_story.custom_attributes_values.attributes_values == {}
client.login(member.user)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200
assert response.data["attributes_values"] == data["attributes_values"]
user_story = user_story.__class__.objects.get(id=user_story.id)
assert user_story.custom_attributes_values.attributes_values == data["attributes_values"]
def test_userstory_custom_attributes_values_update_with_error_invalid_key(client):
user_story = f.UserStoryFactory()
member = f.MembershipFactory(user=user_story.project.owner,
project=user_story.project,
is_owner=True)
custom_attr_1 = f.UserStoryCustomAttributeFactory(project=user_story.project)
ct1_id = "{}".format(custom_attr_1.id)
custom_attr_2 = f.UserStoryCustomAttributeFactory(project=user_story.project)
custom_attrs_val = user_story.custom_attributes_values
url = reverse("userstory-custom-attributes-values-detail", args=[user_story.id])
data = {
"attributes_values": {
ct1_id: "test_1_updated",
"123456": "test_2_updated"
},
"version": custom_attrs_val.version
}
assert user_story.custom_attributes_values.attributes_values == {}
client.login(member.user)
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 400
#########################################################
# Test tristres triggers :-P
#########################################################
def test_trigger_update_userstorycustomvalues_after_remove_userstorycustomattribute(client):
user_story = f.UserStoryFactory()
member = f.MembershipFactory(user=user_story.project.owner,
project=user_story.project,
is_owner=True)
custom_attr_1 = f.UserStoryCustomAttributeFactory(project=user_story.project)
ct1_id = "{}".format(custom_attr_1.id)
custom_attr_2 = f.UserStoryCustomAttributeFactory(project=user_story.project)
ct2_id = "{}".format(custom_attr_2.id)
custom_attrs_val = user_story.custom_attributes_values
custom_attrs_val.attributes_values = {ct1_id: "test_1", ct2_id: "test_2"}
custom_attrs_val.save()
assert ct1_id in custom_attrs_val.attributes_values.keys()
assert ct2_id in custom_attrs_val.attributes_values.keys()
url = reverse("userstory-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk})
client.login(member.user)
response = client.json.delete(url)
assert response.status_code == 204
custom_attrs_val = custom_attrs_val.__class__.objects.get(id=custom_attrs_val.id)
assert ct1_id in custom_attrs_val.attributes_values.keys()
assert ct2_id not in custom_attrs_val.attributes_values.keys()
| agpl-3.0 | -7,595,145,645,926,785,000 | 39.513966 | 93 | 0.644788 | false |
gmr/tredis | tests/sets_tests.py | 1 | 9695 | import mock
from tornado import testing
from tredis import exceptions
from . import base
class SetTests(base.AsyncTestCase):
@testing.gen_test
def test_sadd_single(self):
key, value = self.uuid4(2)
result = yield self.client.sadd(key, value)
self.assertEqual(result, 1)
@testing.gen_test
def test_sadd_multiple(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.sadd(key, value1, value2, value3)
self.assertTrue(result)
@testing.gen_test
def test_sadd_multiple_dupe(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.sadd(key, value1, value2, value3, value3)
self.assertEqual(result, 3)
@testing.gen_test
def test_sadd_with_error(self):
key, value = self.uuid4(2)
self._execute_result = exceptions.RedisError('Test Exception')
with mock.patch.object(self.client, '_execute', self._execute):
with self.assertRaises(exceptions.RedisError):
yield self.client.sadd(key, value)
@testing.gen_test
def test_sdiff(self):
key1, key2, value1, value2, value3 = self.uuid4(5)
result = yield self.client.sadd(key1, value1, value2)
self.assertTrue(result)
result = yield self.client.sadd(key2, value1, value3)
self.assertTrue(result)
result = yield self.client.sdiff(key1, key2)
self.assertListEqual(result, [value2])
@testing.gen_test
def test_sdiffstore(self):
key1, key2, key3, value1, value2, value3 = self.uuid4(6)
result = yield self.client.sadd(key1, value1, value2)
self.assertTrue(result)
result = yield self.client.sadd(key2, value1, value3)
self.assertTrue(result)
result = yield self.client.sdiffstore(key3, key1, key2)
self.assertEqual(result, 1)
result = yield self.client.sismember(key3, value2)
self.assertTrue(result)
@testing.gen_test
def test_sinter(self):
key1, key2, value1, value2, value3 = self.uuid4(5)
result = yield self.client.sadd(key1, value1, value2)
self.assertTrue(result)
result = yield self.client.sadd(key2, value2, value3)
self.assertTrue(result)
result = yield self.client.sinter(key1, key2)
self.assertListEqual(result, [value2])
@testing.gen_test
def test_sinterstore(self):
key1, key2, key3, value1, value2, value3 = self.uuid4(6)
result = yield self.client.sadd(key1, value1, value2)
self.assertTrue(result)
result = yield self.client.sadd(key2, value2, value3)
self.assertTrue(result)
result = yield self.client.sinterstore(key3, key1, key2)
self.assertEqual(result, 1)
result = yield self.client.sismember(key3, value2)
self.assertTrue(result)
@testing.gen_test
def test_sadd_sismember_true(self):
key, value = self.uuid4(2)
result = yield self.client.sadd(key, value)
self.assertTrue(result)
result = yield self.client.sismember(key, value)
self.assertTrue(result)
@testing.gen_test
def test_sadd_sismember_false(self):
key, value1, value2 = self.uuid4(3)
result = yield self.client.sadd(key, value1)
self.assertTrue(result)
result = yield self.client.sismember(key, value2)
self.assertFalse(result)
@testing.gen_test
def test_scard(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.sadd(key, value1, value2, value3)
self.assertTrue(result)
result = yield self.client.scard(key)
self.assertEqual(result, 3)
@testing.gen_test
def test_smembers(self):
key, value1, value2, value3 = self.uuid4(4)
result = yield self.client.sadd(key, value1, value2, value3)
self.assertTrue(result)
result = yield self.client.smembers(key)
self.assertListEqual(sorted(result), sorted([value1, value2, value3]))
@testing.gen_test
def test_smove(self):
key1, key2, value1 = self.uuid4(3)
result = yield self.client.sadd(key1, value1)
self.assertTrue(result)
result = yield self.client.smove(key1, key2, value1)
self.assertTrue(result)
result = yield self.client.sismember(key1, value1)
self.assertFalse(result)
result = yield self.client.sismember(key2, value1)
self.assertTrue(result)
@testing.gen_test
def test_spop(self):
key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
member = yield self.client.spop(key)
self.assertIn(member, values)
members = yield self.client.smembers(key)
self.assertNotIn(member, members)
@testing.gen_test
def test_srandmember(self):
key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
member = yield self.client.srandmember(key)
self.assertIn(member, values)
members = yield self.client.smembers(key)
self.assertIn(member, members)
@testing.gen_test
def test_srandmember_multi(self):
key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
members = yield self.client.srandmember(key, 2)
for member in members:
self.assertIn(member, values)
self.assertEqual(len(members), 2)
@testing.gen_test
def test_srem(self):
key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
result = yield self.client.srem(key, value2, value3)
self.assertTrue(result)
members = yield self.client.smembers(key)
self.assertNotIn(value2, members)
self.assertNotIn(value3, members)
@testing.gen_test
def test_srem_dupe(self):
        key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
result = yield self.client.srem(key, value2, value3, value3)
self.assertEqual(result, 2)
members = yield self.client.smembers(key)
self.assertNotIn(value2, members)
self.assertNotIn(value3, members)
@testing.gen_test
def test_srem_with_error(self):
key, value = self.uuid4(2)
self._execute_result = exceptions.RedisError('Test Exception')
with mock.patch.object(self.client, '_execute', self._execute):
with self.assertRaises(exceptions.RedisError):
yield self.client.srem(key, value)
@testing.gen_test
def test_sscan(self):
key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
cursor, result = yield self.client.sscan(key, 0)
self.assertListEqual(sorted(result), sorted(values))
self.assertEqual(cursor, 0)
@testing.gen_test
def test_sscan_with_pattern(self):
key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
cursor, result = yield self.client.sscan(key, 0, '*')
self.assertListEqual(sorted(result), sorted(values))
self.assertEqual(cursor, 0)
@testing.gen_test
def test_sscan_with_pattern_and_count(self):
key, value1, value2, value3 = self.uuid4(4)
values = [value1, value2, value3]
result = yield self.client.sadd(key, *values)
self.assertTrue(result)
cursor, result = yield self.client.sscan(key, 0, '*', 10)
self.assertListEqual(sorted(result), sorted(values))
self.assertEqual(cursor, 0)
@testing.gen_test
def test_sscan_with_error(self):
key = self.uuid4()
self._execute_result = exceptions.RedisError('Test Exception')
with mock.patch.object(self.client, '_execute', self._execute):
with self.assertRaises(exceptions.RedisError):
yield self.client.sscan(key, 0)
@testing.gen_test
def test_sunion(self):
key1, key2, key3, value1, value2, value3 = self.uuid4(6)
result = yield self.client.sadd(key1, value1, value2)
self.assertTrue(result)
result = yield self.client.sadd(key2, value2, value3)
self.assertTrue(result)
result = yield self.client.sunion(key1, key2)
self.assertListEqual(sorted(result), sorted([value1, value2, value3]))
@testing.gen_test
    def test_sunionstore(self):
key1, key2, key3, value1, value2, value3 = self.uuid4(6)
result = yield self.client.sadd(key1, value1, value2)
self.assertTrue(result)
result = yield self.client.sadd(key2, value2, value3)
self.assertTrue(result)
result = yield self.client.sunionstore(key3, key1, key2)
self.assertEqual(result, 3)
result = yield self.client.sismember(key3, value1)
self.assertTrue(result)
result = yield self.client.sismember(key3, value2)
self.assertTrue(result)
result = yield self.client.sismember(key3, value3)
self.assertTrue(result)
| bsd-3-clause | -1,511,673,348,091,621,400 | 37.169291 | 78 | 0.637648 | false |
spanezz/django-housekeeping | django_housekeeping/task.py | 1 | 2144 | # Pluggable housekeeping framework for Django sites
#
# Copyright (C) 2013--2014 Enrico Zini <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
from __future__ import annotations
import inspect
# Order of stages.
#
# For any string listed here, a run_$STRING method will be called, in the
# same order as the STAGES list.
#
# In each stage, tasks are run in dependency order.
STAGES = ["main"]
class Task:
"""
A housekeeping task. Any subclass of this in an appname.housekeeping module
will be automatically found and run during housekeeping
"""
# Define NAME to have this task made available to other tasks as a member
# of Housekeeping
NAME = None
# Unique, human and machine readable identifier for this task,
# automatically filled by Housekeeping during task discovery
IDENTIFIER = None
# Task classes that should be run before this one
DEPENDS = []
def __init__(self, hk, **kw):
"""
Constructor
hk: the Housekeeping object
"""
self.hk = hk
def get_stages(self):
"""
Get the ordered list of stages for this task.
"""
# First look in the object or its class
res = getattr(self, "STAGES", None)
if res is not None:
return res
module = inspect.getmodule(self.__class__)
# If that fails, look in the module
res = getattr(module, "STAGES", None)
if res is not None:
return res
# If that fails, return a default
return ("main", )
| gpl-3.0 | 664,481,234,151,734,100 | 29.628571 | 79 | 0.670243 | false |
moden-py/pywinauto | examples/uninstall_7zip.py | 1 | 1904 | """
Uninstall script for 7zip 9.20 (64-bit)
Requirements:
- Win7 or Win8.1 x64, 64-bit Python
- pywinauto 0.5.2+
- UAC is fully disabled
"""
from __future__ import print_function
import pywinauto
pywinauto.Application().Start(r'explorer.exe')
explorer = pywinauto.Application().Connect(path='explorer.exe')
# Go to "Control Panel -> Programs and Features"
NewWindow = explorer.Window_(top_level_only=True, active_only=True, class_name='CabinetWClass')
try:
NewWindow.AddressBandRoot.click_input()
NewWindow.type_keys(r'Control Panel\Programs\Programs and Features{ENTER}',
with_spaces=True, set_foreground=False)
ProgramsAndFeatures = explorer.Window_(top_level_only=True, active_only=True,
title='Programs and Features', class_name='CabinetWClass')
# wait while the list of programs is loading
explorer.WaitCPUUsageLower(threshold=5)
item_7z = ProgramsAndFeatures.FolderView.get_item('7-Zip 9.20 (x64 edition)')
item_7z.ensure_visible()
item_7z.click_input(button='right', where='icon')
explorer.PopupMenu.menu_item('Uninstall').click()
Confirmation = explorer.Window_(title='Programs and Features', class_name='#32770', active_only=True)
if Confirmation.Exists():
Confirmation.Yes.click_input()
Confirmation.WaitNot('visible')
WindowsInstaller = explorer.Window_(title='Windows Installer', class_name='#32770', active_only=True)
if WindowsInstaller.Exists():
WindowsInstaller.WaitNot('visible', timeout=20)
SevenZipInstaller = explorer.Window_(title='7-Zip 9.20 (x64 edition)', class_name='#32770', active_only=True)
if SevenZipInstaller.Exists():
SevenZipInstaller.WaitNot('visible', timeout=20)
if '7-Zip 9.20 (x64 edition)' not in ProgramsAndFeatures.FolderView.texts():
print('OK')
finally:
NewWindow.close() | lgpl-2.1 | -3,219,262,022,870,870,500 | 38.6875 | 113 | 0.695378 | false |
triplefox/2017STDIOGameJam | asweigart/bagels_pico_fermi/bagels.py | 1 | 2056 | import random
NUM_DIGITS = 3
MAX_GUESS = 10
def getSecretNum():
# Returns a string of unique random digits that is NUM_DIGITS long.
numbers = list(range(10))
random.shuffle(numbers)
secretNum = ''
for i in range(NUM_DIGITS):
secretNum += str(numbers[i])
return secretNum
def getClues(guess, secretNum):
# Returns a string with the Pico, Fermi, & Bagels clues to the user.
if guess == secretNum:
return 'You got it!'
clues = []
for i in range(len(guess)):
if guess[i] == secretNum[i]:
clues.append('Fermi')
elif guess[i] in secretNum:
clues.append('Pico')
if len(clues) == 0:
return 'Bagels'
clues.sort()
return ' '.join(clues)
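# Editor's worked example: getClues('123', '321') returns 'Fermi Pico Pico'
# (the 2 is in place, the 1 and 3 are present but misplaced), while
# getClues('456', '321') returns 'Bagels'.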
def isOnlyDigits(num):
# Returns True if num is a string of only digits. Otherwise, returns False.
if num == '':
return False
for i in num:
if i not in '0 1 2 3 4 5 6 7 8 9'.split():
return False
return True
print('I am thinking of a %s-digit number. Try to guess what it is.' % (NUM_DIGITS))
print('The clues I give are...')
print('When I say: That means:')
print(' Bagels None of the digits is correct.')
print(' Pico One digit is correct but in the wrong position.')
print(' Fermi One digit is correct and in the right position.')
while True:
secretNum = getSecretNum()
print('I have thought up a number. You have %s guesses to get it.' % (MAX_GUESS))
guessesTaken = 1
while guessesTaken <= MAX_GUESS:
guess = ''
while len(guess) != NUM_DIGITS or not isOnlyDigits(guess):
print('Guess #%s: ' % (guessesTaken))
guess = input()
print(getClues(guess, secretNum))
guessesTaken += 1
if guess == secretNum:
break
if guessesTaken > MAX_GUESS:
print('You ran out of guesses. The answer was %s.' % (secretNum))
print('Do you want to play again? (yes or no)')
if not input().lower().startswith('y'):
break
| mit | 8,580,243,675,894,636,000 | 27.555556 | 85 | 0.593872 | false |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/pylint/test/functional/mapping_context.py | 10 | 2010 | """
Checks that only valid values are used in a mapping context.
"""
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods,no-self-use,import-error,wrong-import-position
from __future__ import print_function
def test(**kwargs):
print(kwargs)
# dictionary value/comprehension
dict_value = dict(a=1, b=2, c=3)
dict_comp = {chr(x): x for x in range(256)}
test(**dict_value)
test(**dict_comp)
# In order to be used in kwargs, a custom mapping class should define
# __iter__(), __getitem__(key) and keys().
class CustomMapping(object):
def __init__(self):
self.data = dict(a=1, b=2, c=3, d=4, e=5)
def __getitem__(self, key):
return self.data[key]
def keys(self):
return self.data.keys()
test(**CustomMapping())
test(**CustomMapping) # [not-a-mapping]
class NotMapping(object):
pass
test(**NotMapping()) # [not-a-mapping]
# skip checks if statement is inside mixin/base/abstract class
class SomeMixin(object):
kwargs = None
def get_kwargs(self):
return self.kwargs
def run(self, **kwargs):
print(kwargs)
def dispatch(self):
kws = self.get_kwargs()
self.run(**kws)
class AbstractThing(object):
kwargs = None
def get_kwargs(self):
return self.kwargs
def run(self, **kwargs):
print(kwargs)
def dispatch(self):
kws = self.get_kwargs()
self.run(**kws)
class BaseThing(object):
kwargs = None
def get_kwargs(self):
return self.kwargs
def run(self, **kwargs):
print(kwargs)
def dispatch(self):
kws = self.get_kwargs()
self.run(**kws)
# abstract class
class Thing(object):
def get_kwargs(self):
raise NotImplementedError
def run(self, **kwargs):
print(kwargs)
def dispatch(self):
kwargs = self.get_kwargs()
self.run(**kwargs)
# skip uninferable instances
from some_missing_module import Mapping
class MyClass(Mapping):
pass
test(**MyClass())
| mit | -8,358,436,918,026,497,000 | 19.721649 | 118 | 0.630846 | false |
lochiiconnectivity/libcloud | libcloud/test/compute/test_vpsnet.py | 46 | 8658 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.vpsnet import VPSNetNodeDriver
from libcloud.compute.base import Node
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.secrets import VPSNET_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
class VPSNetTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VPSNetNodeDriver.connectionCls.conn_classes = (None, VPSNetMockHttp)
self.driver = VPSNetNodeDriver(*VPSNET_PARAMS)
def test_create_node(self):
VPSNetMockHttp.type = 'create'
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node('foo', image, size)
self.assertEqual(node.name, 'foo')
def test_list_nodes(self):
VPSNetMockHttp.type = 'virtual_machines'
node = self.driver.list_nodes()[0]
self.assertEqual(node.id, '1384')
self.assertEqual(node.state, NodeState.RUNNING)
def test_reboot_node(self):
VPSNetMockHttp.type = 'virtual_machines'
node = self.driver.list_nodes()[0]
VPSNetMockHttp.type = 'reboot'
ret = self.driver.reboot_node(node)
self.assertEqual(ret, True)
def test_destroy_node(self):
VPSNetMockHttp.type = 'delete'
node = Node('2222', None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
VPSNetMockHttp.type = 'delete_fail'
node = Node('2223', None, None, None, None, self.driver)
self.assertRaises(Exception, self.driver.destroy_node, node)
def test_list_images(self):
VPSNetMockHttp.type = 'templates'
ret = self.driver.list_images()
self.assertEqual(ret[0].id, '9')
self.assertEqual(ret[-1].id, '160')
def test_list_sizes(self):
VPSNetMockHttp.type = 'sizes'
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0].id, '1')
self.assertEqual(ret[0].name, '1 Node')
def test_destroy_node_response(self):
# should return a node object
node = Node('2222', None, None, None, None, self.driver)
VPSNetMockHttp.type = 'delete'
ret = self.driver.destroy_node(node)
self.assertTrue(isinstance(ret, bool))
def test_reboot_node_response(self):
# should return a node object
VPSNetMockHttp.type = 'virtual_machines'
node = self.driver.list_nodes()[0]
VPSNetMockHttp.type = 'reboot'
ret = self.driver.reboot_node(node)
self.assertTrue(isinstance(ret, bool))
class VPSNetMockHttp(MockHttp):
fixtures = ComputeFileFixtures('vpsnet')
def _nodes_api10json_sizes(self, method, url, body, headers):
body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}},
{"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}},
{"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _nodes_api10json_create(self, method, url, body, headers):
body = """[{"slice":{"virtual_machine_id":8592,"id":12256,"consumer_id":0}},
{"slice":{"virtual_machine_id":null,"id":12258,"consumer_id":0}},
{"slice":{"virtual_machine_id":null,"id":12434,"consumer_id":0}}]"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _virtual_machines_2222_api10json_delete_fail(self, method, url, body, headers):
return (httplib.FORBIDDEN, '', {}, httplib.responses[httplib.FORBIDDEN])
def _virtual_machines_2222_api10json_delete(self, method, url, body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _virtual_machines_1384_reboot_api10json_reboot(self, method, url, body, headers):
body = """{
"virtual_machine":
{
"running": true,
"updated_at": "2009-05-15T06:55:02-04:00",
"power_action_pending": false,
"system_template_id": 41,
"id": 1384,
"cloud_id": 3,
"domain_name": "demodomain.com",
"hostname": "web01",
"consumer_id": 0,
"backups_enabled": false,
"password": "a8hjsjnbs91",
"label": "foo",
"slices_count": null,
"created_at": "2009-04-16T08:17:39-04:00"
}
}"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _virtual_machines_api10json_create(self, method, url, body, headers):
body = """{
"virtual_machine":
{
"running": true,
"updated_at": "2009-05-15T06:55:02-04:00",
"power_action_pending": false,
"system_template_id": 41,
"id": 1384,
"cloud_id": 3,
"domain_name": "demodomain.com",
"hostname": "web01",
"consumer_id": 0,
"backups_enabled": false,
"password": "a8hjsjnbs91",
"label": "foo",
"slices_count": null,
"created_at": "2009-04-16T08:17:39-04:00"
}
}"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _virtual_machines_api10json_virtual_machines(self, method, url, body, headers):
body = """ [{
"virtual_machine":
{
"running": true,
"updated_at": "2009-05-15T06:55:02-04:00",
"power_action_pending": false,
"system_template_id": 41,
"id": 1384,
"cloud_id": 3,
"domain_name": "demodomain.com",
"hostname": "web01",
"consumer_id": 0,
"backups_enabled": false,
"password": "a8hjsjnbs91",
"label": "Web Server 01",
"slices_count": null,
"created_at": "2009-04-16T08:17:39-04:00"
}
},
{
"virtual_machine":
{
"running": true,
"updated_at": "2009-05-15T06:55:02-04:00",
"power_action_pending": false,
"system_template_id": 41,
"id": 1385,
"cloud_id": 3,
"domain_name": "demodomain.com",
"hostname": "mysql01",
"consumer_id": 0,
"backups_enabled": false,
"password": "dsi8h38hd2s",
"label": "MySQL Server 01",
"slices_count": null,
"created_at": "2009-04-16T08:17:39-04:00"
}
}]"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _available_clouds_api10json_templates(self, method, url, body, headers):
body = self.fixtures.load('_available_clouds_api10json_templates.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _available_clouds_api10json_create(self, method, url, body, headers):
body = """
[{"cloud":{"system_templates":[{"id":9,"label":"Ubuntu 8.04 x64"}],"id":2,"label":"USA VPS Cloud"}}]
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | 9,166,843,932,081,830,000 | 39.839623 | 108 | 0.558212 | false |
gramps-project/gramps | gramps/gui/views/treemodels/citationbasemodel.py | 5 | 11870 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons, Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
CitationBaseModel classes for Gramps.
"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from html import escape
import logging
log = logging.getLogger(".")
LOG = logging.getLogger(".citation")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.datehandler import format_time, get_date, get_date_valid
from gramps.gen.lib import Citation
from gramps.gen.utils.string import conf_strings
from gramps.gen.config import config
#-------------------------------------------------------------------------
#
# COLUMN constants
#
#-------------------------------------------------------------------------
# These are the column numbers in the serialize/unserialize interfaces in
# the Citation object
COLUMN_HANDLE = 0
COLUMN_ID = 1
COLUMN_DATE = 2
COLUMN_PAGE = 3
COLUMN_CONFIDENCE = 4
COLUMN_SOURCE = 5
COLUMN_CHANGE = 9
COLUMN_TAGS = 10
COLUMN_PRIV = 11
# Data for the Source object
COLUMN2_HANDLE = 0
COLUMN2_ID = 1
COLUMN2_TITLE = 2
COLUMN2_AUTHOR = 3
COLUMN2_PUBINFO = 4
COLUMN2_ABBREV = 7
COLUMN2_CHANGE = 8
COLUMN2_TAGS = 11
COLUMN2_PRIV = 12
INVALID_DATE_FORMAT = config.get('preferences.invalid-date-format')
#-------------------------------------------------------------------------
#
# CitationModel
#
#-------------------------------------------------------------------------
class CitationBaseModel:
# Fields access when 'data' is a Citation
def citation_date(self, data):
if data[COLUMN_DATE]:
citation = Citation()
citation.unserialize(data)
date_str = get_date(citation)
if date_str != "":
retval = escape(date_str)
if not get_date_valid(citation):
return INVALID_DATE_FORMAT % retval
else:
return retval
return ''
def citation_sort_date(self, data):
if data[COLUMN_DATE]:
citation = Citation()
citation.unserialize(data)
retval = "%09d" % citation.get_date_object().get_sort_value()
if not get_date_valid(citation):
return INVALID_DATE_FORMAT % retval
else:
return retval
return ''
def citation_id(self, data):
return data[COLUMN_ID]
def citation_page(self, data):
return data[COLUMN_PAGE]
def citation_sort_confidence(self, data):
if data[COLUMN_CONFIDENCE]:
return str(data[COLUMN_CONFIDENCE])
return ''
def citation_confidence(self, data):
return _(conf_strings[data[COLUMN_CONFIDENCE]])
def citation_private(self, data):
if data[COLUMN_PRIV]:
return 'gramps-lock'
else:
# There is a problem returning None here.
return ''
def citation_tags(self, data):
"""
Return the sorted list of tags.
"""
tag_list = list(map(self.get_tag_name, data[COLUMN_TAGS]))
# TODO for Arabic, should the next line's comma be translated?
return ', '.join(sorted(tag_list, key=glocale.sort_key))
def citation_tag_color(self, data):
"""
Return the tag color.
"""
tag_handle = data[0]
cached, tag_color = self.get_cached_value(tag_handle, "TAG_COLOR")
if not cached:
tag_color = ""
tag_priority = None
for handle in data[COLUMN_TAGS]:
tag = self.db.get_tag_from_handle(handle)
this_priority = tag.get_priority()
if tag_priority is None or this_priority < tag_priority:
tag_color = tag.get_color()
tag_priority = this_priority
self.set_cached_value(tag_handle, "TAG_COLOR", tag_color)
return tag_color
def citation_change(self, data):
return format_time(data[COLUMN_CHANGE])
def citation_sort_change(self, data):
return "%012x" % data[COLUMN_CHANGE]
def citation_source(self, data):
return data[COLUMN_SOURCE]
def citation_src_title(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_TITLE")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
value = source.get_title()
except:
value = ''
self.set_cached_value(source_handle, "SRC_TITLE", value)
return value
def citation_src_id(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_ID")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
value = source.gramps_id
except:
value = ''
self.set_cached_value(source_handle, "SRC_ID", value)
return value
def citation_src_auth(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_AUTH")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
value = source.get_author()
except:
value = ''
self.set_cached_value(source_handle, "SRC_AUTH", value)
return value
def citation_src_abbr(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_ABBR")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
value = source.get_abbreviation()
except:
value = ''
self.set_cached_value(source_handle, "SRC_ABBR", value)
return value
def citation_src_pinfo(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_PINFO")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
value = source.get_publication_info()
except:
value = ''
self.set_cached_value(source_handle, "SRC_PINFO", value)
return value
def citation_src_private(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_PRIVATE")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
if source.get_privacy():
value = 'gramps-lock'
else:
# There is a problem returning None here.
value = ''
except:
value = ''
self.set_cached_value(source_handle, "SRC_PRIVATE", value)
return value
def citation_src_tags(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_TAGS")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
tag_list = list(map(self.get_tag_name, source.get_tag_list()))
# TODO for Arabic, should the next line's comma be translated?
value = ', '.join(sorted(tag_list, key=glocale.sort_key))
except:
value = ''
self.set_cached_value(source_handle, "SRC_TAGS", value)
return value
def citation_src_chan(self, data):
source_handle = data[COLUMN_SOURCE]
cached, value = self.get_cached_value(source_handle, "SRC_CHAN")
if not cached:
try:
source = self.db.get_source_from_handle(source_handle)
value = format_time(source.change)
except:
value = ''
self.set_cached_value(source_handle, "SRC_CHAN", value)
return value
    def citation_src_sort_change(self, data):
        source_handle = data[COLUMN_SOURCE]
        # Use a distinct cache key so the sortable value does not collide with
        # the formatted date cached by citation_src_chan under "SRC_CHAN".
        cached, value = self.get_cached_value(source_handle, "SRC_CHAN_SORT")
        if not cached:
            try:
                source = self.db.get_source_from_handle(source_handle)
                value = "%012x" % source.change
            except:
                value = ''
            self.set_cached_value(source_handle, "SRC_CHAN_SORT", value)
        return value
# Fields access when 'data' is a Source
def source_src_title(self, data):
return data[COLUMN2_TITLE]
def source_src_id(self, data):
return data[COLUMN2_ID]
def source_src_auth(self, data):
return data[COLUMN2_AUTHOR]
def source_src_abbr(self, data):
return data[COLUMN2_ABBREV]
def source_src_pinfo(self, data):
return data[COLUMN2_PUBINFO]
def source_src_private(self, data):
if data[COLUMN2_PRIV]:
return 'gramps-lock'
else:
# There is a problem returning None here.
return ''
def source_src_tags(self, data):
"""
Return the sorted list of tags.
"""
tag_list = list(map(self.get_tag_name, data[COLUMN2_TAGS]))
# TODO for Arabic, should the next line's comma be translated?
return ', '.join(sorted(tag_list, key=glocale.sort_key))
def source_src_tag_color(self, data):
"""
Return the tag color.
"""
tag_handle = data[0]
cached, tag_color = self.get_cached_value(tag_handle, "TAG_COLOR")
if not cached:
tag_color = ""
tag_priority = None
for handle in data[COLUMN2_TAGS]:
tag = self.db.get_tag_from_handle(handle)
this_priority = tag.get_priority()
if tag_priority is None or this_priority < tag_priority:
tag_color = tag.get_color()
tag_priority = this_priority
self.set_cached_value(tag_handle, "TAG_COLOR", tag_color)
return tag_color
def source_src_chan(self, data):
return format_time(data[COLUMN2_CHANGE])
def source_sort2_change(self, data):
return "%012x" % data[COLUMN2_CHANGE]
def dummy_sort_key(self, data):
# dummy sort key for columns that don't have data
return None
def get_tag_name(self, tag_handle):
"""
Return the tag name from the given tag handle.
"""
cached, value = self.get_cached_value(tag_handle, "TAG_NAME")
if not cached:
value = self.db.get_tag_from_handle(tag_handle).get_name()
self.set_cached_value(tag_handle, "TAG_NAME", value)
return value
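    # These accessors are presumably wired up as per-column fetch functions by the
    # concrete citation/source tree models (e.g. pairing self.citation_page,
    # self.citation_confidence, ... with list view columns); that wiring lives
    # outside this base class.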
| gpl-2.0 | 8,066,906,672,199,134,000 | 33.011461 | 79 | 0.55754 | false |
yuxiang-zhou/menpofit | menpofit/aam/fitter.py | 2 | 22442 | import numpy as np
from copy import deepcopy
from menpo.base import name_of_callable
from menpo.transform import AlignmentUniformScale
from menpo.image import BooleanImage
from menpofit.fitter import (MultiScaleParametricFitter,
noisy_shape_from_bounding_box)
from menpofit.sdm import SupervisedDescentFitter
import menpofit.checks as checks
from menpofit.result import MultiScaleParametricIterativeResult
from .algorithm.lk import WibergInverseCompositional
from .algorithm.sd import ProjectOutNewton
from .result import AAMResult
class AAMFitter(MultiScaleParametricFitter):
r"""
Abstract class for defining an AAM fitter.
.. note:: When using a method with a parametric shape model, the first step
is to **reconstruct the initial shape** using the shape model. The
generated reconstructed shape is then used as initialisation for
the iterative optimisation. This step takes place at each scale
and it is not considered as an iteration, thus it is not counted
for the provided `max_iters`.
Parameters
----------
aam : :map:`AAM` or `subclass`
The trained AAM model.
algorithms : `list` of `class`
The list of algorithm objects that will perform the fitting per scale.
"""
def __init__(self, aam, algorithms):
self._model = aam
# Call superclass
super(AAMFitter, self).__init__(
scales=aam.scales, reference_shape=aam.reference_shape,
holistic_features=aam.holistic_features, algorithms=algorithms)
@property
def aam(self):
r"""
The trained AAM model.
:type: :map:`AAM` or `subclass`
"""
return self._model
def _fitter_result(self, image, algorithm_results, affine_transforms,
scale_transforms, gt_shape=None):
r"""
Function the creates the multi-scale fitting result object.
Parameters
----------
image : `menpo.image.Image` or subclass
The image that was fitted.
algorithm_results : `list` of :map:`AAMAlgorithmResult` or subclass
The list of fitting result per scale.
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that are the inverses of the
transformations introduced by the rescale wrt the reference shape as
well as the feature extraction.
scale_transforms : `list` of `menpo.shape.Scale`
The list of inverse scaling transforms per scale.
gt_shape : `menpo.shape.PointCloud`, optional
The ground truth shape associated to the image.
Returns
-------
fitting_result : :map:`AAMResult` or subclass
The multi-scale fitting result containing the result of the fitting
procedure.
"""
return AAMResult(results=algorithm_results, scales=self.scales,
affine_transforms=affine_transforms,
scale_transforms=scale_transforms, image=image,
gt_shape=gt_shape)
class LucasKanadeAAMFitter(AAMFitter):
r"""
Class for defining an AAM fitter using the Lucas-Kanade optimisation.
.. note:: When using a method with a parametric shape model, the first step
is to **reconstruct the initial shape** using the shape model. The
generated reconstructed shape is then used as initialisation for
the iterative optimisation. This step takes place at each scale
and it is not considered as an iteration, thus it is not counted
for the provided `max_iters`.
Parameters
----------
aam : :map:`AAM` or `subclass`
The trained AAM model.
lk_algorithm_cls : `class`, optional
The Lukas-Kanade optimisation algorithm that will get applied. The
possible algorithms are:
============================================== =====================
Class Method
============================================== =====================
:map:`AlternatingForwardCompositional` Alternating
:map:`AlternatingInverseCompositional`
:map:`ModifiedAlternatingForwardCompositional` Modified Alternating
:map:`ModifiedAlternatingInverseCompositional`
:map:`ProjectOutForwardCompositional` Project-Out
:map:`ProjectOutInverseCompositional`
:map:`SimultaneousForwardCompositional` Simultaneous
:map:`SimultaneousInverseCompositional`
:map:`WibergForwardCompositional` Wiberg
:map:`WibergInverseCompositional`
============================================== =====================
n_shape : `int` or `float` or `list` of those or ``None``, optional
The number of shape components that will be used. If `int`, then it
defines the exact number of active components. If `float`, then it
defines the percentage of variance to keep. If `int` or `float`, then
the provided value will be applied for all scales. If `list`, then it
defines a value per scale. If ``None``, then all the available
components will be used. Note that this simply sets the active
components without trimming the unused ones. Also, the available
components may have already been trimmed to `max_shape_components`
during training.
n_appearance : `int` or `float` or `list` of those or ``None``, optional
The number of appearance components that will be used. If `int`, then it
defines the exact number of active components. If `float`, then it
defines the percentage of variance to keep. If `int` or `float`, then
the provided value will be applied for all scales. If `list`, then it
defines a value per scale. If ``None``, then all the available
components will be used. Note that this simply sets the active
components without trimming the unused ones. Also, the available
components may have already been trimmed to `max_appearance_components`
during training.
sampling : `list` of `int` or `ndarray` or ``None``
It defines a sampling mask per scale. If `int`, then it defines the
sub-sampling step of the sampling mask. If `ndarray`, then it
explicitly defines the sampling mask. If ``None``, then no
sub-sampling is applied.
"""
def __init__(self, aam, lk_algorithm_cls=WibergInverseCompositional,
n_shape=None, n_appearance=None, sampling=None):
# Check parameters
checks.set_models_components(aam.shape_models, n_shape)
checks.set_models_components(aam.appearance_models, n_appearance)
self._sampling = checks.check_sampling(sampling, aam.n_scales)
# Get list of algorithm objects per scale
interfaces = aam.build_fitter_interfaces(self._sampling)
algorithms = [lk_algorithm_cls(interface) for interface in interfaces]
# Call superclass
super(LucasKanadeAAMFitter, self).__init__(aam=aam,
algorithms=algorithms)
def appearance_reconstructions(self, appearance_parameters,
n_iters_per_scale):
r"""
Method that generates the appearance reconstructions given a set of
appearance parameters. This is to be combined with a :map:`AAMResult`
object, in order to generate the appearance reconstructions of a
fitting procedure.
Parameters
----------
appearance_parameters : `list` of ``(n_params,)`` `ndarray`
A set of appearance parameters per fitting iteration. It can be
retrieved as a property of an :map:`AAMResult` object.
n_iters_per_scale : `list` of `int`
The number of iterations per scale. This is necessary in order to
figure out which appearance parameters correspond to the model of
each scale. It can be retrieved as a property of a :map:`AAMResult`
object.
Returns
-------
appearance_reconstructions : `list` of `menpo.image.Image`
`List` of the appearance reconstructions that correspond to the
provided parameters.
"""
return self.aam.appearance_reconstructions(
appearance_parameters=appearance_parameters,
n_iters_per_scale=n_iters_per_scale)
def warped_images(self, image, shapes):
r"""
Given an input test image and a list of shapes, it warps the image
into the shapes. This is useful for generating the warped images of a
fitting procedure stored within an :map:`AAMResult`.
Parameters
----------
image : `menpo.image.Image` or `subclass`
The input image to be warped.
shapes : `list` of `menpo.shape.PointCloud`
The list of shapes in which the image will be warped. The shapes
are obtained during the iterations of a fitting procedure.
Returns
-------
warped_images : `list` of `menpo.image.MaskedImage` or `ndarray`
The warped images.
"""
return self.algorithms[-1].interface.warped_images(image=image,
shapes=shapes)
def __str__(self):
# Compute scale info strings
scales_info = []
lvl_str_tmplt = r""" - Scale {}
- {} active shape components
- {} similarity transform components
- {} active appearance components"""
for k, s in enumerate(self.scales):
scales_info.append(lvl_str_tmplt.format(
s,
self.aam.shape_models[k].n_active_components,
self.aam.shape_models[k].n_global_parameters,
self.aam.appearance_models[k].n_active_components))
scales_info = '\n'.join(scales_info)
cls_str = r"""{class_title}
- Scales: {scales}
{scales_info}
""".format(class_title=self.algorithms[0].__str__(),
scales=self.scales,
scales_info=scales_info)
return self.aam.__str__() + cls_str
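# Rough usage sketch (illustrative): assuming a trained AAM `aam`, a test `image`
# with a detected `bounding_box`, and the fit_from_bb interface inherited from the
# multi-scale fitter base class:
#
#     fitter = LucasKanadeAAMFitter(aam, n_shape=0.9, n_appearance=0.9)
#     result = fitter.fit_from_bb(image, bounding_box, max_iters=[25, 5])
#     print(result.final_shape)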
class SupervisedDescentAAMFitter(SupervisedDescentFitter):
r"""
Class for training a multi-scale cascaded-regression Supervised Descent AAM
fitter.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
aam : :map:`AAM` or `subclass`
The trained AAM model.
group : `str` or ``None``, optional
The landmark group that will be used to train the fitter. If ``None`` and
the images only have a single landmark group, then that is the one
that will be used. Note that all the training images need to have the
specified landmark group.
bounding_box_group_glob : `glob` or ``None``, optional
Glob that defines the bounding boxes to be used for training. If
``None``, then the bounding boxes of the ground truth shapes are used.
n_shape : `int` or `float` or `list` of those or ``None``, optional
The number of shape components that will be used. If `int`, then it
defines the exact number of active components. If `float`, then it
defines the percentage of variance to keep. If `int` or `float`, then
the provided value will be applied for all scales. If `list`, then it
defines a value per scale. If ``None``, then all the available
components will be used. Note that this simply sets the active
components without trimming the unused ones. Also, the available
components may have already been trimmed to `max_shape_components`
during training.
n_appearance : `int` or `float` or `list` of those or ``None``, optional
The number of appearance components that will be used. If `int`, then it
defines the exact number of active components. If `float`, then it
defines the percentage of variance to keep. If `int` or `float`, then
the provided value will be applied for all scales. If `list`, then it
defines a value per scale. If ``None``, then all the available
components will be used. Note that this simply sets the active
components without trimming the unused ones. Also, the available
components may have already been trimmed to `max_appearance_components`
during training.
sampling : `list` of `int` or `ndarray` or ``None``
It defines a sampling mask per scale. If `int`, then it defines the
sub-sampling step of the sampling mask. If `ndarray`, then it explicitly
defines the sampling mask. If ``None``, then no sub-sampling is applied.
sd_algorithm_cls : `class`, optional
The Supervised Descent algorithm to be used. The possible algorithms
are:
=================================== ============= =====================
Class Features Regression
=================================== ============= =====================
:map:`MeanTemplateNewton` Mean Template :map:`IRLRegression`
:map:`MeanTemplateGaussNewton` :map:`IIRLRegression`
:map:`ProjectOutNewton` Project-Out :map:`IRLRegression`
:map:`ProjectOutGaussNewton` :map:`IIRLRegression`
:map:`AppearanceWeightsNewton` App. Weights :map:`IRLRegression`
:map:`AppearanceWeightsGaussNewton` :map:`IIRLRegression`
=================================== ============= =====================
n_iterations : `int` or `list` of `int`, optional
The number of iterations (cascades) of each level. If `list`, it must
specify a value per scale. If `int`, then it defines the total number of
iterations (cascades) over all scales.
n_perturbations : `int` or ``None``, optional
The number of perturbations to be generated from the provided bounding
boxes.
perturb_from_gt_bounding_box : `callable`, optional
The function that will be used to generate the perturbations.
batch_size : `int` or ``None``, optional
If an `int` is provided, then the training is performed in an
incremental fashion on image batches of size equal to the provided
value. If ``None``, then the training is performed directly on the
all the images.
verbose : `bool`, optional
If ``True``, then the progress of training will be printed.
"""
def __init__(self, images, aam, group=None, bounding_box_group_glob=None,
n_shape=None, n_appearance=None, sampling=None,
sd_algorithm_cls=ProjectOutNewton,
n_iterations=6, n_perturbations=30,
perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
batch_size=None, verbose=False):
self.aam = aam
# Check parameters
checks.set_models_components(aam.shape_models, n_shape)
checks.set_models_components(aam.appearance_models, n_appearance)
self._sampling = checks.check_sampling(sampling, aam.n_scales)
# patch_feature and patch_shape are not actually
# used because they are fully defined by the AAM already. Therefore,
# we just leave them as their 'defaults' because they won't be used.
super(SupervisedDescentAAMFitter, self).__init__(
images, group=group, bounding_box_group_glob=bounding_box_group_glob,
reference_shape=self.aam.reference_shape,
sd_algorithm_cls=sd_algorithm_cls,
holistic_features=self.aam.holistic_features,
diagonal=self.aam.diagonal,
scales=self.aam.scales, n_iterations=n_iterations,
n_perturbations=n_perturbations,
perturb_from_gt_bounding_box=perturb_from_gt_bounding_box,
batch_size=batch_size, verbose=verbose)
def _setup_algorithms(self):
interfaces = self.aam.build_fitter_interfaces(self._sampling)
self.algorithms = [self._sd_algorithm_cls[j](
interface, n_iterations=self.n_iterations[j])
for j, interface in enumerate(interfaces)]
def _fitter_result(self, image, algorithm_results, affine_transforms,
scale_transforms, gt_shape=None):
r"""
Function the creates the multi-scale fitting result object.
Parameters
----------
image : `menpo.image.Image` or subclass
The image that was fitted.
algorithm_results : `list` of :map:`ParametricIterativeResult` or subclass
The list of fitting result per scale.
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that are the inverses of the
transformations introduced by the rescale wrt the reference shape as
well as the feature extraction.
scale_transforms : `list` of `menpo.shape.Scale`
The list of inverse scaling transforms per scale.
gt_shape : `menpo.shape.PointCloud`, optional
The ground truth shape associated to the image.
Returns
-------
fitting_result : :map:`MultiScaleParametricIterativeResult` or subclass
The multi-scale fitting result containing the result of the fitting
procedure.
"""
return MultiScaleParametricIterativeResult(
results=algorithm_results, scales=self.scales,
affine_transforms=affine_transforms,
scale_transforms=scale_transforms, image=image, gt_shape=gt_shape)
def warped_images(self, image, shapes):
r"""
Given an input test image and a list of shapes, it warps the image
into the shapes. This is useful for generating the warped images of a
fitting procedure stored within a
:map:`MultiScaleParametricIterativeResult`.
Parameters
----------
image : `menpo.image.Image` or `subclass`
The input image to be warped.
shapes : `list` of `menpo.shape.PointCloud`
The list of shapes in which the image will be warped. The shapes
are obtained during the iterations of a fitting procedure.
Returns
-------
warped_images : `list` of `menpo.image.MaskedImage` or `ndarray`
The warped images.
"""
return self.algorithms[-1].interface.warped_images(image=image,
shapes=shapes)
def __str__(self):
is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
noisy_shape_from_bounding_box)
if is_custom_perturb_func:
is_custom_perturb_func = name_of_callable(
self._perturb_from_gt_bounding_box)
regressor_cls = self.algorithms[0]._regressor_cls
# Compute scale info strings
scales_info = []
lvl_str_tmplt = r""" - Scale {}
- {} iterations"""
for k, s in enumerate(self.scales):
scales_info.append(lvl_str_tmplt.format(s, self.n_iterations[k]))
scales_info = '\n'.join(scales_info)
cls_str = r"""Supervised Descent Method
- Regression performed using the {reg_alg} algorithm
- Regression class: {reg_cls}
- Perturbations generated per shape: {n_perturbations}
- Custom perturbation scheme used: {is_custom_perturb_func}
- Scales: {scales}
{scales_info}
""".format(
reg_alg=name_of_callable(self._sd_algorithm_cls[0]),
reg_cls=name_of_callable(regressor_cls),
n_perturbations=self.n_perturbations,
is_custom_perturb_func=is_custom_perturb_func,
scales=self.scales,
scales_info=scales_info)
return self.aam.__str__() + cls_str
def holistic_sampling_from_scale(aam, scale=0.35):
r"""
Function that generates a sampling reference mask given a scale value.
Parameters
----------
aam : :map:`AAM` or subclass
The trained AAM.
scale : `float`, optional
The scale value.
Returns
-------
true_positions : `ndarray` of `bool`
The array that has ``True`` for the points of the reference shape that
belong to the new mask.
boolean_image : `menpo.image.BooleanImage`
The boolean image of the mask.
"""
reference = aam.appearance_models[0].mean()
scaled_reference = reference.rescale(scale)
t = AlignmentUniformScale(scaled_reference.landmarks['source'].lms,
reference.landmarks['source'].lms)
new_indices = np.require(np.round(t.apply(
scaled_reference.mask.true_indices())), dtype=np.int)
modified_mask = deepcopy(reference.mask.pixels)
modified_mask[:] = False
modified_mask[:, new_indices[:, 0], new_indices[:, 1]] = True
true_positions = np.nonzero(
modified_mask[:, reference.mask.mask].ravel())[0]
return true_positions, BooleanImage(modified_mask[0])
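# Illustrative use (assumed wiring): the mask produced here is intended as a sparse
# `sampling` mask for the fitters above, e.g. for a single-scale AAM one might pass
#     true_positions, mask = holistic_sampling_from_scale(aam, scale=0.25)
#     fitter = LucasKanadeAAMFitter(aam, sampling=[true_positions])
# How the mask is plumbed in per scale is an assumption here, not something this
# module enforces.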
def holistic_sampling_from_step(aam, step=8):
r"""
Function that generates a sampling reference mask given a sampling step.
Parameters
----------
aam : :map:`AAM` or subclass
The trained AAM.
step : `int`, optional
The sampling step.
Returns
-------
true_positions : `ndarray` of `bool`
The array that has ``True`` for the points of the reference shape that
belong to the new mask.
boolean_image : `menpo.image.BooleanImage`
The boolean image of the mask.
"""
reference = aam.appearance_models[0].mean()
n_true_pixels = reference.n_true_pixels()
true_positions = np.zeros(n_true_pixels, dtype=np.bool)
sampling = range(0, n_true_pixels, step)
true_positions[sampling] = True
modified_mask = reference.mask.copy()
new_indices = modified_mask.true_indices()[sampling, :]
modified_mask.mask[:] = False
modified_mask.mask[new_indices[:, 0], new_indices[:, 1]] = True
return true_positions, modified_mask
| bsd-3-clause | -3,890,095,185,733,189,000 | 44.15493 | 82 | 0.617414 | false |
jessefeinman/FintechHackathon | venv/Lib/site-packages/setuptools/command/install_egg_info.py | 22 | 4096 | from distutils import log, dir_util
import os
from setuptools.extern.six.moves import map
from setuptools import Command
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',
('install_dir', 'install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name() + '.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = []
def run(self):
self.run_command('egg_info')
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink, (self.target,), "Removing " + self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(
self.copytree, (), "Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src, dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/', 'CVS/':
if src.startswith(skip) or '/' + skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self.target)
filename += '-nspkg.pth'
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
_nspkg_tmpl = (
"import sys, types, os",
"pep420 = sys.version_info > (3, 3)",
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
"ie = os.path.exists(os.path.join(p,'__init__.py'))",
"m = not ie and not pep420 and "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
@classmethod
def _gen_nspkg_line(cls, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
tmpl_lines = cls._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += cls._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
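    # Illustrative example: for the namespace package 'zope.interface' this emits a
    # single ';'-joined line built from _nspkg_tmpl plus _nspkg_tmpl_multi (because a
    # parent package 'zope' exists); at import time that .pth line creates a stub
    # 'zope.interface' module if needed and appends the install dir to its __path__.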
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
nsp = set()
for pkg in self.distribution.namespace_packages or []:
pkg = pkg.split('.')
while pkg:
nsp.add('.'.join(pkg))
pkg.pop()
return sorted(nsp)
| bsd-2-clause | 4,333,745,931,624,503,000 | 33.420168 | 78 | 0.565918 | false |
erichiggins/gae-mixy | appengine_config.py | 1 | 1050 | #/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# Zipimport libraries.
# Eric's note: I've swapped these zips with new files created from
# 'python setup.py build'. when available.
THIRD_PARTY_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'third_party')
LIBS = (
'arrow-0.4.1.zip',
'babel-0.9.6.zip',
'crockford-0.0.2-modified.zip',
'dateutil-2.1.zip',
'gdata-2.0.17.zip',
'html5lib-0.95.zip',
'httplib2-0.7.6.zip',
'jinja2htmlcompress.zip',
'markdown-2.2.0.zip',
'oauth2-1.5.211.zip',
'pytz-2012c-modified.zip',
'simpleauth-0.1.3.zip',
'six-1.4.1.zip',
'unidecode-0.04.9.zip',
'wtforms-1.0.5.zip',
)
for filename in LIBS:
sys.path.insert(1, os.path.join(THIRD_PARTY_PATH, filename))
# AppStats configuration.
appstats_CALC_RPC_COSTS = True
appstats_MAX_STACK = 15
def webapp_add_wsgi_middleware(app):
"""Enables appstats middleware."""
from google.appengine.ext.appstats import recording
return recording.appstats_wsgi_middleware(app)
| apache-2.0 | -4,125,297,717,674,693,000 | 24 | 90 | 0.662857 | false |
marcelveldt/plugin.video.plexbmc | resources/lib/plexbmc.py | 1 | 174368 | '''
@document : plexbmc.py
@package : PleXBMC add-on
@author : Hippojay (aka Dave Hawes-Johnson)
@copyright : 2011-2015, Hippojay
@version : 4.0 (Helix)
@license : Gnu General Public License - see LICENSE.TXT
@description: pleXBMC XBMC add-on
This file is part of the XBMC PleXBMC Plugin.
PleXBMC Plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
PleXBMC Plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PleXBMC Plugin. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib
import urlparse
import re
import xbmcplugin
import xbmcgui
import httplib
import socket
import sys
import os
import time
import random
import xbmc
import datetime
from common import * #Needed first to setup import locations
import plex
def mediaType( partData, server, dvdplayback=False ):
stream=partData['key']
file=partData['file']
if ( file is None ) or ( settings.get_stream() == "1" ):
printDebug.debug( "Selecting stream")
return server.get_formatted_url(stream)
#First determine what sort of 'file' file is
if file[0:2] == "\\\\":
printDebug.debug("Detected UNC source file")
type="UNC"
elif file[0:1] == "/" or file[0:1] == "\\":
printDebug.debug("Detected unix source file")
type="nixfile"
elif file[1:3] == ":\\" or file[1:2] == ":/":
printDebug.debug("Detected windows source file")
type="winfile"
else:
printDebug.debug("Unknown file type source: %s" % file)
type=None
# 0 is auto select. basically check for local file first, then stream if not found
if settings.get_stream() == "0":
#check if the file can be found locally
if type == "nixfile" or type == "winfile":
printDebug.debug("Checking for local file")
try:
exists = open(file, 'r')
printDebug.debug("Local file found, will use this")
exists.close()
return "file:%s" % file
except: pass
printDebug.debug("No local file")
if dvdplayback:
printDebug.debug("Forcing SMB for DVD playback")
settings.set_stream("2")
else:
return server.get_formatted_url(stream)
# 2 is use SMB
elif settings.get_stream() == "2" or settings.get_stream() == "3":
file=urllib.unquote(file)
if settings.get_stream() == "2":
protocol="smb"
else:
protocol="afp"
printDebug.debug( "Selecting smb/unc")
if type == "UNC":
filelocation="%s:%s" % (protocol, file.replace("\\","/"))
else:
#Might be OSX type, in which case, remove Volumes and replace with server
server=server.get_location().split(':')[0]
loginstring=""
if settings.get_setting('nasoverride'):
if settings.get_setting('nasoverrideip'):
server=settings.get_setting('nasoverrideip')
printDebug.debug("Overriding server with: %s" % server)
if settings.get_setting('nasuserid'):
loginstring="%s:%s@" % (settings.get_setting('nasuserid'), settings.get_setting('naspass'))
printDebug.debug("Adding AFP/SMB login info for user: %s" % settings.get_setting('nasuserid'))
if file.find('Volumes') > 0:
filelocation="%s:/%s" % (protocol, file.replace("Volumes",loginstring+server))
else:
if type == "winfile":
filelocation=("%s://%s%s/%s" % (protocol, loginstring, server, file[3:].replace("\\","/")))
else:
                    #else assume it's a file local to the server, available over smb/samba (now we have linux PMS). Add server name to file path.
filelocation="%s://%s%s%s" % (protocol,loginstring, server, file)
if settings.get_setting('nasoverride') and settings.get_setting('nasroot'):
#Re-root the file path
printDebug.debug("Altering path %s so root is: %s" % (filelocation, settings.get_setting('nasroot')))
if '/'+settings.get_setting('nasroot')+'/' in filelocation:
components = filelocation.split('/')
index = components.index(settings.get_setting('nasroot'))
for i in range(3,index):
components.pop(3)
filelocation='/'.join(components)
else:
printDebug.debug( "No option detected, streaming is safest to choose" )
filelocation=server.get_formatted_url(stream)
printDebug.debug("Returning URL: %s " % filelocation)
return filelocation
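# Quick reference for the stream setting handled above (values as used in this function):
#   "0" - auto: try to open the local file path first, otherwise fall back to streaming
#   "1" - always stream via the part's key URL from the Plex server
#   "2"/"3" - rewrite the file path as an smb:// or afp:// URL (with optional NAS overrides)
# e.g. a Windows path D:\Movies\film.mkv served from 192.168.0.2 becomes
# smb://192.168.0.2/Movies/film.mkv when SMB is selected (server/path values illustrative).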
def addGUIItem(url, details, extraData, context=None, folder=True):
printDebug.debug("Adding Dir for [%s]\n Passed details: %s\n Passed extraData: %s" % ( details.get('title', 'Unknown'), details, extraData))
#Create the URL to pass to the item
if not folder and extraData['type'] == "image" :
link_url=url
elif url.startswith('http') or url.startswith('file'):
link_url="%s?url=%s&mode=%s" % ( sys.argv[0], urllib.quote(url), extraData.get('mode',0))
else:
link_url="%s?url=%s&mode=%s" % ( sys.argv[0], url, extraData.get('mode',0))
if extraData.get('parameters'):
for argument, value in extraData.get('parameters').items():
link_url = "%s&%s=%s" % (link_url, argument, urllib.quote(value))
printDebug.debug("URL to use for listing: %s" % link_url)
liz=xbmcgui.ListItem(details.get('title', 'Unknown'), thumbnailImage=extraData.get('thumb', GENERIC_THUMBNAIL))
printDebug.debug("Setting thumbnail as %s" % extraData.get('thumb', GENERIC_THUMBNAIL))
#Set the properties of the item, such as summary, name, season, etc
liz.setInfo(type=extraData.get('type','Video'), infoLabels=details )
#Music related tags
if extraData.get('type','').lower() == "music":
liz.setProperty('Artist_Genre', details.get('genre',''))
liz.setProperty('Artist_Description', extraData.get('plot',''))
liz.setProperty('Album_Description', extraData.get('plot',''))
#For all end items
if not folder:
liz.setProperty('IsPlayable', 'true')
if extraData.get('type','video').lower() == "video":
liz.setProperty('TotalTime', str(extraData.get('duration')))
liz.setProperty('ResumeTime', str(extraData.get('resume')))
if not settings.get_setting('skipflags'):
printDebug.debug("Setting VrR as : %s" % extraData.get('VideoResolution',''))
liz.setProperty('VideoResolution', extraData.get('VideoResolution',''))
liz.setProperty('VideoCodec', extraData.get('VideoCodec',''))
liz.setProperty('AudioCodec', extraData.get('AudioCodec',''))
liz.setProperty('AudioChannels', extraData.get('AudioChannels',''))
liz.setProperty('VideoAspect', extraData.get('VideoAspect',''))
video_codec={}
if extraData.get('xbmc_VideoCodec'): video_codec['codec'] = extraData.get('xbmc_VideoCodec')
if extraData.get('xbmc_VideoAspect') : video_codec['aspect'] = float(extraData.get('xbmc_VideoAspect'))
if extraData.get('xbmc_height') : video_codec['height'] = int(extraData.get('xbmc_height'))
if extraData.get('xbmc_width') : video_codec['width'] = int(extraData.get('xbmc_width'))
if extraData.get('duration') : video_codec['duration'] = int(extraData.get('duration'))
audio_codec={}
if extraData.get('xbmc_AudioCodec') : audio_codec['codec'] = extraData.get('xbmc_AudioCodec')
if extraData.get('xbmc_AudioChannels') : audio_codec['channels'] = int(extraData.get('xbmc_AudioChannels'))
liz.addStreamInfo('video', video_codec )
liz.addStreamInfo('audio', audio_codec )
        if extraData.get('source') == 'tvshows' or extraData.get('source') == 'tvseasons':
#Then set the number of watched and unwatched, which will be displayed per season
liz.setProperty('TotalEpisodes', str(extraData['TotalEpisodes']))
liz.setProperty('WatchedEpisodes', str(extraData['WatchedEpisodes']))
liz.setProperty('UnWatchedEpisodes', str(extraData['UnWatchedEpisodes']))
#Hack to show partial flag for TV shows and seasons
if extraData.get('partialTV') == 1:
liz.setProperty('TotalTime', '100')
liz.setProperty('ResumeTime', '50')
#assign artwork
fanart = extraData.get('fanart_image','')
thumb = extraData.get('thumb', '')
banner = extraData.get('banner', '')
#tvshow poster
season_thumb = extraData.get('season_thumb', '')
if season_thumb:
poster = season_thumb
else:
poster = thumb
if fanart:
printDebug.debug("Setting fan art as %s" % fanart)
liz.setProperty('fanart_image', fanart)
if banner:
printDebug.debug("Setting banner as %s" % banner)
liz.setProperty('banner', '%s' % banner)
if season_thumb:
printDebug.debug("Setting season Thumb as %s" % season_thumb)
liz.setProperty('seasonThumb', '%s' % season_thumb)
liz.setArt({"fanart":fanart, "poster":poster, "banner":banner, "thumb":thumb})
if context is not None:
if not folder and extraData.get('type','video').lower() == "video":
#Play Transcoded
context.insert(0,('Play Transcoded', "XBMC.PlayMedia(%s&transcode=1)" % link_url , ))
printDebug.debug("Setting transcode options to [%s&transcode=1]" % link_url)
printDebug.debug("Building Context Menus")
liz.addContextMenuItems( context, settings.get_setting('contextreplace') )
return xbmcplugin.addDirectoryItem(handle=pluginhandle,url=link_url,listitem=liz,isFolder=folder)
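# For reference, a generated listing URL takes roughly the form
#   plugin://plugin.video.plexbmc/?url=<quoted server url>&mode=<MODE_*>[&<param>=<value>...]
# (values illustrative); Kodi later calls the add-on back with that query string and the
# mode value selects which handler function below processes it.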
def displaySections( filter=None, display_shared=False ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'files')
server_list=plex_network.get_server_list()
printDebug.debug( "Using list of %s servers: %s" % ( len(server_list), server_list))
for server in server_list:
server.discover_sections()
for section in server.get_sections():
if display_shared and server.is_owned():
continue
details={'title' : section.get_title() }
if len(server_list) > 1:
details['title']="%s: %s" % (server.get_name(), details['title'])
extraData={ 'fanart_image' : server.get_fanart(section),
'type' : "Video"}
            #Determine what we are going to process after a link is selected by the user, based on the content we find
path=section.get_path()
if section.is_show():
mode=MODE_TVSHOWS
if (filter is not None) and (filter != "tvshows"):
continue
elif section.is_movie():
mode=MODE_MOVIES
if (filter is not None) and (filter != "movies"):
continue
elif section.is_artist():
mode=MODE_ARTISTS
if (filter is not None) and (filter != "music"):
continue
elif section.is_photo():
mode=MODE_PHOTOS
if (filter is not None) and (filter != "photos"):
continue
else:
printDebug.debug("Ignoring section %s of type %s as unable to process" % ( details['title'], section.get_type() ) )
continue
if settings.get_setting('secondary'):
mode=MODE_GETCONTENT
else:
path=path+'/all'
extraData['mode']=mode
section_url='%s%s' % ( server.get_url_location(), path)
if not settings.get_setting('skipcontextmenus'):
context=[]
context.append(('Refresh library section', 'RunScript(plugin.video.plexbmc, update, %s, %s)' % (server.get_uuid(), section.get_key()) ))
else:
context=None
#Build that listing..
addGUIItem(section_url, details,extraData, context)
if display_shared:
xbmcplugin.endOfDirectory(pluginhandle, cacheToDisc=settings.get_setting('kodicache'))
return
#For each of the servers we have identified
if plex_network.is_myplex_signedin():
addGUIItem('http://myplexqueue', {'title': 'myplex Queue'}, {'type': 'Video', 'mode': MODE_MYPLEXQUEUE})
for server in server_list:
if server.is_offline() or server.is_secondary():
continue
#Plex plugin handling
if (filter is not None) and (filter != "plugins"):
continue
if len(server_list) > 1:
prefix=server.get_name()+": "
else:
prefix=""
details={'title' : prefix+"Channels" }
extraData={'type' : "Video"}
extraData['mode']=MODE_CHANNELVIEW
u="%s/channels/all" % server.get_url_location()
addGUIItem(u,details,extraData)
#Create plexonline link
details['title']=prefix+"Plex Online"
extraData['type'] = "file"
extraData['mode'] = MODE_PLEXONLINE
u="%s/system/plexonline" % server.get_url_location()
addGUIItem(u,details,extraData)
#create playlist link
details['title']=prefix+"Playlists"
extraData['type'] = "file"
extraData['mode'] = MODE_PLAYLISTS
u="%s/playlists" % server.get_url_location()
addGUIItem(u,details,extraData)
if plex_network.is_myplex_signedin():
if plex_network.is_plexhome_enabled():
details = {'title' : "Switch User"}
extraData = {'type' : 'file'}
u="cmd:switchuser"
addGUIItem(u,details,extraData)
details = {'title' : "Sign Out"}
extraData = {'type' : 'file'}
u="cmd:signout"
addGUIItem(u,details,extraData)
else:
details = {'title' : "Sign In"}
extraData = {'type' : 'file'}
u="cmd:signintemp"
addGUIItem(u,details,extraData)
if settings.get_setting('cache'):
details = {'title' : "Refresh Data"}
extraData = {}
extraData['type']="file"
extraData['mode']= MODE_DELETE_REFRESH
u="http://nothing"
addGUIItem(u,details,extraData)
#All XML entries have been parsed and we are ready to allow the user to browse around. So end the screen listing.
xbmcplugin.endOfDirectory(pluginhandle, cacheToDisc=settings.get_setting('kodicache'))
def enforceSkinView(mode):
'''
    Ensure that the views are consistent across plugin usage, depending
    upon the view selected by the user
@input: User view selection
@return: view id for skin
'''
printDebug.debug("== ENTER ==")
if not settings.get_setting('skinoverride'):
return None
skinname = settings.get_setting('skinname')
current_skin_name = xbmc.getSkinDir()
skin_map = { '2' : 'skin.confluence' ,
'0' : 'skin.quartz' ,
'1' : 'skin.quartz3' ,
'3' : 'skin.amber',
'4' : 'skin.aeon.nox.5' }
if skin_map[skinname] not in current_skin_name:
printDebug.debug("Do not have the correct skin [%s] selected in settings [%s] - ignoring" % (current_skin_name, skin_map[skinname]))
return None
if mode == "movie":
printDebug.debug("Looking for movie skin settings")
viewname = settings.get_setting('mo_view_%s' % skinname)
elif mode == "tv":
printDebug.debug("Looking for tv skin settings")
viewname = settings.get_setting('tv_view_%s' % skinname)
elif mode == "music":
printDebug.debug("Looking for music skin settings")
viewname = settings.get_setting('mu_view_%s' % skinname)
elif mode == "episode":
printDebug.debug("Looking for music skin settings")
viewname = settings.get_setting('ep_view_%s' % skinname)
elif mode == "season":
printDebug.debug("Looking for music skin settings")
viewname = settings.get_setting('se_view_%s' % skinname)
else:
viewname = "None"
printDebug.debug("view name is %s" % viewname)
if viewname == "None":
return None
QuartzV3_views={ 'List' : 50,
'Big List' : 51,
'MediaInfo' : 52,
'MediaInfo 2' : 54,
'Big Icons' : 501,
'Icons': 53,
'Panel' : 502,
'Wide' : 55,
'Fanart 1' : 57,
'Fanart 2' : 59,
'Fanart 3' : 500 }
Quartz_views={ 'List' : 50,
'MediaInfo' : 51,
'MediaInfo 2' : 52,
'Icons': 53,
'Wide' : 54,
'Big Icons' : 55,
'Icons 2' : 56 ,
'Panel' : 57,
'Fanart' : 58,
'Fanart 2' : 59 }
Confluence_views={ 'List' : 50,
'Big List' : 51,
'Thumbnail' : 500,
'Poster Wrap': 501,
'Fanart' : 508,
'Media Info' : 504,
'Media Info 2' : 503,
'Media Info 3' : 515,
'Wide Icons' : 505 }
Amber_views = { 'List' : 50,
'Big List' : 52,
'Panel': 51,
'Low List' : 54,
'Icons' : 53,
'Big Panel' : 55,
'Fanart' : 59 }
aeon_nox_views = { 'List' : 50 ,
'InfoWall' : 51 ,
'Landscape' : 52 ,
'ShowCase1' : 53 ,
'ShowCase2' : 54 ,
'TriPanel' : 55 ,
'Posters' : 56 ,
'Shift' : 57 ,
'BannerWall' : 58 ,
'Logo' : 59 ,
'Wall' : 500 ,
'LowList' : 501 ,
'Episode' : 502 ,
'Wall' : 503 ,
'BigList' : 510 }
skin_list={"0" : Quartz_views ,
"1" : QuartzV3_views,
"2" : Confluence_views,
"3" : Amber_views,
"4" : aeon_nox_views }
printDebug.debug("Using skin view: %s" % skin_list[skinname][viewname])
try:
return skin_list[skinname][viewname]
except:
print "PleXBMC -> skin name or view name error"
return None
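# Example (values taken from the tables above): with skin override enabled, skin "2"
# (Confluence) active and the movie view setting "Media Info 2", enforceSkinView("movie")
# returns 503, which the caller applies via Container.SetViewMode(503).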
def Movies( url, tree=None ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'movies')
xbmcplugin.addSortMethod(pluginhandle, 37 ) #maintain original plex sorted
xbmcplugin.addSortMethod(pluginhandle, 25 ) #video title ignore THE
xbmcplugin.addSortMethod(pluginhandle, 19 ) #date added
xbmcplugin.addSortMethod(pluginhandle, 3 ) #date
xbmcplugin.addSortMethod(pluginhandle, 18 ) #rating
xbmcplugin.addSortMethod(pluginhandle, 17 ) #year
xbmcplugin.addSortMethod(pluginhandle, 29 ) #runtime
xbmcplugin.addSortMethod(pluginhandle, 28 ) #by MPAA
#get the server name from the URL, which was passed via the on screen listing..
server=plex_network.get_server_from_url(url)
tree=getXML(url,tree)
if tree is None:
return
setWindowHeading(tree)
randomNumber=str(random.randint(1000000000,9999999999))
#Find all the video tags, as they contain the data we need to link to a file.
start_time=time.time()
count=0
for movie in tree:
if movie.tag == "Video":
movieTag(url, server, tree, movie, randomNumber)
count+=1
printDebug.info("PROCESS: It took %s seconds to process %s items" % (time.time()-start_time, count))
printDebug.debug("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('movie')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle, cacheToDisc=settings.get_setting('kodicache'))
def buildContextMenu( url, itemData, server ):
context=[]
url_parts = urlparse.urlparse(url)
section=url_parts.path.split('/')[3]
ID=itemData.get('ratingKey','0')
#Mark media unwatched
context.append(('Mark as Unwatched', 'RunScript(plugin.video.plexbmc, watch, %s, %s, %s)' % ( server.get_uuid(), ID, 'unwatch' ) ))
context.append(('Mark as Watched', 'RunScript(plugin.video.plexbmc, watch, %s, %s, %s)' % ( server.get_uuid(), ID, 'watch' ) ))
context.append(('Rescan library section', 'RunScript(plugin.video.plexbmc, update, %s, %s)' % ( server.get_uuid(), section ) ))
context.append(('Delete media', "RunScript(plugin.video.plexbmc, delete, %s, %s)" % ( server.get_uuid(), ID) ))
context.append(('Reload Section', 'RunScript(plugin.video.plexbmc, refresh)' ))
context.append(('Select Audio', "RunScript(plugin.video.plexbmc, audio, %s, %s)" % ( server.get_uuid(), ID) ))
context.append(('Select Subtitle', "RunScript(plugin.video.plexbmc, subs, %s, %s)" % ( server.get_uuid(), ID) ))
printDebug.debug("Using context menus: %s" % context)
return context
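# Each context entry pairs a label with a RunScript call back into this add-on, e.g.
#   ('Mark as Watched', 'RunScript(plugin.video.plexbmc, watch, <server uuid>, <ratingKey>, watch)')
# so the chosen action is presumably dispatched by the add-on's script entry point
# (placeholders shown in angle brackets).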
def TVShows( url, tree=None ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'tvshows')
xbmcplugin.addSortMethod(pluginhandle, 37 ) #maintain original plex sorted
xbmcplugin.addSortMethod(pluginhandle, 25 ) #video title ignore THE
xbmcplugin.addSortMethod(pluginhandle, 3 ) #date
xbmcplugin.addSortMethod(pluginhandle, 18 ) #rating
xbmcplugin.addSortMethod(pluginhandle, 17 ) #year
xbmcplugin.addSortMethod(pluginhandle, 28 ) #by MPAA
#Get the URL and server name. Get the XML and parse
tree=getXML(url,tree)
if tree is None:
return
server=plex_network.get_server_from_url(url)
setWindowHeading(tree)
#For each directory tag we find
ShowTags=tree.findall('Directory')
for show in ShowTags:
tempgenre=[]
for child in show:
if child.tag == "Genre":
tempgenre.append(child.get('tag',''))
watched = int(show.get('viewedLeafCount',0))
#Create the basic data structures to pass up
details={'title' : show.get('title','Unknown').encode('utf-8') ,
'sorttitle' : show.get('titleSort', show.get('title','Unknown')).encode('utf-8') ,
'tvshowname' : show.get('title','Unknown').encode('utf-8') ,
'studio' : show.get('studio','').encode('utf-8') ,
'plot' : show.get('summary','').encode('utf-8') ,
'season' : 0 ,
'episode' : int(show.get('leafCount',0)) ,
'mpaa' : show.get('contentRating','') ,
'aired' : show.get('originallyAvailableAt','') ,
'genre' : " / ".join(tempgenre) }
extraData={'type' : 'video' ,
'source' : 'tvshows',
'UnWatchedEpisodes' : int(details['episode']) - watched,
'WatchedEpisodes' : watched,
'TotalEpisodes' : details['episode'],
'thumb' : getThumb(show, server) ,
'fanart_image' : getFanart(show, server) ,
'key' : show.get('key','') ,
'ratingKey' : str(show.get('ratingKey',0)) }
#banner art
if show.get('banner') is not None:
extraData['banner'] = server.get_url_location()+show.get('banner')
else:
extraData['banner'] = GENERIC_THUMBNAIL
#Set up overlays for watched and unwatched episodes
if extraData['WatchedEpisodes'] == 0:
details['playcount'] = 0
elif extraData['UnWatchedEpisodes'] == 0:
details['playcount'] = 1
else:
extraData['partialTV'] = 1
#Create URL based on whether we are going to flatten the season view
if settings.get_setting('flatten') == "2":
printDebug.debug("Flattening all shows")
extraData['mode']=MODE_TVEPISODES
u='%s%s' % ( server.get_url_location(), extraData['key'].replace("children","allLeaves"))
else:
extraData['mode']=MODE_TVSEASONS
u='%s%s' % ( server.get_url_location(), extraData['key'])
if not settings.get_setting('skipcontextmenus'):
context=buildContextMenu(url, extraData, server)
else:
context=None
addGUIItem(u,details,extraData, context)
printDebug ("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('tv')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle, cacheToDisc=settings.get_setting('kodicache'))
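# The <Directory> elements parsed above are expected to carry attributes such as title,
# titleSort, studio, summary, leafCount, viewedLeafCount, contentRating,
# originallyAvailableAt, banner, key and ratingKey, plus nested <Genre> children -
# an illustrative element:
#   <Directory ratingKey="120" key="/library/metadata/120/children" title="Some Show"
#              leafCount="10" viewedLeafCount="4" />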
def TVSeasons( url ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'seasons')
#Get URL, XML and parse
server=plex_network.get_server_from_url(url)
tree=getXML(url)
if tree is None:
return
willFlatten=False
if settings.get_setting('flatten') == "1":
#check for a single season
if int(tree.get('size',0)) == 1:
printDebug.debug("Flattening single season show")
willFlatten=True
sectionart=getFanart(tree, server)
banner=tree.get('banner')
setWindowHeading(tree)
#For all the directory tags
SeasonTags=tree.findall('Directory')
plot=tree.get('summary','').encode('utf-8')
for season in SeasonTags:
if willFlatten:
url=server.get_url_location()+season.get('key')
TVEpisodes(url)
return
if settings.get_setting('disable_all_season') and season.get('index') is None:
continue
watched=int(season.get('viewedLeafCount',0))
#Create the basic data structures to pass up
details={'title' : season.get('title','Unknown').encode('utf-8') ,
'tvshowname' : season.get('title','Unknown').encode('utf-8') ,
'sorttitle' : season.get('titleSort', season.get('title','Unknown')).encode('utf-8') ,
'studio' : season.get('studio','').encode('utf-8') ,
'plot' : plot ,
'season' : 0 ,
'episode' : int(season.get('leafCount',0)) ,
'mpaa' : season.get('contentRating','') ,
'aired' : season.get('originallyAvailableAt','') }
if season.get('sorttitle'): details['sorttitle'] = season.get('sorttitle')
extraData={'type' : 'video' ,
'source' : 'tvseasons',
'TotalEpisodes' : details['episode'],
'WatchedEpisodes' : watched ,
'UnWatchedEpisodes' : details['episode'] - watched ,
'thumb' : getThumb(season, server) ,
'fanart_image' : getFanart(season, server) ,
'key' : season.get('key','') ,
'ratingKey' : str(season.get('ratingKey',0)) ,
'mode' : MODE_TVEPISODES }
if banner:
extraData['banner']=server.get_url_location()+banner
if extraData['fanart_image'] == "":
extraData['fanart_image']=sectionart
#Set up overlays for watched and unwatched episodes
if extraData['WatchedEpisodes'] == 0:
details['playcount'] = 0
elif extraData['UnWatchedEpisodes'] == 0:
details['playcount'] = 1
else:
extraData['partialTV'] = 1
url='%s%s' % ( server.get_url_location() , extraData['key'] )
if not settings.get_setting('skipcontextmenus'):
context=buildContextMenu(url, season, server)
else:
context=None
#Build the screen directory listing
addGUIItem(url,details,extraData, context)
printDebug.debug("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('season')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def TVEpisodes( url, tree=None ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'episodes')
tree=getXML(url,tree)
if tree is None:
return
setWindowHeading(tree)
#get banner thumb
banner = tree.get('banner')
#get season thumb for SEASON NODE
season_thumb = tree.get('thumb', '')
    printDebug.debug("Season thumb is: %s" % season_thumb)
if season_thumb == "/:/resources/show.png":
season_thumb = ""
ShowTags=tree.findall('Video')
server=plex_network.get_server_from_url(url)
if not settings.get_setting('skipimages'):
sectionart=getFanart(tree, server)
randomNumber=str(random.randint(1000000000,9999999999))
if tree.get('mixedParents') == '1':
printDebug.info('Setting plex sort')
xbmcplugin.addSortMethod(pluginhandle, 37 ) #maintain original plex sorted
else:
printDebug.info('Setting KODI sort')
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_EPISODE ) #episode
xbmcplugin.addSortMethod(pluginhandle, 3 ) #date
xbmcplugin.addSortMethod(pluginhandle, 25 ) #video title ignore THE
xbmcplugin.addSortMethod(pluginhandle, 19 ) #date added
xbmcplugin.addSortMethod(pluginhandle, 18 ) #rating
xbmcplugin.addSortMethod(pluginhandle, 17 ) #year
xbmcplugin.addSortMethod(pluginhandle, 29 ) #runtime
xbmcplugin.addSortMethod(pluginhandle, 28 ) #by MPAA
for episode in ShowTags:
printDebug.debug("---New Item---")
tempgenre=[]
tempcast=[]
tempdir=[]
tempwriter=[]
for child in episode:
if child.tag == "Media":
mediaarguments = dict(child.items())
elif child.tag == "Genre" and not settings.get_setting('skipmetadata'):
tempgenre.append(child.get('tag'))
elif child.tag == "Writer" and not settings.get_setting('skipmetadata'):
tempwriter.append(child.get('tag'))
elif child.tag == "Director" and not settings.get_setting('skipmetadata'):
tempdir.append(child.get('tag'))
elif child.tag == "Role" and not settings.get_setting('skipmetadata'):
tempcast.append(child.get('tag'))
printDebug.debug("Media attributes are %s" % mediaarguments)
#Gather some data
view_offset=episode.get('viewOffset',0)
duration=int(mediaarguments.get('duration',episode.get('duration',0)))/1000
#Required listItem entries for XBMC
details={'plot' : episode.get('summary','').encode('utf-8') ,
'title' : episode.get('title','Unknown').encode('utf-8') ,
'sorttitle' : episode.get('titleSort', episode.get('title','Unknown')).encode('utf-8') ,
'rating' : float(episode.get('rating',0)) ,
'studio' : episode.get('studio',tree.get('studio','')).encode('utf-8') ,
'mpaa' : episode.get('contentRating', tree.get('grandparentContentRating','')) ,
'year' : int(episode.get('year',0)) ,
'tagline' : episode.get('tagline','').encode('utf-8') ,
'episode' : int(episode.get('index',0)) ,
'aired' : episode.get('originallyAvailableAt','') ,
'tvshowtitle' : episode.get('grandparentTitle',tree.get('grandparentTitle','')).encode('utf-8') ,
'season' : int(episode.get('parentIndex',tree.get('parentIndex',0))) }
if episode.get('sorttitle'):
details['sorttitle'] = episode.get('sorttitle').encode('utf-8')
if tree.get('mixedParents') == '1':
if tree.get('parentIndex') == '1':
details['title'] = "%sx%s %s" % ( details['season'], str(details['episode']).zfill(2), details['title'] )
else:
details['title'] = "%s - %sx%s %s" % ( details['tvshowtitle'], details['season'], str(details['episode']).zfill(2), details['title'] )
#Extra data required to manage other properties
extraData={'type' : "Video" ,
'source' : 'tvepisodes',
'thumb' : getThumb(episode, server) ,
'fanart_image' : getFanart(episode, server) ,
'key' : episode.get('key',''),
'ratingKey' : str(episode.get('ratingKey',0)),
'duration' : duration,
'resume' : int(int(view_offset)/1000) }
if extraData['fanart_image'] == "" and not settings.get_setting('skipimages'):
extraData['fanart_image'] = sectionart
if season_thumb:
extraData['season_thumb'] = server.get_url_location() + season_thumb
#get ALL SEASONS or TVSHOW thumb
if not season_thumb and episode.get('parentThumb', ""):
extraData['season_thumb'] = "%s%s" % (server.get_url_location(), episode.get('parentThumb', ""))
elif not season_thumb and episode.get('grandparentThumb', ""):
extraData['season_thumb'] = "%s%s" % (server.get_url_location(), episode.get('grandparentThumb', ""))
if banner:
extraData['banner'] = "%s%s" % (server.get_url_location(), banner)
        #Determine what type of watched flag [overlay] to use
if int(episode.get('viewCount',0)) > 0:
details['playcount'] = 1
else:
details['playcount'] = 0
#Extended Metadata
if not settings.get_setting('skipmetadata'):
details['cast'] = tempcast
details['director'] = " / ".join(tempdir)
details['writer'] = " / ".join(tempwriter)
details['genre'] = " / ".join(tempgenre)
#Add extra media flag data
if not settings.get_setting('skipflags'):
extraData.update(getMediaData(mediaarguments))
#Build any specific context menu entries
if not settings.get_setting('skipcontextmenus'):
context=buildContextMenu(url, extraData,server)
else:
context=None
extraData['mode']=MODE_PLAYLIBRARY
separator = "?"
if "?" in extraData['key']:
separator = "&"
u="%s%s%st=%s" % (server.get_url_location(), extraData['key'], separator, randomNumber)
addGUIItem(u,details,extraData, context, folder=False)
printDebug.debug("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('episode')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def getAudioSubtitlesMedia( server, tree, full=False ):
'''
Cycle through the Parts sections to find all "selected" audio and subtitle streams
If a stream is marked as selected=1 then we will record it in the dict
Any that are not, are ignored as we do not need to set them
We also record the media locations for playback decision later on
'''
printDebug.debug("== ENTER ==")
printDebug.debug("Gather media stream info" )
parts=[]
partsCount=0
subtitle={}
subCount=0
audio={}
audioCount=0
media={}
subOffset=-1
audioOffset=-1
selectedSubOffset=-1
selectedAudioOffset=-1
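    #subOffset/audioOffset track stream positions as we walk the XML; the "selected" offsets are the
    #zero-based, per-type indexes later handed to xbmc.Player().setAudioStream()/setSubtitleStream()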
full_data={}
contents="type"
media_type="unknown"
extra={}
timings = tree.find('Video')
if timings is not None:
media_type="video"
extra['path']=timings.get('key')
else:
timings = tree.find('Track')
        if timings is not None:
media_type="music"
extra['path']=timings.get('key')
else:
timings = tree.find('Photo')
            if timings is not None:
media_type="picture"
extra['path']=timings.get('key')
else:
printDebug.debug("No Video data found")
return {}
media['viewOffset']=timings.get('viewOffset',0)
media['duration']=timings.get('duration',12*60*60)
if full:
if media_type == "video":
full_data={ 'plot' : timings.get('summary','').encode('utf-8') ,
'title' : timings.get('title','Unknown').encode('utf-8') ,
'sorttitle' : timings.get('titleSort', timings.get('title','Unknown')).encode('utf-8') ,
'rating' : float(timings.get('rating',0)) ,
'studio' : timings.get('studio','').encode('utf-8'),
'mpaa' : timings.get('contentRating', '').encode('utf-8'),
'year' : int(timings.get('year',0)) ,
'tagline' : timings.get('tagline','') ,
'thumbnailImage': getThumb(timings,server) }
if timings.get('type') == "episode":
full_data['episode'] = int(timings.get('index',0))
full_data['aired'] = timings.get('originallyAvailableAt','')
full_data['tvshowtitle'] = timings.get('grandparentTitle',tree.get('grandparentTitle','')).encode('utf-8')
full_data['season'] = int(timings.get('parentIndex',tree.get('parentIndex',0)))
elif media_type == "music":
full_data={'TrackNumber' : int(timings.get('index',0)) ,
'title' : str(timings.get('index',0)).zfill(2)+". "+timings.get('title','Unknown').encode('utf-8') ,
'rating' : float(timings.get('rating',0)) ,
'album' : timings.get('parentTitle', tree.get('parentTitle','')).encode('utf-8') ,
'artist' : timings.get('grandparentTitle', tree.get('grandparentTitle','')).encode('utf-8') ,
'duration' : int(timings.get('duration',0))/1000 ,
'thumbnailImage': getThumb(timings,server) }
extra['album']=timings.get('parentKey')
extra['index']=timings.get('index')
details = timings.findall('Media')
media_details_list=[]
for media_details in details:
resolution=""
try:
if media_details.get('videoResolution') == "sd":
resolution="SD"
elif int(media_details.get('videoResolution',0)) >= 1080:
resolution="HD 1080"
elif int(media_details.get('videoResolution',0)) >= 720:
resolution="HD 720"
elif int(media_details.get('videoResolution',0)) < 720:
resolution="SD"
except:
pass
media_details_temp = { 'bitrate' : round(float(media_details.get('bitrate',0))/1000,1) ,
'videoResolution' : resolution ,
'container' : media_details.get('container','unknown') }
options = media_details.findall('Part')
#Get the media locations (file and web) for later on
for stuff in options:
try:
bits=stuff.get('key'), stuff.get('file')
parts.append(bits)
media_details_list.append(media_details_temp)
partsCount += 1
except: pass
#if we are deciding internally or forcing an external subs file, then collect the data
if media_type == "video" and settings.get_setting('streamControl') == SUB_AUDIO_PLEX_CONTROL:
contents="all"
tags=tree.getiterator('Stream')
for bits in tags:
stream=dict(bits.items())
#Audio Streams
if stream['streamType'] == '2':
audioCount += 1
audioOffset += 1
if stream.get('selected') == "1":
printDebug.debug("Found preferred audio id: %s " % stream['id'] )
audio=stream
selectedAudioOffset=audioOffset
#Subtitle Streams
elif stream['streamType'] == '3':
if subOffset == -1:
subOffset = int(stream.get('index',-1))
                elif int(stream.get('index',-1)) > 0 and int(stream.get('index',-1)) < subOffset:
subOffset = int(stream.get('index',-1))
if stream.get('selected') == "1":
printDebug.debug( "Found preferred subtitles id : %s " % stream['id'])
subCount += 1
subtitle=stream
if stream.get('key'):
subtitle['key'] = server.get_formatted_url(stream['key'])
else:
selectedSubOffset=int( stream.get('index') ) - subOffset
else:
printDebug.debug( "Stream selection is set OFF")
streamData={'contents' : contents , #What type of data we are holding
'audio' : audio , #Audio data held in a dict
'audioCount' : audioCount , #Number of audio streams
'subtitle' : subtitle , #Subtitle data (embedded) held as a dict
'subCount' : subCount , #Number of subtitle streams
                'parts'      : parts ,              #The different media locations
'partsCount' : partsCount , #Number of media locations
'media' : media , #Resume/duration data for media
'details' : media_details_list , #Bitrate, resolution and container for each part
'subOffset' : selectedSubOffset , #Stream index for selected subs
                'audioOffset': selectedAudioOffset ,      #Stream index for selected audio
'full_data' : full_data , #Full metadata extract if requested
'type' : media_type , #Type of metadata
'extra' : extra } #Extra data
printDebug.debug( streamData )
return streamData
def playPlaylist ( server, data ):
printDebug.debug("== ENTER ==")
printDebug.debug("Creating new playlist")
playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
playlist.clear()
tree = getXML(server.get_url_location()+data['extra'].get('album')+"/children")
if tree is None:
return
TrackTags=tree.findall('Track')
for track in TrackTags:
printDebug.debug("Adding playlist item")
url, item = trackTag(server, tree, track, listing = False)
liz=xbmcgui.ListItem(item.get('title','Unknown'), iconImage=data['full_data'].get('thumbnailImage','') , thumbnailImage=data['full_data'].get('thumbnailImage',''))
liz.setInfo( type='music', infoLabels=item )
playlist.add(url, liz)
index = int(data['extra'].get('index',0)) - 1
printDebug.debug("Playlist complete. Starting playback from track %s [playlist index %s] " % (data['extra'].get('index',0), index ))
xbmc.Player().playselected( index )
return
def playLibraryMedia( vids, override=False, force=None, full_data=False, shelf=False ):
#assume widget if playback initiated from home
if xbmc.getCondVisibility("Window.IsActive(home)"):
shelf = True
full_data = True
session=None
if settings.get_setting('transcode'):
override=True
if override:
full_data = True
server=plex_network.get_server_from_url(vids)
id=vids.split('?')[0].split('&')[0].split('/')[-1]
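    #The trailing path component of the library URL is used as the item id (presumably its Plex ratingKey) for progress reporting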
tree=getXML(vids)
if tree is None:
return
if force:
full_data = True
streams=getAudioSubtitlesMedia(server,tree, full_data)
if force and streams['type'] == "music":
playPlaylist(server, streams)
return
url=selectMedia(streams, server)
if url is None:
return
protocol=url.split(':',1)[0]
if protocol == "file":
printDebug.debug( "We are playing a local file")
playurl=url.split(':',1)[1]
elif protocol == "http":
printDebug.debug( "We are playing a stream")
if override:
printDebug.debug( "We will be transcoding the stream")
if settings.get_setting('transcode_type') == "universal":
session, playurl=server.get_universal_transcode(streams['extra']['path'])
elif settings.get_setting('transcode_type') == "legacy":
session, playurl=server.get_legacy_transcode(id,url)
else:
playurl=server.get_formatted_url(url)
else:
playurl=url
resume=int(int(streams['media']['viewOffset'])/1000)
duration=int(int(streams['media']['duration'])/1000)
if not resume == 0 and shelf:
printDebug.debug("Shelf playback: display resume dialog")
displayTime = str(datetime.timedelta(seconds=resume))
display_list = [ "Resume from %s" % displayTime , "Start from beginning"]
resumeScreen = xbmcgui.Dialog()
result = resumeScreen.select('Resume',display_list)
if result == -1:
return False
if result == 1:
resume=0
printDebug.debug("Resume has been set to %s " % resume)
item = xbmcgui.ListItem(path=playurl)
if streams['full_data']:
item.setInfo( type=streams['type'], infoLabels=streams['full_data'] )
item.setThumbnailImage(streams['full_data'].get('thumbnailImage',''))
item.setIconImage(streams['full_data'].get('thumbnailImage',''))
if force:
if int(force) > 0:
resume=int(int(force)/1000)
else:
resume=force
if force or shelf or session is not None:
if resume:
item.setProperty('ResumeTime', str(resume) )
item.setProperty('TotalTime', str(duration) )
item.setProperty('StartOffset', str(resume))
printDebug.info("Playback from resume point: %s" % resume)
if streams['type'] == "picture":
import json
request=json.dumps({ "id" : 1,
"jsonrpc" : "2.0",
"method" : "Player.Open",
"params" : { "item" : {"file": playurl } } } )
html=xbmc.executeJSONRPC(request)
return
else:
if shelf:
            # if launched from widget, use player.play for playback so artwork and resume work correctly
xbmcplugin.setResolvedUrl(pluginhandle, False, item)
start = xbmc.Player().play(playurl,item)
else:
start = xbmcplugin.setResolvedUrl(pluginhandle, True, item)
# record the playing file and server in the home window
# so that plexbmc helper can find out what is playing
WINDOW = xbmcgui.Window( 10000 )
WINDOW.setProperty('plexbmc.nowplaying.server', server.get_location())
WINDOW.setProperty('plexbmc.nowplaying.id', id)
#Set a loop to wait for positive confirmation of playback
count = 0
while not xbmc.Player().isPlaying():
printDebug.debug( "Not playing yet...sleep for 2")
count = count + 2
if count >= 20:
return
else:
time.sleep(2)
if not override:
setAudioSubtitles(streams)
if streams['type'] == "video" or streams['type'] == "music":
monitorPlayback(id,server, playurl, session)
return
def setAudioSubtitles( stream ):
'''
Take the collected audio/sub stream data and apply to the media
If we do not have any subs then we switch them off
'''
printDebug.debug("== ENTER ==")
#If we have decided not to collect any sub data then do not set subs
if stream['contents'] == "type":
printDebug.info("No audio or subtitle streams to process.")
#If we have decided to force off all subs, then turn them off now and return
if settings.get_setting('streamControl') == SUB_AUDIO_NEVER_SHOW :
xbmc.Player().showSubtitles(False)
printDebug ("All subs disabled")
return True
#Set the AUDIO component
if settings.get_setting('streamControl') == SUB_AUDIO_PLEX_CONTROL:
printDebug.debug("Attempting to set Audio Stream")
audio = stream['audio']
if stream['audioCount'] == 1:
printDebug.info("Only one audio stream present - will leave as default")
elif audio:
printDebug.debug("Attempting to use selected language setting: %s" % audio.get('language',audio.get('languageCode','Unknown')).encode('utf8'))
printDebug.info("Found preferred language at index %s" % stream['audioOffset'])
try:
xbmc.Player().setAudioStream(stream['audioOffset'])
printDebug.debug("Audio set")
except:
printDebug.info("Error setting audio, will use embedded default stream")
#Set the SUBTITLE component
if settings.get_setting('streamControl') == SUB_AUDIO_PLEX_CONTROL:
printDebug.debug("Attempting to set preferred subtitle Stream")
subtitle=stream['subtitle']
if subtitle:
printDebug.debug("Found preferred subtitle stream" )
try:
xbmc.Player().showSubtitles(False)
if subtitle.get('key'):
xbmc.Player().setSubtitles(subtitle['key'])
else:
printDebug.info("Enabling embedded subtitles at index %s" % stream['subOffset'])
xbmc.Player().setSubtitleStream(int(stream['subOffset']))
xbmc.Player().showSubtitles(True)
return True
except:
printDebug.info("Error setting subtitle")
else:
printDebug.info("No preferred subtitles to set")
xbmc.Player().showSubtitles(False)
return False
def selectMedia( data, server ):
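    '''
    If an item has more than one media part (e.g. several files for the same movie), present a
    selection dialog to the user and return the playable URL for the chosen part.
    '''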
printDebug.debug("== ENTER ==")
#if we have two or more files for the same movie, then present a screen
result=0
dvdplayback=False
count=data['partsCount']
options=data['parts']
details=data['details']
if count > 1:
dialogOptions=[]
dvdIndex=[]
indexCount=0
for items in options:
if items[1]:
name=items[1].split('/')[-1]
#name="%s %s %sMbps" % (items[1].split('/')[-1], details[indexCount]['videoResolution'], details[indexCount]['bitrate'])
else:
name="%s %s %sMbps" % (items[0].split('.')[-1], details[indexCount]['videoResolution'], details[indexCount]['bitrate'])
if settings.get_setting('forcedvd'):
if '.ifo' in name.lower():
printDebug.debug( "Found IFO DVD file in " + name )
name="DVD Image"
dvdIndex.append(indexCount)
dialogOptions.append(name)
indexCount+=1
printDebug.debug("Create selection dialog box - we have a decision to make!")
startTime = xbmcgui.Dialog()
result = startTime.select('Select media to play',dialogOptions)
if result == -1:
return None
if result in dvdIndex:
printDebug.debug( "DVD Media selected")
dvdplayback=True
else:
if settings.get_setting('forcedvd'):
if '.ifo' in options[result]:
dvdplayback=True
newurl=mediaType({'key': options[result][0] , 'file' : options[result][1]},server,dvdplayback)
printDebug.debug("We have selected media at %s" % newurl)
return newurl
def monitorPlayback( id, server, playurl, session=None ):
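    '''
    Poll the Kodi player while our item is playing, reporting progress back to the PMS every
    couple of seconds (playing/paused/stopped), and close any transcode session when finished.
    '''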
printDebug.debug("== ENTER ==")
if session:
printDebug.debug("We are monitoring a transcode session")
if settings.get_setting('monitoroff'):
return
playedTime = 0
totalTime = 0
currentTime = 0
#Whilst the file is playing back
while xbmc.Player().isPlaying():
try:
if not ( playurl == xbmc.Player().getPlayingFile() ):
printDebug.info("File stopped being played")
break
except: pass
currentTime = int(xbmc.Player().getTime())
totalTime = int(xbmc.Player().getTotalTime())
try:
progress = int(( float(currentTime) / float(totalTime) ) * 100)
except:
progress = 0
if playedTime == currentTime:
printDebug.debug( "Movies paused at: %s secs of %s @ %s%%" % ( currentTime, totalTime, progress) )
server.report_playback_progress(id,currentTime*1000, state="paused", duration=totalTime*1000)
else:
printDebug.debug( "Movies played time: %s secs of %s @ %s%%" % ( currentTime, totalTime, progress) )
server.report_playback_progress(id,currentTime*1000, state="playing", duration=totalTime*1000)
playedTime = currentTime
xbmc.sleep(2000)
#If we get this far, playback has stopped
printDebug.debug("Playback Stopped")
server.report_playback_progress(id,playedTime*1000, state='stopped', duration=totalTime*1000)
if session is not None:
printDebug.debug("Stopping PMS transcode job with session %s" % session)
server.stop_transcode_session(session)
return
def PLAY( url ):
printDebug.debug("== ENTER ==")
if url.startswith('file'):
printDebug.debug( "We are playing a local file")
#Split out the path from the URL
playurl=url.split(':',1)[1]
elif url.startswith('http'):
printDebug.debug( "We are playing a stream")
if '?' in url:
server=plex_network.get_server_from_url(url)
playurl=server.get_formatted_url(url)
else:
playurl=url
item = xbmcgui.ListItem(path=playurl)
return xbmcplugin.setResolvedUrl(pluginhandle, True, item)
def videoPluginPlay(vids, prefix=None, indirect=None, transcode=False ):
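    '''
    Resolve and play a video link supplied by a Plex channel/plugin. Indirect and plex:// links
    are routed through the PMS transcoder first; over-long transcode URLs are worked around below.
    '''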
server=plex_network.get_server_from_url(vids)
if "node.plexapp.com" in vids:
server=getMasterServer()
if indirect:
#Probably should transcode this
if vids.startswith('http'):
vids='/'+vids.split('/',3)[3]
transcode=True
session, vids=server.get_universal_transcode(vids)
'''#If we find the url lookup service, then we probably have a standard plugin, but possibly with resolution choices
if '/services/url/lookup' in vids:
printDebug.debug("URL Lookup service")
tree=getXML(vids)
if not tree:
return
mediaCount=0
mediaDetails=[]
for media in tree.getiterator('Media'):
mediaCount+=1
tempDict={'videoResolution' : media.get('videoResolution',"Unknown")}
for child in media:
tempDict['key']=child.get('key','')
tempDict['identifier']=tree.get('identifier','')
mediaDetails.append(tempDict)
printDebug.debug( str(mediaDetails) )
#If we have options, create a dialog menu
result=0
if mediaCount > 1:
printDebug ("Select from plugin video sources")
dialogOptions=[x['videoResolution'] for x in mediaDetails ]
videoResolution = xbmcgui.Dialog()
result = videoResolution.select('Select resolution..',dialogOptions)
if result == -1:
return
videoPluginPlay(getLinkURL('',mediaDetails[result],server))
return
#Check if there is a further level of XML required
if indirect or '&indirect=1' in vids:
printDebug.debug("Indirect link")
tree=getXML(vids)
if not tree:
return
for bits in tree.getiterator('Part'):
videoPluginPlay(getLinkURL(vids,bits,server))
break
return
'''
#if we have a plex URL, then this is a transcoding URL
if 'plex://' in vids:
printDebug.debug("found webkit video, pass to transcoder")
if not (prefix):
prefix="system"
if settings.get_setting('transcode_type') == "universal":
session, vids=server.get_universal_transcode(vids)
elif settings.get_setting('transcode_type') == "legacy":
session, vids=server.get_legacy_transcode(0,vids,prefix)
    #Workaround for XBMC HLS request limit of 1024 bytes
if len(vids) > 1000:
printDebug.debug("XBMC HSL limit detected, will pre-fetch m3u8 playlist")
playlist = getXML(vids)
if not playlist or not "#EXTM3U" in playlist:
printDebug.debug("Unable to get valid m3u8 playlist from transcoder")
return
server=plex_network.get_server_from_url(vids)
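        #Assumption: the final whitespace-separated token of the returned m3u8 identifies the transcode
        #session, which lets us build a much shorter segmented-transcode URL below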
session=playlist.split()[-1]
vids="%s/video/:/transcode/segmented/%s?t=1" % (server.get_url_location(), session)
printDebug.debug("URL to Play: %s " % vids)
printDebug.debug("Prefix is: %s" % prefix)
#If this is an Apple movie trailer, add User Agent to allow access
if 'trailers.apple.com' in vids:
url=vids+"|User-Agent=QuickTime/7.6.5 (qtver=7.6.5;os=Windows NT 5.1Service Pack 3)"
else:
url=vids
printDebug.debug("Final URL is: %s" % url)
item = xbmcgui.ListItem(path=url)
start = xbmcplugin.setResolvedUrl(pluginhandle, True, item)
if transcode:
try:
pluginTranscodeMonitor(session,server)
except:
printDebug.debug("Unable to start transcode monitor")
else:
printDebug.debug("Not starting monitor")
return
def pluginTranscodeMonitor( sessionID, server ):
printDebug.debug("== ENTER ==")
#Logic may appear backward, but this does allow for a failed start to be detected
#First while loop waiting for start
if settings.get_setting('monitoroff'):
return
count=0
while not xbmc.Player().isPlaying():
printDebug.debug( "Not playing yet...sleep for 2")
count = count + 2
if count >= 40:
#Waited 20 seconds and still no movie playing - assume it isn't going to..
return
else:
xbmc.sleep(2000)
while xbmc.Player().isPlaying():
printDebug.debug("Waiting for playback to finish")
xbmc.sleep(4000)
printDebug.debug("Playback Stopped")
printDebug.debug("Stopping PMS transcode job with session: %s" % sessionID)
server.stop_transcode_session(sessionID)
return
def get_params( paramstring ):
printDebug.debug("== ENTER ==")
printDebug.debug("Parameter string: %s" % paramstring)
param={}
if len(paramstring)>=2:
params=paramstring
if params[0] == "?":
cleanedparams=params[1:]
else:
cleanedparams=params
        if (cleanedparams[len(cleanedparams)-1]=='/'):
            cleanedparams=cleanedparams[0:len(cleanedparams)-1]
pairsofparams=cleanedparams.split('&')
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
elif (len(splitparams))==3:
param[splitparams[0]]=splitparams[1]+"="+splitparams[2]
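        #e.g. "?url=http%3A%2F%2Fserver&mode=4" -> {'url': 'http%3A%2F%2Fserver', 'mode': '4'}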
print "PleXBMC -> Detected parameters: " + str(param)
return param
def channelSearch (url, prompt):
'''
When we encounter a search request, branch off to this function to generate the keyboard
and accept the terms. This URL is then fed back into the correct function for
onward processing.
'''
printDebug.debug("== ENTER ==")
if prompt:
prompt=urllib.unquote(prompt)
else:
prompt="Enter Search Term..."
kb = xbmc.Keyboard('', 'heading')
kb.setHeading(prompt)
kb.doModal()
if (kb.isConfirmed()):
text = kb.getText()
printDebug.debug("Search term input: %s" % text)
url=url+'&query='+urllib.quote(text)
PlexPlugins( url )
return
def getContent( url ):
'''
    This function takes the URL, gets the XML and determines what the content is
This XML is then redirected to the best processing function.
If a search term is detected, then show keyboard and run search query
@input: URL of XML page
@return: nothing, redirects to another function
'''
printDebug.debug("== ENTER ==")
server=plex_network.get_server_from_url(url)
lastbit=url.split('/')[-1]
printDebug.debug("URL suffix: %s" % lastbit)
#Catch search requests, as we need to process input before getting results.
if lastbit.startswith('search'):
printDebug.debug("This is a search URL. Bringing up keyboard")
kb = xbmc.Keyboard('', 'heading')
kb.setHeading('Enter search term')
kb.doModal()
if (kb.isConfirmed()):
text = kb.getText()
printDebug.debug("Search term input: %s" % text)
url=url+'&query='+urllib.quote(text)
else:
return
tree = server.processed_xml(url)
setWindowHeading(tree)
if lastbit == "folder" or lastbit == "playlists":
processXML(url,tree)
return
view_group=tree.get('viewGroup')
if view_group == "movie":
printDebug.debug( "This is movie XML, passing to Movies")
Movies(url, tree)
elif view_group == "show":
printDebug.debug( "This is tv show XML")
TVShows(url,tree)
elif view_group == "episode":
printDebug.debug("This is TV episode XML")
TVEpisodes(url,tree)
elif view_group == 'artist':
printDebug.debug( "This is music XML")
artist(url, tree)
elif view_group== 'album' or view_group == 'albums':
albums(url,tree)
elif view_group == 'track':
printDebug.debug("This is track XML")
        tracks(url, tree) #sorting is handled here
elif view_group =="photo":
printDebug.debug("This is a photo XML")
photo(url,tree)
else:
processDirectory(url,tree)
return
def processDirectory( url, tree=None ):
printDebug.debug("== ENTER ==")
printDebug.debug("Processing secondary menus")
xbmcplugin.setContent(pluginhandle, "")
server = plex_network.get_server_from_url(url)
setWindowHeading(tree)
for directory in tree:
details={'title' : directory.get('title','Unknown').encode('utf-8') }
extraData={'thumb' : getThumb(tree, server) ,
'fanart_image' : getFanart(tree, server) }
extraData['mode'] = MODE_GETCONTENT
u='%s' % (getLinkURL(url, directory, server))
addGUIItem(u, details, extraData)
xbmcplugin.endOfDirectory(pluginhandle, cacheToDisc=settings.get_setting('kodicache'))
def getMasterServer(all=False):
printDebug.debug("== ENTER ==")
possibleServers=[]
current_master=settings.get_setting('masterServer')
for serverData in plex_network.get_server_list():
printDebug.debug( str(serverData) )
if serverData.get_master() == 1:
possibleServers.append(serverData)
printDebug.debug( "Possible master servers are: %s" % possibleServers )
if all:
return possibleServers
if len(possibleServers) > 1:
preferred="local"
for serverData in possibleServers:
            if serverData.get_name() == current_master:
printDebug.debug("Returning current master")
return serverData
if preferred == "any":
printDebug.debug("Returning 'any'")
return serverData
else:
if serverData.get_discovery() == preferred:
printDebug.debug("Returning local")
return serverData
elif len(possibleServers) == 0:
return
return possibleServers[0]
def artist( url, tree=None ):
'''
Process artist XML and display data
@input: url of XML page, or existing tree of XML page
@return: nothing
'''
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'artists')
xbmcplugin.addSortMethod(pluginhandle, 37 ) #maintain original plex sorted
xbmcplugin.addSortMethod(pluginhandle, 12 ) #artist title ignore THE
xbmcplugin.addSortMethod(pluginhandle, 34 ) #last played
xbmcplugin.addSortMethod(pluginhandle, 17 ) #year
#Get the URL and server name. Get the XML and parse
tree=getXML(url,tree)
if tree is None:
return
server=plex_network.get_server_from_url(url)
setWindowHeading(tree)
ArtistTag=tree.findall('Directory')
for artist in ArtistTag:
details={'artist' : artist.get('title','').encode('utf-8') }
details['title']=details['artist']
extraData={'type' : "Music" ,
'thumb' : getThumb(artist, server) ,
'fanart_image' : getFanart(artist, server) ,
'ratingKey' : artist.get('title','') ,
'key' : artist.get('key','') ,
'mode' : MODE_ALBUMS ,
'plot' : artist.get('summary','') }
url='%s%s' % (server.get_url_location(), extraData['key'] )
addGUIItem(url,details,extraData)
printDebug.debug("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('music')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def albums( url, tree=None ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'albums')
xbmcplugin.addSortMethod(pluginhandle, 37 ) #maintain original plex sorted
xbmcplugin.addSortMethod(pluginhandle, 24 ) #album title ignore THE
xbmcplugin.addSortMethod(pluginhandle, 12 ) #artist ignore THE
xbmcplugin.addSortMethod(pluginhandle, 34 ) #last played
xbmcplugin.addSortMethod(pluginhandle, 17 ) #year
#Get the URL and server name. Get the XML and parse
tree=getXML(url,tree)
if tree is None:
return
server=plex_network.get_server_from_url(url)
sectionart=getFanart(tree, server)
setWindowHeading(tree)
AlbumTags=tree.findall('Directory')
recent = True if 'recentlyAdded' in url else False
for album in AlbumTags:
details={'album' : album.get('title','').encode('utf-8') ,
'year' : int(album.get('year',0)) ,
'artist' : tree.get('parentTitle', album.get('parentTitle','')).encode('utf-8') }
if recent:
details['title']="%s - %s" % ( details['artist'], details['album'])
else:
details['title']=details['album']
extraData={'type' : "Music" ,
'thumb' : getThumb(album, server) ,
'fanart_image' : getFanart(album, server) ,
'key' : album.get('key',''),
'mode' : MODE_TRACKS ,
'plot' : album.get('summary','')}
if extraData['fanart_image'] == "":
extraData['fanart_image']=sectionart
url='%s%s' % (server.get_url_location(), extraData['key'] )
addGUIItem(url,details,extraData)
printDebug.debug("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('music')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def tracks( url,tree=None ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'songs')
xbmcplugin.addSortMethod(pluginhandle, 37 ) #maintain original plex sorted
    xbmcplugin.addSortMethod(pluginhandle, 10 ) #title ignore THE
xbmcplugin.addSortMethod(pluginhandle, 8 ) #duration
xbmcplugin.addSortMethod(pluginhandle, 27 ) #song rating
xbmcplugin.addSortMethod(pluginhandle, 7 ) #track number
tree=getXML(url,tree)
if tree is None:
return
playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
playlist.clear()
server=plex_network.get_server_from_url(url)
sectionart=getFanart(tree, server)
sectionthumb=getThumb(tree, server)
setWindowHeading(tree)
TrackTags=tree.findall('Track')
for track in TrackTags:
if track.get('thumb'):
sectionthumb=getThumb(track, server)
trackTag(server, tree, track, sectionart, sectionthumb)
printDebug.debug("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('music')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def getXML (url, tree=None):
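    '''
    Fetch and parse XML from the given URL, unless an already-parsed tree was supplied.
    If the server returned a message container instead of content, show it in a dialog and return None.
    '''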
printDebug.debug("== ENTER ==")
if tree is None:
tree=plex_network.get_processed_xml(url)
if tree.get('message'):
xbmcgui.Dialog().ok(tree.get('header','Message'),tree.get('message',''))
return None
return tree
def PlexPlugins(url, tree=None):
'''
Main function to parse plugin XML from PMS
Will create dir or item links depending on what the
main tag is.
@input: plugin page URL
@return: nothing, creates XBMC GUI listing
'''
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'addons')
server=plex_network.get_server_from_url(url)
tree = getXML(url,tree)
if tree is None:
return
myplex_url=False
if (tree.get('identifier') != "com.plexapp.plugins.myplex") and ( "node.plexapp.com" in url ) :
myplex_url=True
printDebug.debug("This is a myplex URL, attempting to locate master server")
server=getMasterServer()
for plugin in tree:
details={'title' : plugin.get('title','Unknown').encode('utf-8') }
if details['title'] == "Unknown":
details['title']=plugin.get('name',"Unknown").encode('utf-8')
if plugin.get('summary'):
details['plot']=plugin.get('summary')
extraData={'thumb' : getThumb(plugin, server) ,
'fanart_image' : getFanart(plugin, server) ,
'identifier' : tree.get('identifier','') ,
'type' : "Video" ,
'key' : plugin.get('key','') }
if myplex_url:
extraData['key']=extraData['key'].replace('node.plexapp.com:32400',server.get_location())
if extraData['fanart_image'] == "":
extraData['fanart_image']=getFanart(tree, server)
p_url=getLinkURL(url, extraData, server)
if plugin.tag == "Directory" or plugin.tag == "Podcast":
if plugin.get('search') == '1':
extraData['mode']=MODE_CHANNELSEARCH
extraData['parameters']={'prompt' : plugin.get('prompt',"Enter Search Term").encode('utf-8') }
else:
extraData['mode']=MODE_PLEXPLUGINS
addGUIItem(p_url, details, extraData)
elif plugin.tag == "Video":
extraData['mode']=MODE_VIDEOPLUGINPLAY
for child in plugin:
if child.tag == "Media":
extraData['parameters'] = {'indirect' : child.get('indirect','0')}
addGUIItem(p_url, details, extraData, folder=False)
elif plugin.tag == "Setting":
if plugin.get('option') == 'hidden':
value="********"
elif plugin.get('type') == "text":
value=plugin.get('value')
elif plugin.get('type') == "enum":
value=plugin.get('values').split('|')[int(plugin.get('value',0))]
else:
value=plugin.get('value')
details['title']= "%s - [%s]" % (plugin.get('label','Unknown').encode('utf-8'), value)
extraData['mode']=MODE_CHANNELPREFS
extraData['parameters']={'id' : plugin.get('id') }
addGUIItem(url, details, extraData)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def channelSettings ( url, settingID ):
'''
Take the setting XML and parse it to create an updated
string with the new settings. For the selected value, create
a user input screen (text or list) to update the setting.
@ input: url
@ return: nothing
'''
printDebug.debug("== ENTER ==")
printDebug.debug("Setting preference for ID: %s" % settingID )
if not settingID:
printDebug.debug("ID not set")
return
tree=getXML(url)
if tree is None:
return
setWindowHeading(tree)
setString=None
for plugin in tree:
if plugin.get('id') == settingID:
printDebug.debug("Found correct id entry for: %s" % settingID)
id=settingID
label=plugin.get('label',"Enter value")
option=plugin.get('option')
value=plugin.get('value')
if plugin.get('type') == "text":
printDebug.debug("Setting up a text entry screen")
kb = xbmc.Keyboard(value, 'heading')
kb.setHeading(label)
if option == "hidden":
kb.setHiddenInput(True)
else:
kb.setHiddenInput(False)
kb.doModal()
if (kb.isConfirmed()):
value = kb.getText()
printDebug.debug("Value input: %s " % value)
else:
printDebug.debug("User cancelled dialog")
return False
elif plugin.get('type') == "enum":
printDebug.debug("Setting up an enum entry screen")
values=plugin.get('values').split('|')
settingScreen = xbmcgui.Dialog()
value = settingScreen.select(label,values)
if value == -1:
printDebug.debug("User cancelled dialog")
return False
else:
printDebug.debug('Unknown option type: %s' % plugin.get('id') )
else:
value=plugin.get('value')
id=plugin.get('id')
if setString is None:
setString='%s/set?%s=%s' % (url, id, value)
else:
setString='%s&%s=%s' % (setString, id, value)
printDebug.debug("Settings URL: %s" % setString )
plex_network.talk_to_server(setString)
xbmc.executebuiltin("Container.Refresh")
return False
def processXML( url, tree=None ):
'''
Main function to parse plugin XML from PMS
Will create dir or item links depending on what the
main tag is.
@input: plugin page URL
@return: nothing, creates XBMC GUI listing
'''
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'movies')
server=plex_network.get_server_from_url(url)
tree=getXML(url,tree)
if tree is None:
return
setWindowHeading(tree)
for plugin in tree:
details={'title' : plugin.get('title','Unknown').encode('utf-8') }
if details['title'] == "Unknown":
details['title']=plugin.get('name',"Unknown").encode('utf-8')
extraData={'thumb' : getThumb(plugin, server) ,
'fanart_image' : getFanart(plugin, server) ,
'identifier' : tree.get('identifier','') ,
'type' : "Video" }
if extraData['fanart_image'] == "":
extraData['fanart_image']=getFanart(tree, server)
p_url=getLinkURL(url, plugin, server)
if plugin.tag == "Directory" or plugin.tag == "Podcast":
extraData['mode']=MODE_PROCESSXML
addGUIItem(p_url, details, extraData)
elif plugin.tag == "Track":
trackTag(server, tree, plugin)
elif plugin.tag == "Playlist":
playlistTag(url, server, tree, plugin)
elif tree.get('viewGroup') == "movie":
Movies(url, tree)
return
elif tree.get('viewGroup') == "episode":
TVEpisodes(url, tree)
return
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def movieTag(url, server, tree, movie, randomNumber):
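    '''
    Process a single <Video> element from a movie listing and add it to the Kodi GUI,
    including metadata, artwork, media flags and any context menu entries.
    '''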
printDebug.debug("---New Item---")
tempgenre=[]
tempcast=[]
tempdir=[]
tempwriter=[]
    #Let's grab all the info we can quickly through either a dictionary, or assignment to a list
#We'll process it later
for child in movie:
if child.tag == "Media":
mediaarguments = dict(child.items())
elif child.tag == "Genre" and not settings.get_setting('skipmetadata'):
tempgenre.append(child.get('tag'))
elif child.tag == "Writer" and not settings.get_setting('skipmetadata'):
tempwriter.append(child.get('tag'))
elif child.tag == "Director" and not settings.get_setting('skipmetadata'):
tempdir.append(child.get('tag'))
elif child.tag == "Role" and not settings.get_setting('skipmetadata'):
tempcast.append(child.get('tag'))
printDebug.debug("Media attributes are %s" % mediaarguments)
#Gather some data
view_offset=movie.get('viewOffset',0)
duration=int(mediaarguments.get('duration',movie.get('duration',0)))/1000
#if movie.get('originallyAvailableAt') is not None:
# release_date = time.strftime('%d.%m.%Y',(time.strptime(movie.get('originallyAvailableAt'), '%Y-%m-%d')))
#else:
# release_date = ""
#Required listItem entries for XBMC
details={'plot' : movie.get('summary','').encode('utf-8') ,
'title' : movie.get('title','Unknown').encode('utf-8') ,
'sorttitle' : movie.get('titleSort', movie.get('title','Unknown')).encode('utf-8') ,
'rating' : float(movie.get('rating',0)) ,
'studio' : movie.get('studio','').encode('utf-8'),
'mpaa' : movie.get('contentRating', '').encode('utf-8'),
'year' : int(movie.get('year',0)),
'date' : movie.get('originallyAvailableAt','1970-01-01'),
'tagline' : movie.get('tagline',''),
'DateAdded' : str(datetime.datetime.fromtimestamp(int(movie.get('addedAt',0))))}
#Extra data required to manage other properties
extraData={'type' : "Video" ,
'source' : 'movies',
'thumb' : getThumb(movie, server) ,
'fanart_image' : getFanart(movie, server) ,
'key' : movie.get('key',''),
'ratingKey' : str(movie.get('ratingKey',0)),
'duration' : duration,
'resume' : int (int(view_offset)/1000) }
#Determine what type of watched flag [overlay] to use
if int(movie.get('viewCount',0)) > 0:
details['playcount'] = 1
elif int(movie.get('viewCount',0)) == 0:
details['playcount'] = 0
#Extended Metadata
if not settings.get_setting('skipmetadata'):
details['cast'] = tempcast
details['director'] = " / ".join(tempdir)
details['writer'] = " / ".join(tempwriter)
details['genre'] = " / ".join(tempgenre)
if movie.get('primaryExtraKey') is not None:
details['trailer'] = "plugin://plugin.video.plexbmc/?url=%s%s?t=%s&mode=%s" % (server.get_url_location(), movie.get('primaryExtraKey', ''), randomNumber, MODE_PLAYLIBRARY)
printDebug.debug('Trailer plugin url added: %s' % details['trailer'])
#Add extra media flag data
if not settings.get_setting('skipflags'):
extraData.update(getMediaData(mediaarguments))
#Build any specific context menu entries
if not settings.get_setting('skipcontextmenus'):
context=buildContextMenu(url, extraData, server)
else:
context=None
# http:// <server> <path> &mode=<mode> &t=<rnd>
extraData['mode']=MODE_PLAYLIBRARY
separator = "?"
if "?" in extraData['key']:
separator = "&"
u="%s%s%st=%s" % (server.get_url_location(), extraData['key'], separator, randomNumber)
addGUIItem(u,details,extraData,context,folder=False)
return
def getMediaData ( tag_dict ):
'''
    Extract the media details from the XML
@input: dict of <media /> tag attributes
@output: dict of required values
'''
printDebug.debug("== ENTER ==")
return {'VideoResolution' : tag_dict.get('videoResolution','') ,
'VideoCodec' : tag_dict.get('videoCodec','') ,
'AudioCodec' : tag_dict.get('audioCodec','') ,
'AudioChannels' : tag_dict.get('audioChannels','') ,
'VideoAspect' : tag_dict.get('aspectRatio','') ,
'xbmc_height' : tag_dict.get('height') ,
'xbmc_width' : tag_dict.get('width') ,
'xbmc_VideoCodec' : tag_dict.get('videoCodec') ,
'xbmc_AudioCodec' : tag_dict.get('audioCodec') ,
'xbmc_AudioChannels' : tag_dict.get('audioChannels') ,
'xbmc_VideoAspect' : tag_dict.get('aspectRatio') }
def trackTag( server, tree, track, sectionart="", sectionthumb="", listing=True ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'songs')
for child in track:
for babies in child:
if babies.tag == "Part":
partDetails=(dict(babies.items()))
printDebug.debug( "Part is %s" % partDetails)
details={'TrackNumber' : int(track.get('index',0)) ,
'title' : str(track.get('index',0)).zfill(2)+". "+track.get('title','Unknown').encode('utf-8') ,
'rating' : float(track.get('rating',0)) ,
'album' : track.get('parentTitle', tree.get('parentTitle','')).encode('utf-8') ,
'artist' : track.get('grandparentTitle', tree.get('grandparentTitle','')).encode('utf-8') ,
'duration' : int(track.get('duration',0))/1000 }
extraData={'type' : "music" ,
'fanart_image' : sectionart ,
'thumb' : sectionthumb ,
'key' : track.get('key','') }
#If we are streaming, then get the virtual location
extraData['mode']=MODE_PLAYLIBRARY
u="%s%s" % (server.get_url_location(), extraData['key'])
if listing:
addGUIItem(u,details,extraData,folder=False)
else:
        return ( u, details )
def playlistTag(url, server, tree, track, sectionart="", sectionthumb="", listing=True ):
printDebug.debug("== ENTER ==")
details={'title' : track.get('title','Unknown').encode('utf-8') ,
'duration' : int(track.get('duration',0))/1000
}
extraData={'type' : track.get('playlistType', ''),
'thumb' : getThumb({'thumb' : track.get('composite', '')},server)}
if extraData['type'] == "video":
extraData['mode'] = MODE_MOVIES
elif extraData['type'] == "audio":
extraData['mode'] = MODE_TRACKS
else:
extraData['mode']=MODE_GETCONTENT
u=getLinkURL(url, track, server)
if listing:
addGUIItem(u,details,extraData,folder=True)
else:
        return ( u, details )
def photo( url,tree=None ):
printDebug.debug("== ENTER ==")
server=plex_network.get_server_from_url(url)
xbmcplugin.setContent(pluginhandle, 'photo')
tree=getXML(url,tree)
if tree is None:
return
sectionArt=getFanart(tree,server)
setWindowHeading(tree)
for picture in tree:
details={'title' : picture.get('title',picture.get('name','Unknown')).encode('utf-8') }
if not details['title']:
details['title'] = "Unknown"
extraData={'thumb' : getThumb(picture, server) ,
'fanart_image' : getFanart(picture, server) ,
'type' : "image" }
if extraData['fanart_image'] == "":
extraData['fanart_image']=sectionArt
u=getLinkURL(url, picture, server)
if picture.tag == "Directory":
extraData['mode']=MODE_PHOTOS
addGUIItem(u,details,extraData)
elif picture.tag == "Photo":
if tree.get('viewGroup','') == "photo":
for photo in picture:
if photo.tag == "Media":
for images in photo:
if images.tag == "Part":
extraData['key']=server.get_url_location()+images.get('key','')
details['size']=int(images.get('size',0))
u=extraData['key']
addGUIItem(u,details,extraData,folder=False)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def music( url, tree=None ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'artists')
server=plex_network.get_server_from_url(url)
tree=getXML(url,tree)
if tree is None:
return
setWindowHeading(tree)
for grapes in tree:
if grapes.get('key') is None:
continue
details={'genre' : grapes.get('genre','').encode('utf-8') ,
'artist' : grapes.get('artist','').encode('utf-8') ,
'year' : int(grapes.get('year',0)) ,
'album' : grapes.get('album','').encode('utf-8') ,
'tracknumber' : int(grapes.get('index',0)) ,
'title' : "Unknown" }
extraData={'type' : "Music" ,
'thumb' : getThumb(grapes, server) ,
'fanart_image': getFanart(grapes, server) }
if extraData['fanart_image'] == "":
extraData['fanart_image']=getFanart(tree, server)
u=getLinkURL(url, grapes, server)
if grapes.tag == "Track":
printDebug.debug("Track Tag")
xbmcplugin.setContent(pluginhandle, 'songs')
details['title']=grapes.get('track',grapes.get('title','Unknown')).encode('utf-8')
details['duration']=int(int(grapes.get('totalTime',0))/1000)
extraData['mode']=MODE_BASICPLAY
addGUIItem(u,details,extraData,folder=False)
else:
if grapes.tag == "Artist":
printDebug.debug("Artist Tag")
xbmcplugin.setContent(pluginhandle, 'artists')
details['title']=grapes.get('artist','Unknown').encode('utf-8')
elif grapes.tag == "Album":
printDebug.debug("Album Tag")
xbmcplugin.setContent(pluginhandle, 'albums')
details['title']=grapes.get('album','Unknown').encode('utf-8')
elif grapes.tag == "Genre":
details['title']=grapes.get('genre','Unknown').encode('utf-8')
else:
printDebug.debug("Generic Tag: %s" % grapes.tag)
details['title']=grapes.get('title','Unknown').encode('utf-8')
extraData['mode']=MODE_MUSIC
addGUIItem(u,details,extraData)
printDebug.debug("Skin override is: %s" % settings.get_setting('skinoverride'))
view_id = enforceSkinView('music')
if view_id:
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def getThumb(data, server, width=720, height=720):
'''
Simply take a URL or path and determine how to format for images
@ input: elementTree element, server name
@ return formatted URL
'''
if settings.get_setting('skipimages'):
return ''
thumbnail=data.get('thumb','').split('?t')[0].encode('utf-8')
if thumbnail.startswith("http") :
return thumbnail
elif thumbnail.startswith('/'):
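        #Relative artwork paths are routed through the PMS photo transcode endpoint so images are
        #resized server-side (the localhost:32400 URL is, as far as we can tell, resolved by the PMS itself)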
if settings.get_setting('fullres_thumbs'):
return server.get_kodi_header_formatted_url(thumbnail)
else:
return server.get_kodi_header_formatted_url('/photo/:/transcode?url=%s&width=%s&height=%s' % (urllib.quote_plus('http://localhost:32400' + thumbnail), width, height))
return GENERIC_THUMBNAIL
def getShelfThumb(data, server, seasonThumb=False, prefer_season=False, width=400, height=400):
'''
Simply take a URL or path and determine how to format for images
@ input: elementTree element, server name
@ return formatted URL
'''
if seasonThumb:
if prefer_season:
thumbnail=data.get('parentThumb',data.get('grandparentThumb','')).split('?t')[0].encode('utf-8')
else:
thumbnail=data.get('grandparentThumb','').split('?t')[0].encode('utf-8')
else:
thumbnail=data.get('thumb','').split('?t')[0].encode('utf-8')
if thumbnail.startswith("http"):
return thumbnail
elif thumbnail.startswith('/'):
if settings.get_setting('fullres_thumbs'):
return server.get_kodi_header_formatted_url(thumbnail)
else:
return server.get_kodi_header_formatted_url('/photo/:/transcode?url=%s&width=%s&height=%s' % (urllib.quote_plus('http://localhost:32400' + thumbnail), width, height))
return GENERIC_THUMBNAIL
def getFanart(data, server, width=1280, height=720):
'''
Simply take a URL or path and determine how to format for fanart
@ input: elementTree element, server name
@ return formatted URL for photo resizing
'''
if settings.get_setting('skipimages'):
return ''
fanart=data.get('art','').encode('utf-8')
if fanart.startswith('http') :
return fanart
elif fanart.startswith('/'):
if settings.get_setting('fullres_fanart'):
return server.get_kodi_header_formatted_url(fanart)
else:
return server.get_kodi_header_formatted_url('/photo/:/transcode?url=%s&width=%s&height=%s' % (urllib.quote_plus('http://localhost:32400' + fanart), width, height))
return ''
def getLinkURL(url, pathData, server, season_shelf=False):
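    '''
    Work out a usable URL from an element's 'key' (or 'parentKey' for season shelves):
    http/rtmp keys are returned untouched, keys starting with '/' are prefixed with the server
    address, plex:// keys are rebuilt for the transcoder, and anything else is treated as relative to url.
    '''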
if not season_shelf:
path = pathData.get('key', '')
else:
path = pathData.get('parentKey', '') + "/children"
printDebug.debug("Path is %s" % path)
if path == '':
printDebug.debug("Empty Path")
return
#If key starts with http, then return it
if path.startswith('http'):
printDebug.debug("Detected http link")
return path
#If key starts with a / then prefix with server address
elif path.startswith('/'):
printDebug.debug("Detected base path link")
return '%s%s' % (server.get_url_location(), path)
#If key starts with plex:// then it requires transcoding
elif path.startswith("plex:") :
printDebug.debug("Detected plex link")
components = path.split('&')
for i in components:
if 'prefix=' in i:
del components[components.index(i)]
break
if pathData.get('identifier') is not None:
components.append('identifier='+pathData['identifier'])
path='&'.join(components)
return 'plex://'+server.get_location()+'/'+'/'.join(path.split('/')[3:])
elif path.startswith("rtmp"):
printDebug.debug("Detected RTMP link")
return path
    #Anything else is assumed to be a relative path and is built on the existing url
else:
printDebug.debug("Detected relative link")
return "%s/%s" % (url, path)
return url
def plexOnline( url ):
printDebug.debug("== ENTER ==")
xbmcplugin.setContent(pluginhandle, 'addons')
server=plex_network.get_server_from_url(url)
tree=server.processed_xml(url)
if tree is None:
return
for plugin in tree:
details={'title' : plugin.get('title',plugin.get('name','Unknown')).encode('utf-8') }
extraData={'type' : "Video" ,
'installed' : int(plugin.get('installed',2)) ,
'key' : plugin.get('key','') ,
'thumb' : getThumb(plugin,server)}
extraData['mode']=MODE_CHANNELINSTALL
if extraData['installed'] == 1:
details['title']=details['title']+" (installed)"
elif extraData['installed'] == 2:
extraData['mode']=MODE_PLEXONLINE
u=getLinkURL(url, plugin, server)
extraData['parameters']={'name' : details['title'] }
addGUIItem(u, details, extraData)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def install( url, name ):
printDebug.debug("== ENTER ==")
server=plex_network.get_server_from_url(url)
tree=server.processed_xml(url)
if tree is None:
return
operations={}
i=0
for plums in tree.findall('Directory'):
operations[i]=plums.get('title')
#If we find an install option, switch to a yes/no dialog box
if operations[i].lower() == "install":
printDebug.debug("Not installed. Print dialog")
ret = xbmcgui.Dialog().yesno("Plex Online","About to install " + name)
if ret:
printDebug.debug("Installing....")
tree = server.processed_xml(url+"/install")
msg=tree.get('message','(blank)')
printDebug.debug(msg)
xbmcgui.Dialog().ok("Plex Online",msg)
return
i+=1
#Else continue to a selection dialog box
ret = xbmcgui.Dialog().select("This plugin is already installed..",operations.values())
if ret == -1:
printDebug.debug("No option selected, cancelling")
return
printDebug.debug("Option %s selected. Operation is %s" % (ret, operations[ret]))
u=url+"/"+operations[ret].lower()
tree = server.processed_xml(u)
msg=tree.get('message')
printDebug.debug(msg)
xbmcgui.Dialog().ok("Plex Online",msg)
xbmc.executebuiltin("Container.Refresh")
return
def channelView( url ):
printDebug.debug("== ENTER ==")
server=plex_network.get_server_from_url(url)
tree=server.processed_xml(url)
if tree is None:
return
setWindowHeading(tree)
for channels in tree.getiterator('Directory'):
if channels.get('local','') == "0":
continue
arguments=dict(channels.items())
extraData={'fanart_image' : getFanart(channels, server) ,
'thumb' : getThumb(channels, server) }
details={'title' : channels.get('title','Unknown') }
suffix=channels.get('key').split('/')[1]
if channels.get('unique','')=='0':
details['title']="%s (%s)" % ( details['title'], suffix )
#Alter data sent into getlinkurl, as channels use path rather than key
p_url=getLinkURL(url, {'key': channels.get('key'), 'identifier' : channels.get('key')} , server)
if suffix == "photos":
extraData['mode']=MODE_PHOTOS
elif suffix == "video":
extraData['mode']=MODE_PLEXPLUGINS
elif suffix == "music":
extraData['mode']=MODE_MUSIC
else:
extraData['mode']=MODE_GETCONTENT
addGUIItem(p_url,details,extraData)
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def skin( server_list=None, type=None ):
#Gather some data and set the window properties
printDebug.debug("== ENTER ==")
#Get the global host variable set in settings
WINDOW = xbmcgui.Window( 10000 )
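    #Window 10000 is Kodi's home window; properties set on it persist for the session and are what the skin reads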
sectionCount=0
serverCount=0
sharedCount=0
shared_flag={}
hide_shared = settings.get_setting('hide_shared')
WINDOW.setProperty("plexbmc.myplex_signedin" , str(plex_network.is_myplex_signedin()))
WINDOW.setProperty("plexbmc.plexhome_enabled" , str(plex_network.is_plexhome_enabled()))
if server_list is None:
server_list=plex_network.get_server_list()
for server in server_list:
server.discover_sections()
for section in server.get_sections():
extraData={ 'fanart_image' : server.get_fanart(section) ,
'thumb' : server.get_fanart(section) }
            #Determine how we are going to process a link once it is selected by the user, based on the content we find
path=section.get_path()
if section.is_show():
if hide_shared == "true" and not server.is_owned():
shared_flag['show']=True
continue
window="VideoLibrary"
mode=MODE_TVSHOWS
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/search?type=4", mode) )
if section.is_movie():
if hide_shared == "true" and not server.is_owned():
shared_flag['movie']=True
continue
window="VideoLibrary"
mode=MODE_MOVIES
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/search?type=1", mode) )
if section.is_artist():
if hide_shared == "true" and not server.is_owned():
shared_flag['artist']=True
continue
window="MusicFiles"
mode=MODE_ARTISTS
WINDOW.setProperty("plexbmc.%d.album" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/albums", mode) )
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/search?type=10", mode) )
if section.is_photo():
if hide_shared == "true" and not server.is_owned():
shared_flag['photo']=True
continue
window="Pictures"
WINDOW.setProperty("plexbmc.%d.year" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/year", mode) )
mode=MODE_PHOTOS
if settings.get_setting('secondary'):
mode=MODE_GETCONTENT
else:
path=path+'/all'
s_url='%s%s&mode=%s' % ( server.get_url_location(), path, mode)
#Build that listing..
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , section.get_title())
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , server.get_name())
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s,return)" % (window, s_url))
WINDOW.setProperty("plexbmc.%d.art" % (sectionCount) , extraData['fanart_image'])
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , section.get_type())
WINDOW.setProperty("plexbmc.%d.icon" % (sectionCount) , extraData.get('thumb',GENERIC_THUMBNAIL))
WINDOW.setProperty("plexbmc.%d.thumb" % (sectionCount) , extraData.get('thumb',GENERIC_THUMBNAIL))
WINDOW.setProperty("plexbmc.%d.partialpath" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s" % (window, server.get_url_location(), section.get_path()))
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/search?type=1", mode) )
WINDOW.setProperty("plexbmc.%d.recent" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/recentlyAdded", mode) )
WINDOW.setProperty("plexbmc.%d.all" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/all", mode, ) )
WINDOW.setProperty("plexbmc.%d.viewed" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/recentlyViewed", mode) )
WINDOW.setProperty("plexbmc.%d.ondeck" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/onDeck", mode) )
WINDOW.setProperty("plexbmc.%d.released" % (sectionCount) , "ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s%s%s&mode=%s,return)" % (window, server.get_url_location(), section.get_path(), "/newest", mode) )
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "false")
printDebug.debug("Building window properties index [%s] which is [%s]" % (sectionCount, section.get_title()))
printDebug.debug("PATH in use is: ActivateWindow(%s,plugin://plugin.video.plexbmc/?url=%s,return)" % (window, s_url))
sectionCount += 1
if type == "nocat":
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , "Shared...")
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , "Shared")
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_ALL )
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , "movie")
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "true")
sectionCount += 1
else:
if shared_flag.get('movie'):
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , "Shared...")
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , "Shared")
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_MOVIES )
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , "movie")
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "true")
sectionCount += 1
if shared_flag.get('show'):
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , "Shared...")
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , "Shared")
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_SHOWS)
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , "show")
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "true")
sectionCount += 1
if shared_flag.get('artist'):
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , "Shared...")
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , "Shared")
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(MusicFiles,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_MUSIC)
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , "artist")
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "true")
sectionCount += 1
if shared_flag.get('photo'):
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , "Shared...")
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , "Shared")
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(Pictures,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_PHOTOS)
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , "photo")
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "true")
sectionCount += 1
#For each of the servers we have identified
numOfServers=len(server_list)
for server in server_list:
if server.is_secondary():
continue
if settings.get_setting('channelview'):
WINDOW.setProperty("plexbmc.channel", "1")
WINDOW.setProperty("plexbmc.%d.server.channel" % (serverCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=%s/channels/all&mode=21,return)" % server.get_url_location())
else:
WINDOW.clearProperty("plexbmc.channel")
WINDOW.setProperty("plexbmc.%d.server.video" % (serverCount) , "%s/video&mode=7" % server.get_url_location() )
WINDOW.setProperty("plexbmc.%d.server.music" % (serverCount) , "%s/music&mode=17" % server.get_url_location() )
WINDOW.setProperty("plexbmc.%d.server.photo" % (serverCount) , "%s/photos&mode=16" % server.get_url_location() )
WINDOW.setProperty("plexbmc.%d.server.online" % (serverCount) , "%s/system/plexonline&mode=19" % server.get_url_location() )
WINDOW.setProperty("plexbmc.%d.server" % (serverCount) , server.get_name())
serverCount+=1
#Clear out old data
    clear_skin_sections(WINDOW, sectionCount, int(WINDOW.getProperty("plexbmc.sectionCount") or 50))
printDebug.debug("Total number of skin sections is [%s]" % sectionCount )
printDebug.debug("Total number of servers is [%s]" % numOfServers)
WINDOW.setProperty("plexbmc.sectionCount", str(sectionCount))
WINDOW.setProperty("plexbmc.numServers", str(numOfServers))
if plex_network.is_myplex_signedin():
WINDOW.setProperty("plexbmc.queue" , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=http://myplexqueue&mode=24,return)")
WINDOW.setProperty("plexbmc.myplex", "1" )
else:
WINDOW.clearProperty("plexbmc.myplex")
return
def amberskin():
#Gather some data and set the window properties
printDebug.debug("== ENTER ==")
#Get the global host variable set in settings
WINDOW = xbmcgui.Window( 10000 )
sectionCount=0
serverCount=0
sharedCount=0
shared_flag={}
hide_shared = settings.get_setting('hide_shared')
server_list=plex_network.get_server_list()
WINDOW.setProperty("plexbmc.myplex_signedin" , str(plex_network.is_myplex_signedin()))
WINDOW.setProperty("plexbmc.plexhome_enabled" , str(plex_network.is_plexhome_enabled()))
if plex_network.is_plexhome_enabled():
WINDOW.setProperty("plexbmc.plexhome_user" , str(plex_network.get_myplex_user()))
WINDOW.setProperty("plexbmc.plexhome_avatar" , str(plex_network.get_myplex_avatar()))
printDebug.debug("Using list of %s servers: %s " % (len(server_list), server_list))
for server in server_list:
server.discover_sections()
for section in server.get_sections():
printDebug.debug("=Enter amberskin section=")
printDebug.debug(str(section.__dict__))
printDebug.debug("=/section=")
extraData = {'fanart_image': server.get_fanart(section)}
#Determine what we are going to do process after a link is selected by the user, based on the content we find
path = section.get_path()
base_url="plugin://plugin.video.plexbmc/?url=%s" % server.get_url_location()
if section.is_show():
if hide_shared and not server.is_owned():
shared_flag['show']=True
sharedCount += 1
continue
window="VideoLibrary"
mode=MODE_TVSHOWS
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/search?type=4", mode) )
elif section.is_movie():
if hide_shared and not server.is_owned():
shared_flag['movie']=True
sharedCount += 1
continue
window="VideoLibrary"
mode=MODE_MOVIES
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/search?type=1", mode) )
elif section.is_artist():
if hide_shared and not server.is_owned():
shared_flag['artist']=True
sharedCount += 1
continue
window="MusicFiles"
mode=MODE_ARTISTS
WINDOW.setProperty("plexbmc.%d.album" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/albums", mode) )
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/search?type=10", mode) )
elif section.is_photo():
if hide_shared and not server.is_owned():
shared_flag['photo']=True
sharedCount += 1
continue
                window="Pictures"
                mode=MODE_PHOTOS
                WINDOW.setProperty("plexbmc.%d.year" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/year", mode) )
else:
if hide_shared and not server.is_owned():
shared_flag['movie']=True
sharedCount += 1
continue
window="Videos"
mode=MODE_PHOTOS
if settings.get_setting('secondary'):
mode=MODE_GETCONTENT
suffix=''
else:
suffix='/all'
#Build that listing..
WINDOW.setProperty("plexbmc.%d.uuid" % (sectionCount) , section.get_uuid())
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , section.get_title())
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , server.get_name())
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(%s,%s%s&mode=%s,return)" % ( window, base_url, path+suffix, mode))
WINDOW.setProperty("plexbmc.%d.art" % (sectionCount) , extraData['fanart_image'])
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , section.get_type())
WINDOW.setProperty("plexbmc.%d.icon" % (sectionCount) , extraData.get('thumb',GENERIC_THUMBNAIL))
WINDOW.setProperty("plexbmc.%d.thumb" % (sectionCount) , extraData.get('thumb',GENERIC_THUMBNAIL))
WINDOW.setProperty("plexbmc.%d.partialpath" % (sectionCount) , "ActivateWindow(%s,%s%s" % (window, base_url, path))
WINDOW.setProperty("plexbmc.%d.search" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/search?type=1", mode) )
WINDOW.setProperty("plexbmc.%d.recent" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/recentlyAdded", mode) )
WINDOW.setProperty("plexbmc.%d.all" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/all", mode) )
WINDOW.setProperty("plexbmc.%d.viewed" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/recentlyViewed", mode) )
WINDOW.setProperty("plexbmc.%d.ondeck" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/onDeck", mode) )
WINDOW.setProperty("plexbmc.%d.released" % (sectionCount) , "ActivateWindow(%s,%s%s%s&mode=%s,return)" % (window, base_url, path, "/newest", mode) )
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "false")
WINDOW.setProperty("plexbmc.%d.ondeck.content" % (sectionCount) , "%s%s%s&mode=%s" % (base_url, path, "/onDeck", mode) )
WINDOW.setProperty("plexbmc.%d.recent.content" % (sectionCount) , "%s%s%s&mode=%s" % (base_url, path, "/recentlyAdded", mode) )
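            # Illustrative resolved value (server address and section id are examples only):
            #   plexbmc.0.recent -> "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=http://192.168.1.10:32400/library/sections/2/recentlyAdded&mode=<MODE_TVSHOWS>,return)"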
printDebug.debug("Building window properties index [%s] which is [%s]" % (sectionCount, section.get_title()))
printDebug.debug("PATH in use is: ActivateWindow(%s,%s%s&mode=%s,return)" % ( window, base_url, path, mode))
sectionCount += 1
if plex_network.is_myplex_signedin() and hide_shared and sharedCount != 0:
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , "Shared Content")
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , "Shared")
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_ALL)
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , "shared")
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "true")
sectionCount += 1
elif sharedCount != 0:
WINDOW.setProperty("plexbmc.%d.title" % (sectionCount) , "Shared...")
WINDOW.setProperty("plexbmc.%d.subtitle" % (sectionCount) , "Shared")
WINDOW.setProperty("plexbmc.%d.type" % (sectionCount) , "shared")
WINDOW.setProperty("plexbmc.%d.shared" % (sectionCount) , "true")
if shared_flag.get('movie'):
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_MOVIES)
if shared_flag.get('show'):
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_SHOWS)
if shared_flag.get('artist'):
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(MusicFiles,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)"% MODE_SHARED_MUSIC)
if shared_flag.get('photo'):
WINDOW.setProperty("plexbmc.%d.path" % (sectionCount) , "ActivateWindow(Pictures,plugin://plugin.video.plexbmc/?url=/&mode=%s,return)" % MODE_SHARED_PHOTOS)
sectionCount += 1
#For each of the servers we have identified
numOfServers=len(server_list)
#shelfChannel (server_list)
for server in server_list:
printDebug.debug(server.get_details())
if server.is_secondary():
continue
if settings.get_setting('channelview'):
WINDOW.setProperty("plexbmc.channel", "1")
            WINDOW.setProperty("plexbmc.%d.server.channel" % (serverCount) , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=%s%s&mode=%s,return)" % (server.get_url_location(), "/channels/all", MODE_CHANNELVIEW ))
else:
WINDOW.clearProperty("plexbmc.channel")
WINDOW.setProperty("plexbmc.%d.server.video" % (serverCount) , "%s%s&mode=%s" % (server.get_url_location(), "/video", MODE_PLEXPLUGINS ))
WINDOW.setProperty("plexbmc.%d.server.music" % (serverCount) , "%s%s&mode=%s" % (server.get_url_location(), "/music", MODE_MUSIC ))
WINDOW.setProperty("plexbmc.%d.server.photo" % (serverCount) , "%s%s&mode=%s" % (server.get_url_location(), "/photos", MODE_PHOTOS ))
WINDOW.setProperty("plexbmc.%d.server.online" % (serverCount) , "%s%s&mode=%s" % (server.get_url_location(), "/system/plexonline", MODE_PLEXONLINE ))
WINDOW.setProperty("plexbmc.%d.server" % (serverCount) , server.get_name())
serverCount+=1
#Clear out old data
    clear_skin_sections(WINDOW, sectionCount, int(WINDOW.getProperty("plexbmc.sectionCount") or 50))
printDebug.debug("Total number of skin sections is [%s]" % sectionCount )
printDebug.debug("Total number of servers is [%s]" % numOfServers)
WINDOW.setProperty("plexbmc.sectionCount", str(sectionCount))
WINDOW.setProperty("plexbmc.numServers", str(numOfServers))
if plex_network.is_myplex_signedin():
WINDOW.setProperty("plexbmc.queue" , "ActivateWindow(VideoLibrary,plugin://plugin.video.plexbmc/?url=http://myplexqueue&mode=24,return)")
WINDOW.setProperty("plexbmc.myplex", "1" )
#Now let's populate queue shelf items since we have MyPlex login
if settings.get_setting('homeshelf') != '3':
printDebug.debug("== ENTER ==")
root = plex_network.get_myplex_queue()
server_address = getMasterServer()
queue_count = 1
for media in root:
printDebug.debug("Found a queue item entry: [%s]" % (media.get('title', '').encode('UTF-8') , ))
m_url = "plugin://plugin.video.plexbmc?url=%s&mode=%s&indirect=%s" % (getLinkURL(server_address.get_url_location(), media, server_address), 18, 1)
m_thumb = getShelfThumb(media, server_address)
try:
movie_runtime = str(int(float(media.get('duration'))/1000/60))
except:
movie_runtime = ""
WINDOW.setProperty("Plexbmc.Queue.%s.Path" % queue_count, m_url)
WINDOW.setProperty("Plexbmc.Queue.%s.Title" % queue_count, media.get('title', 'Unknown').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.Queue.%s.Year" % queue_count, media.get('originallyAvailableAt', '').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.Queue.%s.Duration" % queue_count, movie_runtime)
WINDOW.setProperty("Plexbmc.Queue.%s.Thumb" % queue_count, m_thumb)
queue_count += 1
printDebug.debug("Building Queue item: %s" % media.get('title', 'Unknown').encode('UTF-8'))
printDebug.debug("Building Queue item url: %s" % m_url)
printDebug.debug("Building Queue item thumb: %s" % m_thumb)
clearQueueShelf(queue_count)
else:
WINDOW.clearProperty("plexbmc.myplex")
fullShelf (server_list)
def clear_skin_sections(WINDOW=None, start=0, finish=50):
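    #Clear the per-section window properties for indexes start..finish (called after a rebuild in case the number of sections shrank)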
printDebug.debug("Clearing properties from [%s] to [%s]" % (start, finish))
if WINDOW is None:
WINDOW = xbmcgui.Window( 10000 )
try:
for i in range(start, finish+1):
WINDOW.clearProperty("plexbmc.%d.uuid" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.title" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.subtitle" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.url" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.path" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.window" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.art" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.type" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.icon" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.thumb" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.recent" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.all" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.search" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.viewed" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.ondeck" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.released" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.shared" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.album" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.year" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.recent.content" % ( i ) )
WINDOW.clearProperty("plexbmc.%d.ondeck.content" % ( i ) )
except:
printDebug.debug("Clearing stopped")
printDebug.debug("Finished clearing properties")
def fullShelf(server_list={}):
#Gather some data and set the window properties
printDebug.debug("== ENTER ==")
if settings.get_setting('homeshelf') == '3' or ( not settings.get_setting('movieShelf') and not settings.get_setting('tvShelf') and not settings.get_setting('musicShelf')):
printDebug.debug("Disabling all shelf items")
clearShelf()
clearOnDeckShelf()
return
#Get the global host variable set in settings
WINDOW = xbmcgui.Window( 10000 )
recentMovieCount=1
recentSeasonCount=1
recentMusicCount=1
recentPhotoCount=1
ondeckMovieCount=1
ondeckSeasonCount=1
recent_list=[]
ondeck_list=[]
full_count=0
    if not server_list:
xbmc.executebuiltin("XBMC.Notification(Unable to see any media servers,)")
clearShelf(0, 0, 0, 0)
return
randomNumber = str(random.randint(1000000000,9999999999))
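    #the random token is appended to shelf URLs as &t=<n>, presumably so Kodi does not serve a cached listing for an otherwise identical URL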
for server_details in server_list:
if not server_details.is_owned():
continue
for section in server_details.get_sections():
if settings.get_setting('homeshelf') == '0' or settings.get_setting('homeshelf') == '2':
tree = server_details.get_recently_added(section=section.get_key(), size=15, hide_watched=settings.get_setting('hide_watched_recent_items'))
if tree is None:
printDebug.debug("PLEXBMC -> RecentlyAdded items not found on: %s" % server_details.get_url_location())
continue
libraryuuid = tree.get("librarySectionUUID",'').encode('utf-8')
ep_helper = {} # helper season counter
for eachitem in tree:
if eachitem.get("type", "") == "episode":
key = int(eachitem.get("parentRatingKey")) # season identifier
if key in ep_helper:
continue
ep_helper[key] = key # use seasons as dict key so we can check
recent_list.append((eachitem, server_details, libraryuuid))
if settings.get_setting('homeshelf') == '1' or settings.get_setting('homeshelf') == '2':
tree = server_details.get_ondeck(section=section.get_key(),size=15)
if tree is None:
                    printDebug.debug("PLEXBMC -> OnDeck items not found on: %s" % server_details.get_url_location())
continue
libraryuuid = tree.get("librarySectionUUID",'').encode('utf-8')
for eachitem in tree:
ondeck_list.append((eachitem, server_details, libraryuuid))
printDebug.debugplus("Recent object is: %s" % recent_list)
printDebug.debugplus("ondeck object is: %s" % ondeck_list)
prefer_season=settings.get_setting('prefer_season_thumbs')
#For each of the servers we have identified
for media, source_server, libuuid in recent_list:
if media.get('type') == "movie":
if not settings.get_setting('movieShelf'):
WINDOW.clearProperty("Plexbmc.LatestMovie.1.Path" )
continue
title_name=media.get('title','Unknown').encode('UTF-8')
printDebug.debug("Found a recent movie entry: [%s]" % title_name)
title_url="plugin://plugin.video.plexbmc?url=%s&mode=%s&t=%s" % ( getLinkURL(source_server.get_url_location(),media,source_server), MODE_PLAYSHELF, randomNumber)
title_thumb = getShelfThumb(media,source_server)
if media.get('duration') > 0:
movie_runtime = str(int(float(media.get('duration'))/1000/60))
else:
movie_runtime = ""
if media.get('rating') > 0:
movie_rating = str(round(float(media.get('rating')), 1))
else:
movie_rating = ''
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Path" % recentMovieCount, title_url)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Title" % recentMovieCount, title_name)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Year" % recentMovieCount, media.get('year', '').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Rating" % recentMovieCount, movie_rating)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Duration" % recentMovieCount, movie_runtime)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Thumb" % recentMovieCount, title_thumb)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.uuid" % recentMovieCount, libuuid)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Plot" % recentMovieCount, media.get('summary', '').encode('UTF-8'))
m_genre = []
for child in media:
if child.tag == "Genre":
m_genre.append(child.get('tag'))
else:
continue
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Genre" % recentMovieCount, ", ".join(m_genre).encode('UTF-8'))
recentMovieCount += 1
elif media.get('type') == "season":
title_name=media.get('parentTitle','Unknown').encode('UTF-8')
printDebug.debug("Found a recent season entry [%s]" % title_name)
if not settings.get_setting('tvShelf'):
WINDOW.clearProperty("Plexbmc.LatestEpisode.1.Path" )
continue
title_url="ActivateWindow(VideoLibrary, plugin://plugin.video.plexbmc?url=%s&mode=%s, return)" % ( getLinkURL(source_server.get_url_location(),media,source_server), MODE_TVEPISODES)
title_thumb=getShelfThumb(media,source_server)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Path" % recentSeasonCount, title_url )
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeTitle" % recentSeasonCount, '')
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeSeason" % recentSeasonCount, media.get('title','').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.ShowTitle" % recentSeasonCount, title_name)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Thumb" % recentSeasonCount, title_thumb)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.uuid" % recentSeasonCount, media.get('librarySectionUUID','').encode('UTF-8'))
recentSeasonCount += 1
elif media.get('type') == "album":
if not settings.get_setting('musicShelf'):
WINDOW.clearProperty("Plexbmc.LatestAlbum.1.Path" )
continue
title_name=media.get('parentTitle','Unknown').encode('UTF-8')
title_url="ActivateWindow(MusicFiles, plugin://plugin.video.plexbmc?url=%s&mode=%s, return)" % ( getLinkURL(source_server.get_url_location(),media,source_server), MODE_TRACKS)
title_thumb=getShelfThumb(media,source_server)
printDebug.debug("Found a recent album entry: [%s]" % title_name)
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Path" % recentMusicCount, title_url )
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Title" % recentMusicCount, media.get('title','Unknown').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Artist" % recentMusicCount, title_name)
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Thumb" % recentMusicCount, title_thumb)
recentMusicCount += 1
elif media.get('type') == "photo":
title_name=media.get('title','Unknown').encode('UTF-8')
            title_url="ActivateWindow(Pictures, plugin://plugin.video.plexbmc/?url=%s%s&mode=%s,return)" % ( source_server.get_url_location(), "/recentlyAdded", MODE_PHOTOS)
title_thumb = getShelfThumb(media, source_server)
printDebug.debug("Found a recent photo entry: [%s]" % title_name)
WINDOW.setProperty("Plexbmc.LatestPhoto.%s.Path" % recentPhotoCount, title_url)
WINDOW.setProperty("Plexbmc.LatestPhoto.%s.Title" % recentPhotoCount, title_name)
WINDOW.setProperty("Plexbmc.LatestPhoto.%s.Thumb" % recentPhotoCount, title_thumb)
recentPhotoCount += 1
elif media.get('type') == "episode":
title_name=media.get('title','Unknown').encode('UTF-8')
            printDebug.debug("Found a recent episode entry [%s]" % title_name)
if not settings.get_setting('tvShelf'):
WINDOW.clearProperty("Plexbmc.LatestEpisode.1.Path" )
continue
title_url="ActivateWindow(Videos, plugin://plugin.video.plexbmc?url=%s&mode=%s, return)" % ( getLinkURL(source_server.get_url_location(), media, source_server, season_shelf=True), MODE_TVEPISODES)
title_thumb = getShelfThumb(media, source_server, seasonThumb=True, prefer_season=prefer_season)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Path" % recentSeasonCount, title_url)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeTitle" % recentSeasonCount, title_name)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeNumber" % recentSeasonCount, media.get('index','').encode('utf-8'))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeSeason" % recentSeasonCount, media.get('parentIndex','').encode('UTF-8')+'.'+media.get('index','Unknown').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeSeasonNumber" % recentSeasonCount, media.get('parentIndex','').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.ShowTitle" % recentSeasonCount, media.get('grandparentTitle','').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Thumb" % recentSeasonCount, title_thumb)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.uuid" % recentSeasonCount, libuuid)
recentSeasonCount += 1
printDebug.debug(" Building Recent window title: %s\n Building Recent window url: %s\n Building Recent window thumb: %s" % (title_name, title_url, title_thumb))
clearShelf(recentMovieCount, recentSeasonCount, recentMusicCount, recentPhotoCount)
#For each of the servers we have identified
for media, source_server, libuuid in ondeck_list:
if media.get('type') == "movie":
title_name=media.get('title','Unknown').encode('UTF-8')
printDebug.debug("Found a OnDeck movie entry: [%s]" % title_name)
if not settings.get_setting('movieShelf'):
WINDOW.clearProperty("Plexbmc.OnDeckMovie.1.Path" )
continue
title_url = "plugin://plugin.video.plexbmc?url=%s&mode=%s&t=%s" % ( getLinkURL(source_server.get_url_location(),media,source_server), MODE_PLAYSHELF, randomNumber)
title_thumb = getShelfThumb(media,source_server)
if media.get('duration') > 0:
#movie_runtime = media.get('duration', '0')
movie_runtime = str(int(float(media.get('duration'))/1000/60))
else:
movie_runtime = ""
if media.get('rating') > 0:
title_rating = str(round(float(media.get('rating')), 1))
else:
title_rating = ''
WINDOW.setProperty("Plexbmc.OnDeckMovie.%s.Path" % ondeckMovieCount, title_url)
WINDOW.setProperty("Plexbmc.OnDeckMovie.%s.Title" % ondeckMovieCount, title_name)
WINDOW.setProperty("Plexbmc.OnDeckMovie.%s.Year" % ondeckMovieCount, media.get('year','').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.OnDeckMovie.%s.Rating" % ondeckMovieCount, title_rating)
WINDOW.setProperty("Plexbmc.OnDeckMovie.%s.Duration" % ondeckMovieCount, movie_runtime)
WINDOW.setProperty("Plexbmc.OnDeckMovie.%s.Thumb" % ondeckMovieCount, title_thumb)
WINDOW.setProperty("Plexbmc.OnDeckMovie.%s.uuid" % ondeckMovieCount, libuuid)
ondeckMovieCount += 1
elif media.get('type') == "season":
title_name=media.get('parentTitle','Unknown').encode('UTF-8')
printDebug.debug("Found a OnDeck season entry [%s]" % title_name)
if not settings.get_setting('tvShelf'):
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.1.Path" )
continue
title_url="ActivateWindow(VideoLibrary, plugin://plugin.video.plexbmc?url=%s&mode=%s, return)" % ( getLinkURL(source_server.get_url_location(),media,source_server), MODE_TVEPISODES)
title_thumb=getShelfThumb(media,source_server)
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.Path" % ondeckSeasonCount, title_url )
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.EpisodeTitle" % ondeckSeasonCount, '')
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.EpisodeSeason" % ondeckSeasonCount, media.get('title','').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.ShowTitle" % ondeckSeasonCount, title_name)
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.Thumb" % ondeckSeasonCount, title_thumb)
ondeckSeasonCount += 1
elif media.get('type') == "episode":
title_name=media.get('title','Unknown').encode('UTF-8')
printDebug.debug("Found an onDeck episode entry [%s]" % title_name)
if not settings.get_setting('tvShelf'):
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.1.Path" )
continue
title_url="PlayMedia(plugin://plugin.video.plexbmc?url=%s&mode=%s&t=%s)" % (getLinkURL(source_server.get_url_location(), media, source_server), MODE_PLAYSHELF, randomNumber)
title_thumb=getShelfThumb(media, source_server, seasonThumb=True, prefer_season=prefer_season)
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.Path" % ondeckSeasonCount, title_url)
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.EpisodeTitle" % ondeckSeasonCount, title_name)
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.EpisodeNumber" % ondeckSeasonCount, media.get('index','').encode('utf-8'))
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.EpisodeSeason" % ondeckSeasonCount, media.get('grandparentTitle','Unknown').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.EpisodeSeasonNumber" % ondeckSeasonCount, media.get('parentIndex','').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.ShowTitle" % ondeckSeasonCount, title_name)
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.Thumb" % ondeckSeasonCount, title_thumb)
WINDOW.setProperty("Plexbmc.OnDeckEpisode.%s.uuid" % ondeckSeasonCount, libuuid)
ondeckSeasonCount += 1
printDebug.debug(" Building onDeck window title: %s\n Building onDeck window url: %s\n Building onDeck window thumb: %s" % (title_name, title_url, title_thumb))
clearOnDeckShelf(ondeckMovieCount, ondeckSeasonCount)
def displayContent( acceptable_level, content_level ):
'''
Takes a content Rating and decides whether it is an allowable
level, as defined by the content filter
@input: content rating
@output: boolean
'''
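    # e.g. displayContent("Teens", "PG-13") -> True; displayContent("Kids", "R") -> False (illustrative values)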
printDebug.info("Checking rating flag [%s] against [%s]" % (content_level, acceptable_level))
if acceptable_level == "Adults":
printDebug.debug("OK to display")
return True
content_map = { 'Kids' : 0 ,
'Teens' : 1 ,
'Adults' : 2 }
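    #map certificate strings from the MPAA, BBFC, ACB, US TV and Canadian systems onto the three content levels above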
rating_map= { 'G' : 0 , # MPAA Kids
'PG' : 0 , # MPAA Kids
'PG-13' : 1 , # MPAA Teens
'R' : 2 , # MPAA Adults
'NC-17' : 2 , # MPAA Adults
'NR' : 2 , # MPAA Adults
'Unrated' : 2 , # MPAA Adults
'U' : 0 , # BBFC Kids
'PG' : 0 , # BBFC Kids
'12' : 1 , # BBFC Teens
'12A' : 1 , # BBFC Teens
'15' : 1 , # BBFC Teens
'18' : 2 , # BBFC Adults
'R18' : 2 , # BBFC Adults
'E' : 0 , #ACB Kids (hopefully)
'G' : 0 , #ACB Kids
'PG' : 0 , #ACB Kids
'M' : 1 , #ACB Teens
                  'MA15+' : 2 , #ACB Adults
'R18+' : 2 , #ACB Adults
'X18+' : 2 , #ACB Adults
'TV-Y' : 0 , # US TV - Kids
'TV-Y7' : 0 , # US TV - Kids
                  'TV-G' : 0 , # US TV - Kids
'TV-PG' : 1 , # US TV - Teens
'TV-14' : 1 , # US TV - Teens
'TV-MA' : 2 , # US TV - Adults
'G' : 0 , # CAN - kids
'PG' : 0 , # CAN - kids
'14A' : 1 , # CAN - teens
'18A' : 2 , # CAN - Adults
'R' : 2 , # CAN - Adults
'A' : 2 } # CAN - Adults
if content_level is None or content_level == "None":
printDebug.debug("Setting [None] rating as %s" % settings.get_setting('contentNone') )
if content_map[settings.get_setting('contentNone')] <= content_map[acceptable_level]:
printDebug.debug("OK to display")
return True
else:
try:
if rating_map[content_level] <= content_map[acceptable_level]:
printDebug.debug("OK to display")
return True
except:
            print "Unknown rating flag [%s] whilst looking for [%s] - will filter for now, but needs to be added" % (content_level, acceptable_level)
printDebug.debug("NOT OK to display")
return False
def shelf( server_list=None ):
#Gather some data and set the window properties
printDebug.debug("== ENTER ==")
if not (settings.get_setting('movieShelf') and settings.get_setting('tvShelf') and settings.get_setting('musicShelf')) or settings.get_setting('homeshelf') == '3':
printDebug.debug("Disabling all shelf items")
clearShelf()
return
#Get the global host variable set in settings
WINDOW = xbmcgui.Window( 10000 )
movieCount=1
seasonCount=1
musicCount=1
added_list={}
direction=True
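    #direction True: recently-added items, sorted newest first by their 'addedAt' timestamp; False: On Deck items kept in server order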
full_count=0
if server_list is None:
server_list=plex_network.get_server_list()
    if not server_list:
xbmc.executebuiltin("XBMC.Notification(Unable to see any media servers,)")
clearShelf(0,0,0)
return
randomNumber=str(random.randint(1000000000,9999999999))
    for server_details in server_list:
if server_details.is_secondary() or not server_details.is_owned():
continue
if settings.get_setting('homeshelf') == '0' or settings.get_setting('homeshelf') == '2':
tree=server_details.get_server_recentlyadded()
else:
direction=False
tree=server_details.get_server_ondeck()
if tree is None:
xbmc.executebuiltin("XBMC.Notification(Unable to contact server: %s,)" % server_details.get_name() )
clearShelf()
return
for eachitem in tree:
if direction:
added_list[int(eachitem.get('addedAt',0))] = (eachitem, server_details )
else:
added_list[full_count] = (eachitem, server_details)
full_count += 1
library_filter = settings.get_setting('libraryfilter')
acceptable_level = settings.get_setting('contentFilter')
#For each of the servers we have identified
    for index in sorted(added_list, reverse=direction):
        media, server = added_list[index]
if media.get('type') == "movie":
title_name=media.get('title','Unknown').encode('UTF-8')
printDebug.debug("Found a recent movie entry: [%s]" % title_name )
if not settings.get_setting('movieShelf'):
WINDOW.clearProperty("Plexbmc.LatestMovie.1.Path" )
continue
if not displayContent( acceptable_level , media.get('contentRating') ):
continue
if media.get('librarySectionID') == library_filter:
printDebug.debug("SKIPPING: Library Filter match: %s = %s " % (library_filter, media.get('librarySectionID')))
continue
title_url="plugin://plugin.video.plexbmc?url=%s&mode=%s&t=%s" % ( getLinkURL(server.get_url_location(),media,server), MODE_PLAYSHELF, randomNumber)
title_thumb=getThumb(media,server)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Path" % movieCount, title_url)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Title" % movieCount, title_name)
WINDOW.setProperty("Plexbmc.LatestMovie.%s.Thumb" % movieCount, title_thumb)
movieCount += 1
elif media.get('type') == "season":
printDebug.debug("Found a recent season entry [%s]" % ( media.get('parentTitle','Unknown').encode('UTF-8') , ))
if not settings.get_setting('tvShelf'):
WINDOW.clearProperty("Plexbmc.LatestEpisode.1.Path" )
continue
title_name=media.get('parentTitle','Unknown').encode('UTF-8')
title_url="ActivateWindow(VideoLibrary, plugin://plugin.video.plexbmc?url=%s&mode=%s, return)" % ( getLinkURL(server.get_url_location(),media,server), MODE_TVEPISODES)
title_thumb=getThumb(media,server)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Path" % seasonCount, title_url )
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeTitle" % seasonCount, '')
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeSeason" % seasonCount, media.get('title','').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.ShowTitle" % seasonCount, title_name)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Thumb" % seasonCount, title_thumb)
seasonCount += 1
elif media.get('type') == "album":
if not settings.get_setting('musicShelf'):
WINDOW.clearProperty("Plexbmc.LatestAlbum.1.Path" )
continue
printDebug.debug("Found a recent album entry")
title_name=media.get('parentTitle','Unknown').encode('UTF-8')
title_url="ActivateWindow(MusicFiles, plugin://plugin.video.plexbmc?url=%s&mode=%s, return)" % ( getLinkURL(server.get_url_location(),media,server), MODE_TRACKS)
title_thumb=getThumb(media,server)
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Path" % musicCount, title_url )
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Title" % musicCount, media.get('title','Unknown').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Artist" % musicCount, title_name)
WINDOW.setProperty("Plexbmc.LatestAlbum.%s.Thumb" % musicCount, title_thumb)
musicCount += 1
elif media.get('type') == "episode":
title_name=media.get('title','Unknown').encode('UTF-8')
printDebug.debug("Found an onDeck episode entry [%s]" % title_name)
if not settings.get_setting('tvShelf'):
WINDOW.clearProperty("Plexbmc.LatestEpisode.1.Path" )
continue
            title_url="PlayMedia(plugin://plugin.video.plexbmc?url=%s&mode=%s&t=%s)" % ( getLinkURL(server.get_url_location(),media,server), MODE_PLAYSHELF, randomNumber)
title_thumb=server.get_kodi_header_formatted_url(media.get('grandparentThumb',''))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Path" % seasonCount, title_url )
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeTitle" % seasonCount, title_name)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.EpisodeSeason" % seasonCount, media.get('grandparentTitle','Unknown').encode('UTF-8'))
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.ShowTitle" % seasonCount, title_name)
WINDOW.setProperty("Plexbmc.LatestEpisode.%s.Thumb" % seasonCount, title_thumb)
seasonCount += 1
printDebug.debug(" Building Recent window title: %s\n Building Recent window url: %s\n Building Recent window thumb: %s" % (title_name, title_url, title_thumb))
clearShelf( movieCount, seasonCount, musicCount)
def clearShelf (movieCount=0, seasonCount=0, musicCount=0, photoCount=0):
#Clear out old data
WINDOW = xbmcgui.Window( 10000 )
printDebug.debug("Clearing unused properties")
try:
for i in range(movieCount, 50+1):
WINDOW.clearProperty("Plexbmc.LatestMovie.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestMovie.%s.Title" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestMovie.%s.Year" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestMovie.%s.Rating" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestMovie.%s.Duration" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestMovie.%s.Thumb" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestMovie.%s.uuid" % ( i ) )
printDebug.debug("Done clearing movies")
except: pass
try:
for i in range(seasonCount, 50+1):
WINDOW.clearProperty("Plexbmc.LatestEpisode.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestEpisode.%s.EpisodeTitle" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestEpisode.%s.EpisodeSeason" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestEpisode.%s.ShowTitle" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestEpisode.%s.Thumb" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestEpisode.%s.uuid" % ( i ) )
printDebug.debug("Done clearing tv")
except: pass
try:
for i in range(musicCount, 25+1):
WINDOW.clearProperty("Plexbmc.LatestAlbum.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestAlbum.%s.Title" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestAlbum.%s.Artist" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestAlbum.%s.Thumb" % ( i ) )
printDebug.debug("Done clearing music")
except: pass
try:
for i in range(photoCount, 25+1):
WINDOW.clearProperty("Plexbmc.LatestPhoto.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestPhoto.%s.Title" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestPhoto.%s.Thumb" % ( i ) )
printDebug.debug("Done clearing photos")
except: pass
return
def clearOnDeckShelf (movieCount=0, seasonCount=0):
#Clear out old data
WINDOW = xbmcgui.Window( 10000 )
printDebug.debug("Clearing unused On Deck properties")
try:
for i in range(movieCount, 60+1):
WINDOW.clearProperty("Plexbmc.OnDeckMovie.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckMovie.%s.Title" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckMovie.%s.Thumb" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckMovie.%s.Rating" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckMovie.%s.Duration" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckMovie.%s.Year" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckMovie.%s.uuid" % ( i ) )
printDebug.debug("Done clearing On Deck movies")
except: pass
try:
for i in range(seasonCount, 60+1):
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.%s.EpisodeTitle" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.%s.EpisodeSeason" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.%s.ShowTitle" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.%s.Thumb" % ( i ) )
WINDOW.clearProperty("Plexbmc.OnDeckEpisode.%s.uuid" % ( i ) )
printDebug.debug("Done clearing On Deck tv")
except: pass
return
def shelfChannel(server_list = None):
#Gather some data and set the window properties
printDebug.debug("== ENTER ==")
if not settings.get_setting('channelShelf') or settings.get_setting('homeshelf') == '3':
printDebug.debug("Disabling channel shelf")
clearChannelShelf()
return
#Get the global host variable set in settings
WINDOW = xbmcgui.Window( 10000 )
channelCount=1
if server_list is None:
server_list=plex_network.get_server_list()
if not server_list:
xbmc.executebuiltin("XBMC.Notification(Unable to see any media servers,)")
clearChannelShelf()
return
for server_details in server_list:
if server_details.is_secondary() or not server_details.is_owned():
continue
if not settings.get_setting('channelShelf') or settings.get_setting('homeshelf') == '3':
WINDOW.clearProperty("Plexbmc.LatestChannel.1.Path" )
return
tree=server_details.get_channel_recentlyviewed()
if tree is None:
xbmc.executebuiltin("XBMC.Notification(Unable to contact server: %s, )" % server_details.get_name())
clearChannelShelf(0)
return
#For each of the servers we have identified
for media in tree:
printDebug.debug("Found a recent channel entry")
suffix=media.get('key').split('/')[1]
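            #the first path component of the channel key ("photos", "video" or "music") selects the Kodi window and plugin mode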
if suffix == "photos":
mode=MODE_PHOTOS
channel_window = "Pictures"
elif suffix == "video":
mode=MODE_PLEXPLUGINS
channel_window="VideoLibrary"
elif suffix == "music":
mode=MODE_MUSIC
channel_window="MusicFiles"
else:
mode=MODE_GETCONTENT
channel_window="VideoLibrary"
c_url="ActivateWindow(%s, plugin://plugin.video.plexbmc?url=%s&mode=%s)" % ( channel_window, getLinkURL(server_details.get_url_location(),media,server_details), mode)
pms_thumb = str(media.get('thumb', ''))
if pms_thumb.startswith('/'):
c_thumb = server_details.get_kodi_header_formatted_url(pms_thumb)
else:
c_thumb = pms_thumb
WINDOW.setProperty("Plexbmc.LatestChannel.%s.Path" % channelCount, c_url)
WINDOW.setProperty("Plexbmc.LatestChannel.%s.Title" % channelCount, media.get('title', 'Unknown'))
WINDOW.setProperty("Plexbmc.LatestChannel.%s.Thumb" % channelCount, c_thumb)
channelCount += 1
printDebug.debug("Building Recent window title: %s\n Building Recent window url: %s\n Building Recent window thumb: %s" % (media.get('title', 'Unknown'),c_url,c_thumb))
clearChannelShelf(channelCount)
return
def clearChannelShelf (channelCount=0):
WINDOW = xbmcgui.Window( 10000 )
try:
for i in range(channelCount, 30+1):
WINDOW.clearProperty("Plexbmc.LatestChannel.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestChannel.%s.Title" % ( i ) )
WINDOW.clearProperty("Plexbmc.LatestChannel.%s.Thumb" % ( i ) )
printDebug.debug("Done clearing channels")
except: pass
return
def clearQueueShelf (queueCount=0):
WINDOW = xbmcgui.Window( 10000 )
try:
for i in range(queueCount, 15+1):
WINDOW.clearProperty("Plexbmc.Queue.%s.Path" % ( i ) )
WINDOW.clearProperty("Plexbmc.Queue.%s.Title" % ( i ) )
WINDOW.clearProperty("Plexbmc.Queue.%s.Thumb" % ( i ) )
printDebug.debug("Done clearing Queue shelf")
except: pass
return
def myPlexQueue():
printDebug.debug("== ENTER ==")
if not plex_network.is_myplex_signedin():
xbmc.executebuiltin("XBMC.Notification(myplex not configured,)")
return
tree=plex_network.get_myplex_queue()
PlexPlugins('http://my.plexapp.com/playlists/queue/all', tree)
return
def libraryRefresh( server_uuid , section_id):
printDebug.debug("== ENTER ==")
server=plex_network.get_server_from_uuid(server_uuid)
server.refresh_section(section_id)
printDebug.info("Library refresh requested")
xbmc.executebuiltin("XBMC.Notification(\"PleXBMC\",Library Refresh started,100)")
return
def watched( server_uuid, metadata_id, watched='watch' ):
printDebug.debug("== ENTER ==")
server=plex_network.get_server_from_uuid(server_uuid)
if watched == 'watch':
printDebug.info("Marking %s as watched" % metadata_id)
server.mark_item_watched(metadata_id)
else:
printDebug.info("Marking %s as unwatched" % metadata_id)
server.mark_item_unwatched(metadata_id)
xbmc.executebuiltin("Container.Refresh")
return
def deleteMedia( server_uuid, metadata_id ):
printDebug.debug("== ENTER ==")
printDebug.info("Deleting media at: %s" % metadata_id)
return_value = xbmcgui.Dialog().yesno("Confirm file delete?","Delete this item? This action will delete media and associated data files.")
if return_value:
printDebug.debug("Deleting....")
server=plex_network.get_server_from_uuid(server_uuid)
server.delete_metadata(metadata_id)
xbmc.executebuiltin("Container.Refresh")
return True
def alterSubs ( server_uuid, metadata_id ):
'''
Display a list of available Subtitle streams and allow a user to select one.
The currently selected stream will be annotated with a *
'''
printDebug.debug("== ENTER ==")
server = plex_network.get_server_from_uuid(server_uuid)
tree = server.get_metadata(metadata_id)
sub_list=['']
display_list=["None"]
fl_select=False
for parts in tree.getiterator('Part'):
part_id=parts.get('id')
for streams in parts:
if streams.get('streamType','') == "3":
stream_id=streams.get('id')
lang=streams.get('languageCode',"Unknown").encode('utf-8')
printDebug.debug("Detected Subtitle stream [%s] [%s]" % ( stream_id, lang ) )
if streams.get('format',streams.get('codec')) == "idx":
printDebug.debug("Stream: %s - Ignoring idx file for now" % stream_id)
continue
else:
sub_list.append(stream_id)
if streams.get('selected') == '1':
fl_select=True
language=streams.get('language','Unknown')+"*"
else:
language=streams.get('language','Unknown')
display_list.append(language)
break
if not fl_select:
display_list[0]=display_list[0]+"*"
subScreen = xbmcgui.Dialog()
result = subScreen.select('Select subtitle',display_list)
if result == -1:
return False
printDebug.debug("User has selected stream %s" % sub_list[result])
server.set_subtitle_stream(part_id, sub_list[result])
return True
def alterAudio ( server_uuid, metadata_id ):
'''
Display a list of available audio streams and allow a user to select one.
The currently selected stream will be annotated with a *
'''
printDebug.debug("== ENTER ==")
server = plex_network.get_server_from_uuid(server_uuid)
tree = server.get_metadata(metadata_id)
audio_list=[]
display_list=[]
for parts in tree.getiterator('Part'):
part_id=parts.get('id')
for streams in parts:
if streams.get('streamType','') == "2":
stream_id=streams.get('id')
audio_list.append(stream_id)
lang=streams.get('languageCode', "Unknown")
printDebug.debug("Detected Audio stream [%s] [%s] " % ( stream_id, lang))
if streams.get('channels','Unknown') == '6':
channels="5.1"
elif streams.get('channels','Unknown') == '7':
channels="6.1"
elif streams.get('channels','Unknown') == '2':
channels="Stereo"
else:
channels=streams.get('channels','Unknown')
if streams.get('codec','Unknown') == "ac3":
codec="AC3"
elif streams.get('codec','Unknown') == "dca":
codec="DTS"
else:
codec=streams.get('codec','Unknown')
language="%s (%s %s)" % ( streams.get('language','Unknown').encode('utf-8') , codec, channels )
if streams.get('selected') == '1':
language=language+"*"
display_list.append(language)
break
audioScreen = xbmcgui.Dialog()
result = audioScreen.select('Select audio',display_list)
if result == -1:
return False
printDebug.debug("User has selected stream %s" % audio_list[result])
server.set_audio_stream(part_id, audio_list[result])
return True
def setWindowHeading(tree) :
WINDOW = xbmcgui.Window( xbmcgui.getCurrentWindowId() )
try:
WINDOW.setProperty("heading", tree.get('title1'))
except:
WINDOW.clearProperty("heading")
try:
WINDOW.setProperty("heading2", tree.get('title2'))
except:
WINDOW.clearProperty("heading2")
def setMasterServer () :
printDebug.debug("== ENTER ==")
servers=getMasterServer(True)
printDebug.debug(str(servers))
current_master=settings.get_setting('masterServer')
displayList=[]
for address in servers:
found_server = address.get_name()
if found_server == current_master:
found_server = found_server+"*"
displayList.append(found_server)
audioScreen = xbmcgui.Dialog()
result = audioScreen.select('Select master server', displayList)
if result == -1:
return False
printDebug.debug("Setting master server to: %s" % servers[result].get_name() )
settings.update_master_server(servers[result].get_name() )
return
def displayServers( url ):
printDebug.debug("== ENTER ==")
type=url.split('/')[2]
printDebug.debug("Displaying entries for %s" % type)
Servers = plex_network.get_server_list()
Servers_list=len(Servers)
#For each of the servers we have identified
for mediaserver in Servers:
if mediaserver.is_secondary():
continue
details={'title' : mediaserver.get_name() }
extraData={}
if type == "video":
extraData['mode']=MODE_PLEXPLUGINS
s_url='%s%s' % ( mediaserver.get_url_location(), '/video' )
if Servers_list == 1:
PlexPlugins(s_url)
return
elif type == "online":
extraData['mode']=MODE_PLEXONLINE
s_url='%s%s' % ( mediaserver.get_url_location() , '/system/plexonline')
if Servers_list == 1:
plexOnline(s_url)
return
elif type == "music":
extraData['mode']=MODE_MUSIC
s_url='%s%s' % ( mediaserver.get_url_location(), '/music' )
if Servers_list == 1:
music(s_url)
return
elif type == "photo":
extraData['mode']=MODE_PHOTOS
s_url='%s%s' % ( mediaserver.get_url_location(), '/photos' )
if Servers_list == 1:
photo(s_url)
return
addGUIItem(s_url, details, extraData )
xbmcplugin.endOfDirectory(pluginhandle,cacheToDisc=settings.get_setting('kodicache'))
def switch_user():
#Get listof users
user_list = plex_network.get_plex_home_users()
#zero means we are not plexHome'd up
if user_list is None or len(user_list) == 1:
printDebug("No users listed or only one user, plexHome not enabled")
return False
printDebug("found %s users: %s" % (len(user_list), user_list.keys()))
#Get rid of currently logged in user.
user_list.pop(plex_network.get_myplex_user(), None)
select_screen = xbmcgui.Dialog()
result = select_screen.select('Switch User',user_list.keys())
if result == -1:
printDebug("Dialog cancelled")
return False
printDebug("user [%s] selected" % user_list.keys()[result])
user = user_list[user_list.keys()[result]]
pin=None
if user['protected'] == '1':
printDebug("Protected user [%s], requesting password" % user['title'])
pin = select_screen.input("Enter PIN", type=xbmcgui.INPUT_NUMERIC, option=xbmcgui.ALPHANUM_HIDE_INPUT)
success,msg = plex_network.switch_plex_home_user(user['id'], pin)
if not success:
xbmcgui.Dialog().ok("Switch Failed",msg)
return False
return True
##So this is where we really start the addon
printDebug=printDebug("PleXBMC")
print "PleXBMC -> Running PleXBMC: %s " % GLOBAL_SETUP['__version__']
wake_servers()
if settings.get_debug() >= printDebug.DEBUG_INFO:
print "PleXBMC -> Script argument is %s" % sys.argv
print "PleXBMC -> Running Python: %s" % str(sys.version_info)
print "PleXBMC -> CWD is set to: %s" % GLOBAL_SETUP['__cwd__']
print "PleXBMC -> Platform: %s" % GLOBAL_SETUP['platform']
print "PleXBMC -> Setting debug: %s" % printDebug.get_name(settings.get_debug())
print "PleXBMC -> FullRes Thumbs are set to: %s" % settings.get_setting('fullres_thumbs')
print "PleXBMC -> Settings streaming: %s" % settings.get_stream()
print "PleXBMC -> Setting filter menus: %s" % settings.get_setting('secondary')
print "PleXBMC -> Flatten is: %s" % settings.get_setting('flatten')
if settings.get_setting('streamControl') == SUB_AUDIO_XBMC_CONTROL:
print "PleXBMC -> Setting stream Control to : XBMC CONTROL"
elif settings.get_setting('streamControl') == SUB_AUDIO_PLEX_CONTROL:
print "PleXBMC -> Setting stream Control to : PLEX CONTROL"
elif settings.get_setting('streamControl') == SUB_AUDIO_NEVER_SHOW:
print "PleXBMC -> Setting stream Control to : NEVER SHOW"
print "PleXBMC -> Force DVD playback: %s" % settings.get_setting('forcedvd')
print "PleXBMC -> SMB IP Override: %s" % settings.get_setting('nasoverride')
if settings.get_setting('nasoverride') and not settings.get_setting('nasoverrideip'):
print "PleXBMC -> No NAS IP Specified. Ignoring setting"
else:
print "PleXBMC -> NAS IP: " + settings.get_setting('nasoverrideip')
else:
print "PleXBMC -> Debug is turned off. Running silent"
pluginhandle=0
plex_network=plex.Plex(load=False)
def start_plexbmc():
try:
params=get_params(sys.argv[2])
except:
params={}
#Now try and assign some data to them
param_url=params.get('url')
command=None
if param_url:
if ( param_url.startswith('http') or param_url.startswith('file') ):
param_url = urllib.unquote(param_url)
elif param_url.startswith('cmd'):
command=urllib.unquote(param_url).split(':')[1]
param_name=urllib.unquote_plus(params.get('name',""))
mode=int(params.get('mode',-1))
play_transcode=True if int(params.get('transcode',0)) == 1 else False
param_identifier=params.get('identifier')
param_indirect=params.get('indirect')
force=params.get('force')
if command is None:
try:
command=sys.argv[1]
except:
pass
if command == "cacherefresh":
plex_network.delete_cache()
xbmc.executebuiltin("ReloadSkin()")
#Open the add-on settings page, then refresh plugin
elif command == "setting":
settings.openSettings()
if xbmcgui.getCurrentWindowId() == 10000:
printDebug.debug("Currently in home - refreshing to allow new settings to be taken")
xbmc.executebuiltin("ReloadSkin()")
#Refresh the current XBMC listing
elif command == "refresh":
xbmc.executebuiltin("Container.Refresh")
elif command == "switchuser":
if switch_user():
clear_skin_sections()
clearOnDeckShelf()
clearShelf()
WINDOW = xbmcgui.Window(10000)
WINDOW.setProperty("plexbmc.plexhome_user" , str(plex_network.get_myplex_user()))
WINDOW.setProperty("plexbmc.plexhome_avatar" , str(plex_network.get_myplex_avatar()))
if xbmcgui.getCurrentWindowId() == 10000:
printDebug.debug("Currently in home - refreshing to allow new settings to be taken")
xbmc.executebuiltin("ReloadSkin()")
else:
xbmc.executebuiltin("Container.Refresh")
else:
printDebug.info("Switch User Failed")
elif command == "signout":
if not plex_network.is_admin():
return xbmcgui.Dialog().ok("Sign Out","To sign out you must be logged in as an admin user. Please switch user and try again")
ret = xbmcgui.Dialog().yesno("myplex","You are currently signed into myPlex. Are you sure you want to sign out?")
if ret:
plex_network.signout()
WINDOW = xbmcgui.Window(10000)
WINDOW.clearProperty("plexbmc.plexhome_user" )
WINDOW.clearProperty("plexbmc.plexhome_avatar" )
clear_skin_sections()
clearOnDeckShelf()
clearShelf()
xbmc.executebuiltin("ReloadSkin()")
elif command == "signin":
import plex_signin
signin_window = plex_signin.plex_signin('Myplex Login')
signin_window.set_authentication_target(plex_network)
signin_window.start()
del signin_window
elif command == "signintemp":
#Awful hack to get around running a script from a listitem..
xbmc.executebuiltin('XBMC.RunScript(plugin.video.plexbmc, signin)')
elif command == "managemyplex":
if not plex_network.is_myplex_signedin():
ret = xbmcgui.Dialog().yesno("Manage myplex","You are not currently logged into myplex. Please continue to sign in, or cancel to return")
if ret:
xbmc.executebuiltin('XBMC.RunScript(plugin.video.plexbmc, signin)')
else:
return
elif not plex_network.is_admin():
return xbmcgui.Dialog().ok("Manage myplex","To access these screens you must be logged in as an admin user. Please switch user and try again")
import plex_signin
manage_window = plex_signin.plex_manage('Manage myplex')
manage_window.set_authentication_target(plex_network)
manage_window.start()
del manage_window
else:
plex_network.load()
#Populate Skin variables
if command == "skin":
try:
type=sys.argv[2]
except:
type=None
skin(type=type)
elif command == "amberskin":
amberskin()
#Populate recently/on deck shelf items
elif command == "shelf":
shelf()
#Populate channel recently viewed items
elif command == "channelShelf":
shelfChannel()
#Send a library update to Plex
elif command == "update":
server_uuid=sys.argv[2]
section_id=sys.argv[3]
libraryRefresh(server_uuid, section_id)
#Mark an item as watched/unwatched in plex
elif command == "watch":
server_uuid=sys.argv[2]
metadata_id=sys.argv[3]
watch_status=sys.argv[4]
watched(server_uuid, metadata_id, watch_status )
        #not currently used
elif command == "refreshplexbmc":
plex_network.discover()
server_list = plex_network.get_server_list()
skin(server_list)
shelf(server_list)
shelfChannel(server_list)
#delete media from PMS
elif command == "delete":
server_uuid=sys.argv[2]
metadata_id=sys.argv[3]
deleteMedia(server_uuid, metadata_id)
#Display subtitle selection screen
elif command == "subs":
server_uuid=sys.argv[2]
metadata_id=sys.argv[3]
alterSubs(server_uuid, metadata_id)
        #Display audio stream selection screen
elif command == "audio":
server_uuid=sys.argv[2]
metadata_id=sys.argv[3]
alterAudio(server_uuid, metadata_id)
        #Allow a master server to be selected (for myplex queue)
elif command == "master":
setMasterServer()
#else move to the main code
else:
global pluginhandle
try:
pluginhandle = int(command)
except:
pass
WINDOW = xbmcgui.Window( xbmcgui.getCurrentWindowId() )
WINDOW.clearProperty("heading")
WINDOW.clearProperty("heading2")
if settings.get_debug() >= printDebug.DEBUG_INFO:
print "PleXBMC -> Mode: %s " % mode
print "PleXBMC -> URL: %s" % param_url
print "PleXBMC -> Name: %s" % param_name
print "PleXBMC -> identifier: %s" % param_identifier
#Run a function based on the mode variable that was passed in the URL
if ( mode == None ) or ( param_url == None ) or ( len(param_url)<1 ):
displaySections()
elif mode == MODE_GETCONTENT:
getContent(param_url)
elif mode == MODE_TVSHOWS:
TVShows(param_url)
elif mode == MODE_MOVIES:
Movies(param_url)
elif mode == MODE_ARTISTS:
artist(param_url)
elif mode == MODE_TVSEASONS:
TVSeasons(param_url)
elif mode == MODE_PLAYLIBRARY:
playLibraryMedia(param_url,force=force, override=play_transcode)
elif mode == MODE_PLAYSHELF:
playLibraryMedia(param_url,full_data=True, shelf=True)
elif mode == MODE_TVEPISODES:
TVEpisodes(param_url)
elif mode == MODE_PLEXPLUGINS:
PlexPlugins(param_url)
elif mode == MODE_PROCESSXML:
processXML(param_url)
elif mode == MODE_BASICPLAY:
PLAY(param_url)
elif mode == MODE_ALBUMS:
albums(param_url)
elif mode == MODE_TRACKS:
tracks(param_url)
elif mode == MODE_PHOTOS:
photo(param_url)
elif mode == MODE_MUSIC:
music(param_url)
elif mode == MODE_VIDEOPLUGINPLAY:
videoPluginPlay(param_url,param_identifier,param_indirect)
elif mode == MODE_PLEXONLINE:
plexOnline(param_url)
elif mode == MODE_CHANNELINSTALL:
install(param_url,param_name)
elif mode == MODE_CHANNELVIEW:
channelView(param_url)
elif mode == MODE_PLAYLIBRARY_TRANSCODE:
playLibraryMedia(param_url,override=True)
elif mode == MODE_MYPLEXQUEUE:
myPlexQueue()
elif mode == MODE_CHANNELSEARCH:
channelSearch( param_url, params.get('prompt') )
elif mode == MODE_CHANNELPREFS:
channelSettings ( param_url, params.get('id') )
elif mode == MODE_SHARED_MOVIES:
displaySections(filter="movies", display_shared=True)
elif mode == MODE_SHARED_SHOWS:
displaySections(filter="tvshows", display_shared=True)
elif mode == MODE_SHARED_PHOTOS:
displaySections(filter="photos", display_shared=True)
elif mode == MODE_SHARED_MUSIC:
displaySections(filter="music", display_shared=True)
elif mode == MODE_SHARED_ALL:
displaySections(display_shared=True)
elif mode == MODE_DELETE_REFRESH:
plex_network.delete_cache()
xbmc.executebuiltin("Container.Refresh")
elif mode == MODE_PLAYLISTS:
processXML(param_url)
elif mode == MODE_DISPLAYSERVERS:
displayServers(param_url)
| gpl-2.0 | 1,801,083,209,271,853,800 | 39.409733 | 236 | 0.584815 | false |
StephenWeber/ansible | lib/ansible/modules/clustering/consul_kv.py | 25 | 9785 | #!/usr/bin/python
#
# (c) 2015, Steve Gargan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
module: consul_kv
short_description: Manipulate entries in the key/value store of a consul cluster.
description:
- Allows the addition, modification and deletion of key/value entries in a
consul cluster via the agent. The entire contents of the record, including
the indices, flags and session are returned as 'value'.
  - If the key represents a prefix, note that when a value is removed, the existing
    value, if any, is returned as part of the results.
- "See http://www.consul.io/docs/agent/http.html#kv for more details."
requirements:
- "python >= 2.6"
- python-consul
- requests
version_added: "2.0"
author: "Steve Gargan (@sgargan)"
options:
state:
description:
- the action to take with the supplied key and value. If the state is
'present', the key contents will be set to the value supplied,
'changed' will be set to true only if the value was different to the
current contents. The state 'absent' will remove the key/value pair,
again 'changed' will be set to true only if the key actually existed
prior to the removal. An attempt can be made to obtain or free the
lock associated with a key/value pair with the states 'acquire' or
'release' respectively. a valid session must be supplied to make the
attempt changed will be true if the attempt is successful, false
otherwise.
required: false
choices: ['present', 'absent', 'acquire', 'release']
default: present
key:
description:
- the key at which the value should be stored.
required: true
value:
description:
      - the value to be associated with the given key; required if state
        is 'present'
required: true
recurse:
description:
- if the key represents a prefix, each entry with the prefix can be
retrieved by setting this to true.
required: false
default: false
session:
description:
- the session that should be used to acquire or release a lock
associated with a key/value pair
required: false
default: None
token:
description:
      - the token key identifying an ACL rule set that controls access to
the key value pair
required: false
default: None
cas:
description:
- used when acquiring a lock with a session. If the cas is 0, then
Consul will only put the key if it does not already exist. If the
cas value is non-zero, then the key is only set if the index matches
the ModifyIndex of that key.
required: false
default: None
flags:
description:
- opaque integer value that can be passed when setting a value.
required: false
default: None
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
required: false
default: True
version_added: "2.1"
"""
EXAMPLES = '''
- name: add or update the value associated with a key in the key/value store
consul_kv:
key: somekey
value: somevalue
- name: remove a key from the store
consul_kv:
key: somekey
state: absent
- name: add a node to an arbitrary group via consul inventory (see consul.ini)
consul_kv:
key: ansible/groups/dc1/somenode
value: 'top_secret'
- name: Register a key/value pair with an associated session
consul_kv:
key: stg/node/server_birthday
value: 20160509
session: "{{ sessionid }}"
state: acquire
'''
import sys
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
from requests.exceptions import ConnectionError
def execute(module):
state = module.params.get('state')
if state == 'acquire' or state == 'release':
lock(module, state)
if state == 'present':
add_value(module)
else:
remove_value(module)
def lock(module, state):
consul_api = get_consul_api(module)
session = module.params.get('session')
key = module.params.get('key')
value = module.params.get('value')
if not session:
        module.fail_json(
msg='%s of lock for %s requested but no session supplied' %
(state, key))
index, existing = consul_api.kv.get(key)
changed = not existing or (existing and existing['Value'] != value)
if changed and not module.check_mode:
if state == 'acquire':
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
acquire=session,
flags=module.params.get('flags'))
else:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
release=session,
flags=module.params.get('flags'))
module.exit_json(changed=changed,
index=index,
key=key)
def add_value(module):
consul_api = get_consul_api(module)
key = module.params.get('key')
value = module.params.get('value')
index, existing = consul_api.kv.get(key)
changed = not existing or (existing and existing['Value'] != value)
if changed and not module.check_mode:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
flags=module.params.get('flags'))
if module.params.get('retrieve'):
index, stored = consul_api.kv.get(key)
module.exit_json(changed=changed,
index=index,
key=key,
data=stored)
def remove_value(module):
''' remove the value associated with the given key. if the recurse parameter
is set then any key prefixed with the given key will be removed. '''
consul_api = get_consul_api(module)
key = module.params.get('key')
value = module.params.get('value')
index, existing = consul_api.kv.get(
key, recurse=module.params.get('recurse'))
changed = existing is not None
if changed and not module.check_mode:
consul_api.kv.delete(key, module.params.get('recurse'))
module.exit_json(changed=changed,
index=index,
key=key,
data=existing)
def get_consul_api(module, token=None):
return consul.Consul(host=module.params.get('host'),
port=module.params.get('port'),
scheme=module.params.get('scheme'),
verify=module.params.get('validate_certs'),
token=module.params.get('token'))
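# Illustrative sketch only (not part of the module): the underlying python-consul
# calls that the states above map to. Key and value names are hypothetical.
#
#   c = consul.Consul(host='localhost', port=8500)
#   c.kv.put('somekey', 'somevalue')          # state: present
#   index, data = c.kv.get('somekey')         # read back the stored record
#   c.kv.delete('somekey', recurse=True)      # state: absent with recurse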
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. "\
"see http://python-consul.readthedocs.org/en/latest/#installation")
def main():
argument_spec = dict(
cas=dict(required=False),
flags=dict(required=False),
key=dict(required=True),
host=dict(default='localhost'),
scheme=dict(required=False, default='http'),
validate_certs=dict(required=False, type='bool', default=True),
port=dict(default=8500, type='int'),
recurse=dict(required=False, type='bool'),
retrieve=dict(required=False, type='bool', default=True),
state=dict(default='present', choices=['present', 'absent', 'acquire', 'release']),
token=dict(required=False, no_log=True),
value=dict(required=False),
session=dict(required=False)
)
module = AnsibleModule(argument_spec, supports_check_mode=False)
test_dependencies(module)
try:
execute(module)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), str(e)))
except Exception as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -7,380,284,078,236,923,000 | 32.395904 | 91 | 0.609811 | false |
Esri/mdcs-py | scripts/MDCS.py | 1 | 8890 | # ------------------------------------------------------------------------------
# Copyright 2019 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# Name: MDCS.py
# Description: This is the main program entry point to MDCS.
# Version: 20201230
# Requirements: ArcGIS 10.1 SP1
# Required Arguments: -i:<config_file>
# Usage: python.exe MDCS.py -c:<Optional:command(s)> -i:<config_file>
# Notes: Type 'python.exe mdcs.py' to display the usage and a list of valid command codes.
# Author: Esri Imagery Workflows team
# ------------------------------------------------------------------------------
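# A hypothetical invocation (sketch only; 'AR' is the add-rasters command referenced
# in comInfo below, and the config path is made up for illustration):
#   python.exe MDCS.py -c:AR -i:D:\MDCS\Parameter\Config\WorldElevation.xml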
#!/usr/bin/env python
import arcpy
import sys
import os
solutionLib_path = os.path.dirname(os.path.abspath(__file__)) # set the location to the solutionsLib path
sys.path.append(solutionLib_path)
sys.path.append(os.path.join(solutionLib_path, 'SolutionsLog'))
import logger
import solutionsLib # import Raster Solutions library
import Base
from ProgramCheckAndUpdate import ProgramCheckAndUpdate
# cli callback ptrs
g_cli_callback = None
g_cli_msg_callback = None
# ends
# cli arcpy callback
def register_for_callbacks(fn_ptr):
global g_cli_callback
g_cli_callback = fn_ptr
# ends
# cli msg callback
def register_for_msg_callbacks(fn_ptr):
global g_cli_msg_callback
g_cli_msg_callback = fn_ptr
# ends
def postAddData(gdbPath, mdName, info):
mdName = info['md']
obvalue = info['pre_AddRasters_record_count']
fullPath = os.path.join(gdbPath, mdName)
mosaicMDType = info['type'].lower()
if(mosaicMDType == 'source'):
expression = 'OBJECTID >{}'.format(obvalue)
try:
fieldName = 'Dataset_ID'
fieldExist = arcpy.ListFields(fullPath, fieldName)
if (not fieldExist):
arcpy.AddField_management(fullPath, fieldName, "TEXT", "", "", "50")
log.Message('Calculating \'Dataset ID\' for the mosaic dataset ({}) with value ({})'.format(mdName, info[fieldName]), log.const_general_text)
with arcpy.da.UpdateCursor(fullPath, [fieldName], expression) as rows:
for row in rows:
row[0] = info[fieldName]
rows.updateRow(row)
except BaseException:
log.Message('Err. Failed to calculate \'Dataset_ID\'', log.const_critical_text)
log.Message(arcpy.GetMessages(), log.const_critical_text)
return False
return True
def main(argc, argv):
if (argc < 2):
# command-line argument codes.
# -i:config file.
# -c:command codes
# -m:mosaic dataset name
# -s:Source data paths. (as inputs to command (AR).
# -l:Full path to log file (including file name)
user_args = \
[
"-m: Mosaic dataset path including GDB and MD name [e.g. c:\WorldElevation.gdb\Portland]",
"-s: Source data paths. (As inputs to command (AR). -s: can be repeated to add multiple paths",
"-l: Log file output path [path+file name]",
"-artdem: Update DEM path in ART file"
]
print ("\nMDCS.py v5.9 [20170425]\nUsage: MDCS.py -c:<Optional:command> -i:<config_file>"
"\n\nFlags to override configuration values,")
for arg in user_args:
print (arg)
print (
"\nNote: Commands can be combined with '+' to do multiple operations."
"\nAvailable commands:")
user_cmds = solutionsLib.Solutions().getAvailableCommands()
for key in user_cmds:
print ("\t" + key + ' = ' + user_cmds[key]['desc'])
sys.exit(1)
base = Base.Base()
if (not g_cli_callback is None):
base.m_cli_callback_ptr = g_cli_callback
if (not g_cli_msg_callback is None):
base.m_cli_msg_callback_ptr = g_cli_msg_callback
global log
log = logger.Logger(base)
base.setLog(log)
argIndx = 0
md_path_ = artdem = config = com = log_folder = code_base = ''
PathSeparator = ';'
while(argIndx < argc):
(values) = argv[argIndx].split(':')
if (len(values[0]) < 2 or
values[0][:1] != '-' and
values[0][:1] != '#'):
argIndx += 1
continue
exSubCode = values[0][1:len(values[0])].lower()
subCode = values.pop(0)[1].lower()
value = ':'.join(values).strip()
if (subCode == 'c'):
com = value.replace(' ', '') # remove spaces in between.
elif(subCode == 'i'):
config = value
elif(subCode == 'm'):
md_path_ = value
elif(subCode == 's'):
base.m_sources += value + PathSeparator
elif(subCode == 'l'):
log_folder = value
elif(subCode == 'b'):
code_base = value
elif(exSubCode == 'artdem'):
artdem = value
elif(exSubCode == 'gprun'):
log.isGPRun = True # direct log messages also to (arcpy.AddMessage)
elif(subCode == 'p'):
pMax = value.rfind('$')
if (pMax == -1):
pMax = value.rfind('@')
if (pMax == -1):
argIndx += 1
continue
dynamic_var = value[pMax + 1:].upper()
v = value[0: pMax]
if (dynamic_var.strip() != ''):
if ((dynamic_var in base.m_dynamic_params.keys()) == False):
base.m_dynamic_params[dynamic_var] = v
argIndx += 1
if (base.m_sources.endswith(PathSeparator)):
base.m_sources = base.m_sources[:len(base.m_sources) - 1]
if (code_base != ''):
base.setCodeBase(code_base)
if (md_path_ != ''):
(p, f) = os.path.split(md_path_)
f = f.strip()
const_gdb_ext_len_ = len(base.const_geodatabase_ext)
ext = p[-const_gdb_ext_len_:].lower()
if ((ext == base.const_geodatabase_ext.lower() or
ext == base.const_geodatabase_SDE_ext.lower()) and
f != ''):
p = p.replace('\\', '/')
w = p.split('/')
workspace_ = ''
for i in range(0, len(w) - 1):
workspace_ += w[i] + '/'
gdb_ = w[len(w) - 1]
base.m_workspace = workspace_
base.m_geodatabase = w[len(w) - 1]
base.m_mdName = f
configName, ext = os.path.splitext(config)
configName = os.path.basename(configName)
# setup log
log.Project('MDCS')
log.LogNamePrefix(configName)
log.StartLog()
log_output_folder = os.path.join(os.path.dirname(solutionLib_path), 'logs')
if (log_folder != ''):
(path, fileName) = os.path.split(log_folder)
if (path != ''):
log_output_folder = path
if (fileName != ''):
log.LogFileName(fileName)
log.SetLogFolder(log_output_folder)
# ends
# Source version check.
versionCheck = ProgramCheckAndUpdate()
log.Message('Checking for updates..', logger.Logger.const_general_text)
verMessage = versionCheck.run(solutionLib_path)
if (verMessage is not None):
if verMessage is True:
log.Message('Installed version is the latest version', logger.Logger.const_general_text)
else:
            if verMessage != 'Ignore':
log.Message(verMessage, logger.Logger.const_warning_text)
# ends
if (os.path.isfile(config) == False):
log.Message('Input config file is not specified/not found! ({})'.format(config), logger.Logger.const_critical_text)
log.Message(base.CCMD_STATUS_FAILED, logger.Logger.const_status_text) # set (failed) status
log.WriteLog('#all')
return False
if (artdem != ''):
(base.m_art_ws, base.m_art_ds) = os.path.split(artdem)
base.m_art_apply_changes = True
comInfo = {
'AR': {'cb': postAddData} # assign a callback function to run custom user code when adding rasters.
}
if (com == ''):
com = base.const_cmd_default_text
solutions = solutionsLib.Solutions(base)
results = solutions.run(config, com, comInfo)
log.Message("Done...", log.const_general_text)
log.WriteLog('#all') # persist information/errors collected.
return results
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
| apache-2.0 | 2,259,711,835,154,969,600 | 37.484848 | 153 | 0.570641 | false |
zsx/hotwire | hotwire/sysdep/ipc_impl/ipc_dbus.py | 3 | 3872 | # This file is part of the Hotwire Shell project API.
# Copyright (C) 2007 Colin Walters <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os,sys,platform,logging
import gtk, dbus, dbus.service
import hotwire.sysdep.ipc_impl.dbusutil as dbusutil
_logger = logging.getLogger("hotwire.sysdep.Ipc.DBus")
BUS_NAME = 'org.hotwireshell'
UI_OPATH = '/hotwire/ui'
UI_IFACE = BUS_NAME + '.Ui'
class Ui(dbus.service.Object):
def __init__(self, factory, bus_name):
super(Ui, self).__init__(bus_name, UI_OPATH)
self.__winfactory = factory
pass
@dbus.service.method(UI_IFACE,
in_signature="u")
def NewWindow(self, timestamp):
_logger.debug("Handling NewWindow method invocation (timestamp=%s)", timestamp)
newwin = self.__winfactory.create_window()
if timestamp > 0:
newwin.present_with_time(timestamp)
else:
newwin.present()
@dbus.service.method(UI_IFACE,
in_signature="usas")
def RunTty(self, timestamp, cwd, args):
self.__winfactory.run_tty(timestamp, cwd, args)
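# Illustrative sketch (not part of the original module): the NewWindow method above
# could be exercised from a shell with dbus-send; a timestamp of 0 means "no
# startup timestamp".
#
#   dbus-send --session --type=method_call --dest=org.hotwireshell \
#       /hotwire/ui org.hotwireshell.Ui.NewWindow uint32:0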
class IpcDBus(object):
def __init__(self):
self.__uiproxy = None
def singleton(self):
try:
_logger.debug("Requesting D-BUS name %s on session bus", BUS_NAME)
dbusutil.take_name(BUS_NAME, bus=dbus.SessionBus())
except dbusutil.DBusNameExistsException, e:
return True
return False
def register_window(self, win):
_logger.debug("Registering window object %s", win)
bus_name = dbus.service.BusName(BUS_NAME, bus=dbus.SessionBus())
self.__uiproxy = Ui(win.factory, bus_name)
def __parse_startup_id(self):
startup_time = None
try:
startup_id_env = os.environ['DESKTOP_STARTUP_ID']
except KeyError, e:
startup_id_env = None
if startup_id_env:
idx = startup_id_env.find('_TIME')
if idx > 0:
idx += 5
startup_time = int(startup_id_env[idx:])
return startup_time
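    # Hypothetical DESKTOP_STARTUP_ID value for illustration:
    #   'hotwire/hotwire/1234-0-myhost_TIME1592514849'
    # Everything after '_TIME' is parsed above as the startup timestamp.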
def new_window(self):
inst = dbus.SessionBus().get_object(BUS_NAME, UI_OPATH)
inst_iface = dbus.Interface(inst, UI_IFACE)
_logger.debug("Sending RaiseNoTimestamp to existing instance")
try:
inst_iface.NewWindow(self.__parse_startup_id() or 0)
except dbus.DBusException, e:
_logger.error("Caught exception attempting to send RaiseNoTimestamp", exc_info=True)
def run_tty(self, cwd, *args):
inst = dbus.SessionBus().get_object(BUS_NAME, UI_OPATH)
inst_iface = dbus.Interface(inst, UI_IFACE)
inst.RunTty(self.__parse_startup_id() or 0, cwd, *args)
def getInstance():
return IpcDBus()
| gpl-2.0 | 2,806,544,742,445,595,000 | 37.72 | 96 | 0.64437 | false |
fbradyirl/home-assistant | homeassistant/components/tellstick/__init__.py | 2 | 10079 | """Support for Tellstick."""
import logging
import threading
import voluptuous as vol
from homeassistant.helpers import discovery
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, CONF_HOST, CONF_PORT
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_DISCOVER_CONFIG = "config"
ATTR_DISCOVER_DEVICES = "devices"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
DEFAULT_SIGNAL_REPETITIONS = 1
DOMAIN = "tellstick"
DATA_TELLSTICK = "tellstick_device"
SIGNAL_TELLCORE_CALLBACK = "tellstick_callback"
# Use a global tellstick domain lock to avoid getting Tellcore errors when
# calling concurrently.
TELLSTICK_LOCK = threading.RLock()
# A TellstickRegistry that keeps a map from tellcore_id to the corresponding
# tellcore_device and HA device (entity).
TELLCORE_REGISTRY = None
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Inclusive(CONF_HOST, "tellcore-net"): cv.string,
vol.Inclusive(CONF_PORT, "tellcore-net"): vol.All(
cv.ensure_list, [cv.port], vol.Length(min=2, max=2)
),
vol.Optional(
CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
): vol.Coerce(int),
}
)
},
extra=vol.ALLOW_EXTRA,
)
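# Example configuration.yaml entry matching the schema above (illustrative sketch;
# host and port values are hypothetical):
#
# tellstick:
#   host: 192.168.0.10
#   port: [50800, 50801]
#   signal_repetitions: 3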
def _discover(hass, config, component_name, found_tellcore_devices):
"""Set up and send the discovery event."""
if not found_tellcore_devices:
return
_LOGGER.info(
"Discovered %d new %s devices", len(found_tellcore_devices), component_name
)
signal_repetitions = config[DOMAIN].get(CONF_SIGNAL_REPETITIONS)
discovery.load_platform(
hass,
component_name,
DOMAIN,
{
ATTR_DISCOVER_DEVICES: found_tellcore_devices,
ATTR_DISCOVER_CONFIG: signal_repetitions,
},
config,
)
def setup(hass, config):
"""Set up the Tellstick component."""
from tellcore.constants import TELLSTICK_DIM, TELLSTICK_UP
from tellcore.telldus import AsyncioCallbackDispatcher
from tellcore.telldus import TelldusCore
from tellcorenet import TellCoreClient
conf = config.get(DOMAIN, {})
net_host = conf.get(CONF_HOST)
net_ports = conf.get(CONF_PORT)
# Initialize remote tellcore client
if net_host:
net_client = TellCoreClient(
host=net_host, port_client=net_ports[0], port_events=net_ports[1]
)
net_client.start()
def stop_tellcore_net(event):
"""Event handler to stop the client."""
net_client.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_tellcore_net)
try:
tellcore_lib = TelldusCore(
callback_dispatcher=AsyncioCallbackDispatcher(hass.loop)
)
except OSError:
_LOGGER.exception("Could not initialize Tellstick")
return False
# Get all devices, switches and lights alike
tellcore_devices = tellcore_lib.devices()
# Register devices
hass.data[DATA_TELLSTICK] = {device.id: device for device in tellcore_devices}
# Discover the lights
_discover(
hass,
config,
"light",
[device.id for device in tellcore_devices if device.methods(TELLSTICK_DIM)],
)
# Discover the cover
_discover(
hass,
config,
"cover",
[device.id for device in tellcore_devices if device.methods(TELLSTICK_UP)],
)
# Discover the switches
_discover(
hass,
config,
"switch",
[
device.id
for device in tellcore_devices
if (not device.methods(TELLSTICK_UP) and not device.methods(TELLSTICK_DIM))
],
)
@callback
def async_handle_callback(tellcore_id, tellcore_command, tellcore_data, cid):
"""Handle the actual callback from Tellcore."""
hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_TELLCORE_CALLBACK, tellcore_id, tellcore_command, tellcore_data
)
# Register callback
callback_id = tellcore_lib.register_device_event(async_handle_callback)
def clean_up_callback(event):
"""Unregister the callback bindings."""
if callback_id is not None:
tellcore_lib.unregister_callback(callback_id)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, clean_up_callback)
return True
class TellstickDevice(Entity):
"""Representation of a Tellstick device.
Contains the common logic for all Tellstick devices.
"""
def __init__(self, tellcore_device, signal_repetitions):
"""Init the Tellstick device."""
self._signal_repetitions = signal_repetitions
self._state = None
self._requested_state = None
self._requested_data = None
self._repeats_left = 0
# Look up our corresponding tellcore device
self._tellcore_device = tellcore_device
self._name = tellcore_device.name
async def async_added_to_hass(self):
"""Register callbacks."""
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_TELLCORE_CALLBACK, self.update_from_callback
)
@property
def should_poll(self):
"""Tell Home Assistant not to poll this device."""
return False
@property
def assumed_state(self):
"""Tellstick devices are always assumed state."""
return True
@property
def name(self):
"""Return the name of the device as reported by tellcore."""
return self._name
@property
def is_on(self):
"""Return true if the device is on."""
return self._state
def _parse_ha_data(self, kwargs):
"""Turn the value from HA into something useful."""
raise NotImplementedError
def _parse_tellcore_data(self, tellcore_data):
"""Turn the value received from tellcore into something useful."""
raise NotImplementedError
def _update_model(self, new_state, data):
"""Update the device entity state to match the arguments."""
raise NotImplementedError
def _send_device_command(self, requested_state, requested_data):
"""Let tellcore update the actual device to the requested state."""
raise NotImplementedError
def _send_repeated_command(self):
"""Send a tellstick command once and decrease the repeat count."""
from tellcore.library import TelldusError
with TELLSTICK_LOCK:
if self._repeats_left > 0:
self._repeats_left -= 1
try:
self._send_device_command(
self._requested_state, self._requested_data
)
except TelldusError as err:
_LOGGER.error(err)
def _change_device_state(self, new_state, data):
"""Turn on or off the device."""
with TELLSTICK_LOCK:
# Set the requested state and number of repeats before calling
# _send_repeated_command the first time. Subsequent calls will be
# made from the callback. (We don't want to queue a lot of commands
# in case the user toggles the switch the other way before the
# queue is fully processed.)
self._requested_state = new_state
self._requested_data = data
self._repeats_left = self._signal_repetitions
self._send_repeated_command()
# Sooner or later this will propagate to the model from the
# callback, but for a fluid UI experience update it directly.
self._update_model(new_state, data)
self.schedule_update_ha_state()
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._change_device_state(True, self._parse_ha_data(kwargs))
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._change_device_state(False, None)
def _update_model_from_command(self, tellcore_command, tellcore_data):
"""Update the model, from a sent tellcore command and data."""
from tellcore.constants import (
TELLSTICK_TURNON,
TELLSTICK_TURNOFF,
TELLSTICK_DIM,
)
if tellcore_command not in [TELLSTICK_TURNON, TELLSTICK_TURNOFF, TELLSTICK_DIM]:
_LOGGER.debug("Unhandled tellstick command: %d", tellcore_command)
return
self._update_model(
tellcore_command != TELLSTICK_TURNOFF,
self._parse_tellcore_data(tellcore_data),
)
def update_from_callback(self, tellcore_id, tellcore_command, tellcore_data):
"""Handle updates from the tellcore callback."""
if tellcore_id != self._tellcore_device.id:
return
self._update_model_from_command(tellcore_command, tellcore_data)
self.schedule_update_ha_state()
# This is a benign race on _repeats_left -- it's checked with the lock
# in _send_repeated_command.
if self._repeats_left > 0:
self._send_repeated_command()
def _update_from_tellcore(self):
"""Read the current state of the device from the tellcore library."""
from tellcore.library import TelldusError
from tellcore.constants import (
TELLSTICK_TURNON,
TELLSTICK_TURNOFF,
TELLSTICK_DIM,
)
with TELLSTICK_LOCK:
try:
last_command = self._tellcore_device.last_sent_command(
TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_DIM
)
last_data = self._tellcore_device.last_sent_value()
self._update_model_from_command(last_command, last_data)
except TelldusError as err:
_LOGGER.error(err)
def update(self):
"""Poll the current state of the device."""
self._update_from_tellcore()
| apache-2.0 | -284,460,317,920,635,900 | 31.40836 | 88 | 0.62278 | false |
umago/virtualbmc | virtualbmc/manager.py | 1 | 7545 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import sys
import shutil
import signal
import daemon
from six.moves import configparser
import exception
import log
from virtualbmc import VirtualBMC
import utils
import config as vbmc_config
LOG = log.get_logger()
# BMC status
RUNNING = 'running'
DOWN = 'down'
DEFAULT_SECTION = 'VirtualBMC'
CONF = vbmc_config.get_config()
class VirtualBMCManager(object):
def _parse_config(self, domain_name):
config_path = os.path.join(utils.CONFIG_PATH, domain_name, 'config')
if not os.path.exists(config_path):
raise exception.DomainNotFound(domain=domain_name)
config = configparser.ConfigParser()
config.read(config_path)
bmc = {}
for item in ('username', 'password', 'address', 'domain_name',
'libvirt_uri', 'libvirt_sasl_username',
'libvirt_sasl_password'):
try:
value = config.get(DEFAULT_SECTION, item)
except configparser.NoOptionError:
value = None
bmc[item] = value
# Port needs to be int
bmc['port'] = config.getint(DEFAULT_SECTION, 'port')
return bmc
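    # Illustrative sketch of the per-domain config file parsed above (all values
    # are hypothetical):
    #
    #   [VirtualBMC]
    #   username = admin
    #   password = password
    #   address = 192.168.122.1
    #   port = 6230
    #   domain_name = node-1
    #   libvirt_uri = qemu:///system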
def _show(self, domain_name):
running = False
try:
pidfile_path = os.path.join(utils.CONFIG_PATH, domain_name, 'pid')
with open(pidfile_path, 'r') as f:
pid = int(f.read())
running = utils.is_pid_running(pid)
except IOError:
pass
bmc_config = self._parse_config(domain_name)
bmc_config['status'] = RUNNING if running else DOWN
# mask the passwords if requested
if not CONF['default']['show_passwords']:
bmc_config = utils.mask_dict_password(bmc_config)
return bmc_config
def add(self, username, password, port, address, domain_name, libvirt_uri,
libvirt_sasl_username, libvirt_sasl_password):
# check libvirt's connection and if domain exist prior to adding it
utils.check_libvirt_connection_and_domain(
libvirt_uri, domain_name,
sasl_username=libvirt_sasl_username,
sasl_password=libvirt_sasl_password)
domain_path = os.path.join(utils.CONFIG_PATH, domain_name)
try:
os.makedirs(domain_path)
except OSError as e:
if e.errno == errno.EEXIST:
                sys.exit('Domain %s already exists' % domain_name)
config_path = os.path.join(domain_path, 'config')
with open(config_path, 'w') as f:
config = configparser.ConfigParser()
config.add_section(DEFAULT_SECTION)
config.set(DEFAULT_SECTION, 'username', username)
config.set(DEFAULT_SECTION, 'password', password)
config.set(DEFAULT_SECTION, 'port', port)
config.set(DEFAULT_SECTION, 'address', address)
config.set(DEFAULT_SECTION, 'domain_name', domain_name)
config.set(DEFAULT_SECTION, 'libvirt_uri', libvirt_uri)
if libvirt_sasl_username and libvirt_sasl_password:
config.set(DEFAULT_SECTION, 'libvirt_sasl_username',
libvirt_sasl_username)
config.set(DEFAULT_SECTION, 'libvirt_sasl_password',
libvirt_sasl_password)
config.write(f)
def delete(self, domain_name):
domain_path = os.path.join(utils.CONFIG_PATH, domain_name)
if not os.path.exists(domain_path):
raise exception.DomainNotFound(domain=domain_name)
try:
self.stop(domain_name)
except exception.VirtualBMCError:
pass
shutil.rmtree(domain_path)
def start(self, domain_name):
domain_path = os.path.join(utils.CONFIG_PATH, domain_name)
if not os.path.exists(domain_path):
raise exception.DomainNotFound(domain=domain_name)
bmc_config = self._parse_config(domain_name)
# check libvirt's connection and domain prior to starting the BMC
utils.check_libvirt_connection_and_domain(
bmc_config['libvirt_uri'], domain_name,
sasl_username=bmc_config['libvirt_sasl_username'],
sasl_password=bmc_config['libvirt_sasl_password'])
# mask the passwords if requested
log_config = bmc_config.copy()
if not CONF['default']['show_passwords']:
log_config = utils.mask_dict_password(bmc_config)
LOG.debug('Starting a Virtual BMC for domain %(domain)s with the '
'following configuration options: %(config)s',
{'domain': domain_name,
'config': ' '.join(['%s="%s"' % (k, log_config[k])
for k in log_config])})
with daemon.DaemonContext(stderr=sys.stderr,
files_preserve=[LOG.handler.stream, ]):
# FIXME(lucasagomes): pyghmi start the sockets when the
# class is instantiated, therefore we need to create the object
# within the daemon context
try:
vbmc = VirtualBMC(**bmc_config)
except Exception as e:
msg = ('Error starting a Virtual BMC for domain %(domain)s. '
'Error: %(error)s' % {'domain': domain_name,
'error': e})
LOG.error(msg)
raise exception.VirtualBMCError(msg)
# Save the PID number
pidfile_path = os.path.join(domain_path, 'pid')
with open(pidfile_path, 'w') as f:
f.write(str(os.getpid()))
LOG.info('Virtual BMC for domain %s started', domain_name)
vbmc.listen()
    def stop(self, domain_name):
LOG.debug('Stopping Virtual BMC for domain %s', domain_name)
domain_path = os.path.join(utils.CONFIG_PATH, domain_name)
if not os.path.exists(domain_path):
raise exception.DomainNotFound(domain=domain_name)
pidfile_path = os.path.join(domain_path, 'pid')
pid = None
try:
with open(pidfile_path, 'r') as f:
pid = int(f.read())
except IOError:
raise exception.VirtualBMCError(
'Error stopping the domain %s: PID file not '
'found' % domain_name)
else:
os.remove(pidfile_path)
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
def list(self):
bmcs = []
try:
for domain in os.listdir(utils.CONFIG_PATH):
if os.path.isdir(os.path.join(utils.CONFIG_PATH, domain)):
bmcs.append(self._show(domain))
except OSError as e:
if e.errno == errno.EEXIST:
return bmcs
return bmcs
def show(self, domain_name):
return self._show(domain_name)
| apache-2.0 | -1,971,991,840,386,615,600 | 34.093023 | 78 | 0.584626 | false |
tlakshman26/cinder-bug-fix-volume-conversion-full | cinder/tests/unit/objects/test_volume.py | 2 | 6211 | # Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import objects
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
class TestVolume(test_objects.BaseObjectsTestCase):
def setUp(self):
super(TestVolume, self).setUp()
        # NOTE (e0ne): base tests contain the original RequestContext from
        # oslo_context. We change it to our RequestContext implementation
        # to have the 'elevated' method.
self.context = context.RequestContext(self.user_id, self.project_id,
is_admin=False)
@staticmethod
def _compare(test, db, obj):
for field, value in db.items():
if not hasattr(obj, field):
continue
test.assertEqual(db[field], obj[field])
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
@mock.patch('cinder.db.volume_get')
def test_get_by_id(self, volume_get, volume_glance_metadata_get):
db_volume = fake_volume.fake_db_volume()
volume_get.return_value = db_volume
volume = objects.Volume.get_by_id(self.context, 1)
self._compare(self, db_volume, volume)
@mock.patch('cinder.db.volume_create')
def test_create(self, volume_create):
db_volume = fake_volume.fake_db_volume()
volume_create.return_value = db_volume
volume = objects.Volume(context=self.context)
volume.create()
self.assertEqual(db_volume['id'], volume.id)
@mock.patch('cinder.db.volume_metadata_get', return_value={})
@mock.patch('cinder.db.volume_get')
def test_refresh(self, volume_get, volume_metadata_get):
db_volume = fake_volume.fake_db_volume()
volume_get.return_value = db_volume
volume = objects.Volume.get_by_id(self.context, '1')
volume.refresh()
volume_get.assert_has_calls([mock.call(self.context, '1'),
mock.call(self.context, '1')])
self._compare(self, db_volume, volume)
@mock.patch('cinder.db.volume_update')
def test_save(self, volume_update):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.save()
volume_update.assert_called_once_with(self.context, volume.id,
{'display_name': 'foobar'})
@mock.patch('cinder.db.volume_destroy')
def test_destroy(self, volume_destroy):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.destroy()
self.assertTrue(volume_destroy.called)
admin_context = volume_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
def test_obj_fields(self):
volume = objects.Volume(context=self.context, id=2, _name_id=2)
self.assertEqual(['name', 'name_id'], volume.obj_extra_fields)
self.assertEqual('volume-2', volume.name)
self.assertEqual('2', volume.name_id)
def test_obj_field_previous_status(self):
volume = objects.Volume(context=self.context,
previous_status='backing-up')
self.assertEqual('backing-up', volume.previous_status)
class TestVolumeList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
@mock.patch('cinder.db.volume_get_all')
def test_get_all(self, volume_get_all, volume_glance_metadata_get):
db_volume = fake_volume.fake_db_volume()
volume_get_all.return_value = [db_volume]
volumes = objects.VolumeList.get_all(self.context,
mock.sentinel.marker,
mock.sentinel.limit,
mock.sentinel.sort_key,
mock.sentinel.sort_dir)
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_host')
def test_get_by_host(self, get_all_by_host):
db_volume = fake_volume.fake_db_volume()
get_all_by_host.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_host(
self.context, 'fake-host')
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_group')
def test_get_by_group(self, get_all_by_group):
db_volume = fake_volume.fake_db_volume()
get_all_by_group.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_group(
self.context, 'fake-host')
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_project')
def test_get_by_project(self, get_all_by_project):
db_volume = fake_volume.fake_db_volume()
get_all_by_project.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_project(
self.context, mock.sentinel.project_id, mock.sentinel.marker,
mock.sentinel.limit, mock.sentinel.sorted_keys,
mock.sentinel.sorted_dirs, mock.sentinel.filters)
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
| apache-2.0 | 5,775,216,041,093,497,000 | 42.131944 | 78 | 0.619385 | false |
EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/src/twisted/web/_stan.py | 15 | 10790 | # -*- test-case-name: twisted.web.test.test_stan -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An s-expression-like syntax for expressing xml in pure python.
Stan tags allow you to build XML documents using Python.
Stan is a DOM, or Document Object Model, implemented using basic Python types
and functions called "flatteners". A flattener is a function that knows how to
turn an object of a specific type into something that is closer to an HTML
string. Stan differs from the W3C DOM by not being as cumbersome and
heavyweight. Since the object model is built using simple python types such as lists,
strings, and dictionaries, the API is simpler and constructing a DOM less
cumbersome.
@var voidElements: the names of HTML 'U{void
elements<http://www.whatwg.org/specs/web-apps/current-work/multipage/syntax.html#void-elements>}';
those which can't have contents and can therefore be self-closing in the
output.
"""
from __future__ import absolute_import, division
from twisted.python.compat import iteritems
class slot(object):
"""
Marker for markup insertion in a template.
@type name: C{str}
@ivar name: The name of this slot. The key which must be used in
L{Tag.fillSlots} to fill it.
@type children: C{list}
@ivar children: The L{Tag} objects included in this L{slot}'s template.
@type default: anything flattenable, or L{None}
@ivar default: The default contents of this slot, if it is left unfilled.
If this is L{None}, an L{UnfilledSlot} will be raised, rather than
L{None} actually being used.
@type filename: C{str} or L{None}
@ivar filename: The name of the XML file from which this tag was parsed.
If it was not parsed from an XML file, L{None}.
@type lineNumber: C{int} or L{None}
@ivar lineNumber: The line number on which this tag was encountered in the
XML file from which it was parsed. If it was not parsed from an XML
file, L{None}.
@type columnNumber: C{int} or L{None}
@ivar columnNumber: The column number at which this tag was encountered in
the XML file from which it was parsed. If it was not parsed from an
XML file, L{None}.
"""
def __init__(self, name, default=None, filename=None, lineNumber=None,
columnNumber=None):
self.name = name
self.children = []
self.default = default
self.filename = filename
self.lineNumber = lineNumber
self.columnNumber = columnNumber
def __repr__(self):
return "slot(%r)" % (self.name,)
class Tag(object):
"""
A L{Tag} represents an XML tags with a tag name, attributes, and children.
A L{Tag} can be constructed using the special L{twisted.web.template.tags}
object, or it may be constructed directly with a tag name. L{Tag}s have a
special method, C{__call__}, which makes representing trees of XML natural
using pure python syntax.
@ivar tagName: The name of the represented element. For a tag like
C{<div></div>}, this would be C{"div"}.
@type tagName: C{str}
@ivar attributes: The attributes of the element.
@type attributes: C{dict} mapping C{str} to renderable objects.
@ivar children: The child L{Tag}s of this C{Tag}.
@type children: C{list} of renderable objects.
@ivar render: The name of the render method to use for this L{Tag}. This
name will be looked up at render time by the
L{twisted.web.template.Element} doing the rendering, via
L{twisted.web.template.Element.lookupRenderMethod}, to determine which
method to call.
@type render: C{str}
@type filename: C{str} or L{None}
@ivar filename: The name of the XML file from which this tag was parsed.
If it was not parsed from an XML file, L{None}.
@type lineNumber: C{int} or L{None}
@ivar lineNumber: The line number on which this tag was encountered in the
XML file from which it was parsed. If it was not parsed from an XML
file, L{None}.
@type columnNumber: C{int} or L{None}
@ivar columnNumber: The column number at which this tag was encountered in
the XML file from which it was parsed. If it was not parsed from an
XML file, L{None}.
@type slotData: C{dict} or L{None}
@ivar slotData: The data which can fill slots. If present, a dictionary
mapping slot names to renderable values. The values in this dict might
be anything that can be present as the child of a L{Tag}; strings,
lists, L{Tag}s, generators, etc.
"""
slotData = None
filename = None
lineNumber = None
columnNumber = None
def __init__(self, tagName, attributes=None, children=None, render=None,
filename=None, lineNumber=None, columnNumber=None):
self.tagName = tagName
self.render = render
if attributes is None:
self.attributes = {}
else:
self.attributes = attributes
if children is None:
self.children = []
else:
self.children = children
if filename is not None:
self.filename = filename
if lineNumber is not None:
self.lineNumber = lineNumber
if columnNumber is not None:
self.columnNumber = columnNumber
def fillSlots(self, **slots):
"""
Remember the slots provided at this position in the DOM.
During the rendering of children of this node, slots with names in
C{slots} will be rendered as their corresponding values.
@return: C{self}. This enables the idiom C{return tag.fillSlots(...)} in
renderers.
"""
if self.slotData is None:
self.slotData = {}
self.slotData.update(slots)
return self
def __call__(self, *children, **kw):
"""
Add children and change attributes on this tag.
This is implemented using __call__ because it then allows the natural
syntax::
table(tr1, tr2, width="100%", height="50%", border="1")
Children may be other tag instances, strings, functions, or any other
object which has a registered flatten.
Attributes may be 'transparent' tag instances (so that
C{a(href=transparent(data="foo", render=myhrefrenderer))} works),
strings, functions, or any other object which has a registered
flattener.
If the attribute is a python keyword, such as 'class', you can add an
underscore to the name, like 'class_'.
There is one special keyword argument, 'render', which will be used as
the name of the renderer and saved as the 'render' attribute of this
instance, rather than the DOM 'render' attribute in the attributes
dictionary.
"""
self.children.extend(children)
for k, v in iteritems(kw):
if k[-1] == '_':
k = k[:-1]
if k == 'render':
self.render = v
else:
self.attributes[k] = v
return self
def _clone(self, obj, deep):
"""
Clone an arbitrary object; used by L{Tag.clone}.
@param obj: an object with a clone method, a list or tuple, or something
which should be immutable.
@param deep: whether to continue cloning child objects; i.e. the
contents of lists, the sub-tags within a tag.
@return: a clone of C{obj}.
"""
if hasattr(obj, 'clone'):
return obj.clone(deep)
elif isinstance(obj, (list, tuple)):
return [self._clone(x, deep) for x in obj]
else:
return obj
def clone(self, deep=True):
"""
Return a clone of this tag. If deep is True, clone all of this tag's
children. Otherwise, just shallow copy the children list without copying
the children themselves.
"""
if deep:
newchildren = [self._clone(x, True) for x in self.children]
else:
newchildren = self.children[:]
newattrs = self.attributes.copy()
for key in newattrs.keys():
newattrs[key] = self._clone(newattrs[key], True)
newslotdata = None
if self.slotData:
newslotdata = self.slotData.copy()
for key in newslotdata:
newslotdata[key] = self._clone(newslotdata[key], True)
newtag = Tag(
self.tagName,
attributes=newattrs,
children=newchildren,
render=self.render,
filename=self.filename,
lineNumber=self.lineNumber,
columnNumber=self.columnNumber)
newtag.slotData = newslotdata
return newtag
def clear(self):
"""
Clear any existing children from this tag.
"""
self.children = []
return self
def __repr__(self):
rstr = ''
if self.attributes:
rstr += ', attributes=%r' % self.attributes
if self.children:
rstr += ', children=%r' % self.children
return "Tag(%r%s)" % (self.tagName, rstr)
voidElements = ('img', 'br', 'hr', 'base', 'meta', 'link', 'param', 'area',
'input', 'col', 'basefont', 'isindex', 'frame', 'command',
                'embed', 'keygen', 'source', 'track', 'wbr')
class CDATA(object):
"""
A C{<![CDATA[]]>} block from a template. Given a separate representation in
the DOM so that they may be round-tripped through rendering without losing
information.
@ivar data: The data between "C{<![CDATA[}" and "C{]]>}".
@type data: C{unicode}
"""
def __init__(self, data):
self.data = data
def __repr__(self):
return 'CDATA(%r)' % (self.data,)
class Comment(object):
"""
A C{<!-- -->} comment from a template. Given a separate representation in
the DOM so that they may be round-tripped through rendering without losing
information.
@ivar data: The data between "C{<!--}" and "C{-->}".
@type data: C{unicode}
"""
def __init__(self, data):
self.data = data
def __repr__(self):
return 'Comment(%r)' % (self.data,)
class CharRef(object):
"""
A numeric character reference. Given a separate representation in the DOM
so that non-ASCII characters may be output as pure ASCII.
@ivar ordinal: The ordinal value of the unicode character to which this is
object refers.
@type ordinal: C{int}
@since: 12.0
"""
def __init__(self, ordinal):
self.ordinal = ordinal
def __repr__(self):
return "CharRef(%d)" % (self.ordinal,)
| mit | 7,378,153,803,653,518,000 | 31.69697 | 102 | 0.618072 | false |
Netuitive/netuitive-diamond | src/collectors/eventstoreprojections/tests/testeventstoreprojections.py | 13 | 5013 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import call, patch
from diamond.collector import Collector
from eventstoreprojections import EventstoreProjectionsCollector
##########################################################################
class TestEventstoreProjectionsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('EventstoreProjectionsCollector', {})
self.collector = EventstoreProjectionsCollector(config, None)
def test_import(self):
self.assertTrue(EventstoreProjectionsCollector)
@patch('urllib2.urlopen')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock, urlopen_mock):
returns = [self.getFixture('projections')]
urlopen_mock.side_effect = lambda *args: returns.pop(0)
self.collector.collect()
metrics = {
'projections.all-reports.eventsProcessedAfterRestart': 88,
'projections.all-reports.bufferedEvents': 0,
'projections.all-reports.coreProcessingTime': 46,
'projections.all-reports.epoch': -1,
'projections.all-reports.version': 1,
'projections.all-reports.progress': 100.0,
'projections.all-reports.status': 1,
'projections.all-reports.writePendingEventsBeforeCheckpoint': 0,
'projections.all-reports.partitionsCached': 1,
'projections.all-reports.writesInProgress': 0,
'projections.all-reports.readsInProgress': 0,
'projections.all-reports.writePendingEventsAfterCheckpoint': 0,
'projections._by_event_type.eventsProcessedAfterRestart': 0,
'projections._by_event_type.bufferedEvents': 0,
'projections._by_event_type.coreProcessingTime': 0,
'projections._by_event_type.epoch': -1,
'projections._by_event_type.version': 0,
'projections._by_event_type.progress': -1.0,
'projections._by_event_type.status': 0,
'projections._by_event_type.writePendingEventsBeforeCheckpoint': 0,
'projections._by_event_type.partitionsCached': 1,
'projections._by_event_type.writesInProgress': 0,
'projections._by_event_type.readsInProgress': 0,
'projections._by_event_type.writePendingEventsAfterCheckpoint': 0,
'projections._by_category.eventsProcessedAfterRestart': 886,
'projections._by_category.bufferedEvents': 0,
'projections._by_category.coreProcessingTime': 10,
'projections._by_category.epoch': -1,
'projections._by_category.version': 1,
'projections._by_category.progress': 100.0,
'projections._by_category.status': 1,
'projections._by_category.writePendingEventsBeforeCheckpoint': 0,
'projections._by_category.partitionsCached': 1,
'projections._by_category.writesInProgress': 0,
'projections._by_category.readsInProgress': 0,
'projections._by_category.writePendingEventsAfterCheckpoint': 0,
'projections._stream_by_cat.eventsProcessedAfterRestart': 0,
'projections._stream_by_cat.bufferedEvents': 0,
'projections._stream_by_cat.coreProcessingTime': 0,
'projections._stream_by_cat.epoch': -1,
'projections._stream_by_cat.version': 0,
'projections._stream_by_cat.progress': -1.0,
'projections._stream_by_cat.status': 0,
'projections._stream_by_cat.writePendingEventsBeforeCheckpoint': 0,
'projections._stream_by_cat.partitionsCached': 1,
'projections._stream_by_cat.writesInProgress': 0,
'projections._stream_by_cat.readsInProgress': 0,
'projections._stream_by_cat.writePendingEventsAfterCheckpoint': 0,
'projections._streams.eventsProcessedAfterRestart': 0,
'projections._streams.bufferedEvents': 0,
'projections._streams.coreProcessingTime': 0,
'projections._streams.epoch': -1,
'projections._streams.version': 0,
'projections._streams.progress': -1.0,
'projections._streams.status': 0,
'projections._streams.writePendingEventsBeforeCheckpoint': 0,
'projections._streams.partitionsCached': 1,
'projections._streams.writesInProgress': 0,
'projections._streams.readsInProgress': 0,
'projections._streams.writePendingEventsAfterCheckpoint': 0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics)
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit | -489,803,046,913,590,850 | 44.990826 | 79 | 0.619589 | false |
david415/txtorcon | test/test_circuit.py | 1 | 16068 | import datetime
import time
from twisted.trial import unittest
from twisted.internet import defer, task
from twisted.python.failure import Failure
from zope.interface import implements
from mock import patch
from txtorcon import Circuit
from txtorcon import build_timeout_circuit
from txtorcon import Stream
from txtorcon import TorControlProtocol
from txtorcon import TorState
from txtorcon import Router
from txtorcon.router import hexIdFromHash
from txtorcon.interface import IRouterContainer
from txtorcon.interface import ICircuitListener
from txtorcon.interface import ICircuitContainer
from txtorcon.interface import CircuitListenerMixin
from txtorcon.interface import ITorControlProtocol
from mock import Mock
class FakeTorController(object):
implements(IRouterContainer, ICircuitListener, ICircuitContainer, ITorControlProtocol)
post_bootstrap = defer.Deferred()
queue_command = Mock()
def __init__(self):
self.routers = {}
self.circuits = {}
self.extend = []
self.failed = []
def router_from_id(self, i):
return self.routers[i[:41]]
def circuit_new(self, circuit):
self.circuits[circuit.id] = circuit
def circuit_extend(self, circuit, router):
self.extend.append((circuit, router))
def circuit_launched(self, circuit):
pass
def circuit_built(self, circuit):
pass
def circuit_closed(self, circuit, **kw):
if circuit.id in self.circuits:
del self.circuits[circuit.id]
def circuit_failed(self, circuit, **kw):
self.failed.append((circuit, kw))
if circuit.id in self.circuits:
del self.circuits[circuit.id]
def find_circuit(self, circid):
return self.circuits[circid]
def close_circuit(self, circid):
del self.circuits[circid]
return defer.succeed('')
class FakeLocation:
def __init__(self):
self.countrycode = 'NA'
class FakeRouter:
def __init__(self, hsh, nm):
self.name = nm
self.id_hash = hsh
self.id_hex = hexIdFromHash(self.id_hash)
self.location = FakeLocation()
examples = ['CIRC 365 LAUNCHED PURPOSE=GENERAL',
'CIRC 365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL',
'CIRC 365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus PURPOSE=GENERAL',
'CIRC 365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL',
'CIRC 365 BUILT $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL',
'CIRC 365 CLOSED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=FINISHED',
'CIRC 365 FAILED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=TIMEOUT']
class CircuitTests(unittest.TestCase):
def test_age(self):
"""
make sure age does something sensible at least once.
"""
tor = FakeTorController()
circuit = Circuit(tor)
now = datetime.datetime.now()
update = '1 LAUNCHED PURPOSE=GENERAL TIME_CREATED=%s' % now.strftime('%Y-%m-%dT%H:%M:%S')
circuit.update(update.split())
diff = circuit.age(now=now)
self.assertEquals(diff, 0)
self.assertTrue(circuit.time_created is not None)
@patch('txtorcon.circuit.datetime')
def test_age_default(self, fake_datetime):
"""
age() w/ defaults works properly
"""
from datetime import datetime
now = datetime.fromtimestamp(60.0)
fake_datetime.return_value = now
fake_datetime.utcnow = Mock(return_value=now)
tor = FakeTorController()
circuit = Circuit(tor)
circuit._time_created = datetime.fromtimestamp(0.0)
self.assertEquals(circuit.age(), 60)
self.assertTrue(circuit.time_created is not None)
def test_no_age_yet(self):
"""
make sure age doesn't explode if there's no TIME_CREATED flag.
"""
tor = FakeTorController()
circuit = Circuit(tor)
now = datetime.datetime.now()
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
self.assertTrue(circuit.time_created is None)
diff = circuit.age(now=now)
self.assertEquals(diff, None)
def test_listener_mixin(self):
listener = CircuitListenerMixin()
from zope.interface.verify import verifyObject
self.assertTrue(verifyObject(ICircuitListener, listener))
# call all the methods with None for each arg. This is mostly
# just to gratuitously increase test coverage, but also
# serves to ensure these methods don't just blow up
for (methodname, desc) in ICircuitListener.namesAndDescriptions():
method = getattr(listener, methodname)
args = [None] * len(desc.positional)
method(*args)
def test_unlisten(self):
tor = FakeTorController()
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = FakeRouter(
'$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a'
)
circuit = Circuit(tor)
circuit.listen(tor)
circuit.listen(tor)
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
circuit.unlisten(tor)
circuit.update('1 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.circuits), 1)
self.assertTrue(1 in tor.circuits)
self.assertEqual(len(tor.extend), 0)
self.assertEqual(1, len(circuit.path))
self.assertEqual(0, len(circuit.listeners))
def test_path_update(self):
cp = TorControlProtocol()
state = TorState(cp, False)
circuit = Circuit(state)
circuit.update('1 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
self.assertEqual(1, len(circuit.path))
self.assertEqual(
'$E11D2B2269CC25E67CA6C9FB5843497539A74FD0',
circuit.path[0].id_hex
)
self.assertEqual('eris', circuit.path[0].name)
def test_wrong_update(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
self.assertRaises(
Exception,
circuit.update,
'2 LAUNCHED PURPOSE=GENERAL'.split()
)
def test_closed_remaining_streams(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('1 LAUNCHED PURPOSE=GENERAL'.split())
stream = Stream(tor)
stream.update("1 NEW 0 94.23.164.42.$43ED8310EB968746970896E8835C2F1991E50B69.exit:9001 SOURCE_ADDR=(Tor_internal):0 PURPOSE=DIR_FETCH".split())
circuit.streams.append(stream)
self.assertEqual(len(circuit.streams), 1)
circuit.update('1 CLOSED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=FINISHED'.split())
circuit.update('1 FAILED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=TIMEOUT'.split())
errs = self.flushLoggedErrors()
self.assertEqual(len(errs), 2)
def test_updates(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = FakeRouter(
'$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a'
)
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = FakeRouter(
'$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b'
)
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = FakeRouter(
'$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c'
)
for ex in examples[:-1]:
circuit.update(ex.split()[1:])
self.assertEqual(circuit.state, ex.split()[2])
self.assertEqual(circuit.purpose, 'GENERAL')
if '$' in ex:
self.assertEqual(
len(circuit.path),
len(ex.split()[3].split(','))
)
for (r, p) in zip(ex.split()[3].split(','), circuit.path):
d = r.split('=')[0]
self.assertEqual(d, p.id_hash)
def test_extend_messages(self):
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
b = FakeRouter('$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b')
c = FakeRouter('$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = b
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = c
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('365 LAUNCHED PURPOSE=GENERAL'.split())
self.assertEqual(tor.extend, [])
circuit.update('365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.extend), 1)
self.assertEqual(tor.extend[0], (circuit, a))
circuit.update('365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.extend), 2)
self.assertEqual(tor.extend[0], (circuit, a))
self.assertEqual(tor.extend[1], (circuit, b))
circuit.update('365 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
self.assertEqual(len(tor.extend), 3)
self.assertEqual(tor.extend[0], (circuit, a))
self.assertEqual(tor.extend[1], (circuit, b))
self.assertEqual(tor.extend[2], (circuit, c))
def test_extends_no_path(self):
'''
without connectivity, it seems you get EXTENDS messages with no
path update.
'''
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('753 EXTENDED BUILD_FLAGS=IS_INTERNAL,NEED_CAPACITY,NEED_UPTIME PURPOSE=MEASURE_TIMEOUT TIME_CREATED=2012-07-30T18:23:18.956704'.split())
self.assertEqual(tor.extend, [])
self.assertEqual(circuit.path, [])
self.assertTrue('IS_INTERNAL' in circuit.build_flags)
self.assertTrue('NEED_CAPACITY' in circuit.build_flags)
self.assertTrue('NEED_UPTIME' in circuit.build_flags)
def test_str(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.id = 1
str(circuit)
router = Router(tor)
circuit.path.append(router)
str(circuit)
def test_failed_reason(self):
tor = FakeTorController()
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('1 FAILED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL REASON=TIMEOUT'.split())
self.assertEqual(len(tor.failed), 1)
circ, kw = tor.failed[0]
self.assertEqual(circ, circuit)
self.assertTrue('PURPOSE' in kw)
self.assertTrue('REASON' in kw)
self.assertEqual(kw['PURPOSE'], 'GENERAL')
self.assertEqual(kw['REASON'], 'TIMEOUT')
def test_close_circuit(self):
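        """
        close() returns a Deferred that fires only once Tor reports CLOSED
        """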
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
b = FakeRouter('$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b')
c = FakeRouter('$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = b
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = c
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('123 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
self.assertEqual(3, len(circuit.path))
d = circuit.close()
# we already pretended that Tor answered "OK" to the
# CLOSECIRCUIT call (see close_circuit() in FakeTorController
# above) however the circuit isn't "really" closed yet...
self.assertTrue(not d.called)
        # not very unit-test-y? I probably shouldn't delve into
        # internals, I suppose...
self.assertTrue(circuit._closing_deferred is not None)
# simulate that Tor has really closed the circuit for us
# this should cause our Deferred to callback
circuit.update('123 CLOSED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL REASON=FINISHED'.split())
# confirm that our circuit callback has been triggered already
self.assertRaises(
defer.AlreadyCalledError,
d.callback,
"should have been called already"
)
return d
def test_is_built(self):
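        """
        is_built/when_built() Deferreds fire once the circuit is BUILT
        """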
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
b = FakeRouter('$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5', 'b')
c = FakeRouter('$253DFF1838A2B7782BE7735F74E50090D46CA1BC', 'c')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
tor.routers['$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5'] = b
tor.routers['$253DFF1838A2B7782BE7735F74E50090D46CA1BC'] = c
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('123 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
built0 = circuit.is_built
built1 = circuit.when_built()
self.assertTrue(built0 is not built1)
self.assertFalse(built0.called)
self.assertFalse(built1.called)
circuit.update('123 BUILT $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris,$50DD343021E509EB3A5A7FD0D8A4F8364AFBDCB5=venus,$253DFF1838A2B7782BE7735F74E50090D46CA1BC=chomsky PURPOSE=GENERAL'.split())
        # create callback when we're already in BUILT; should be
        # callback'd already
built2 = circuit.when_built()
self.assertTrue(built2 is not built1)
self.assertTrue(built2 is not built0)
self.assertTrue(built0.called)
self.assertTrue(built1.called)
self.assertTrue(built2.called)
self.assertTrue(built0.result == circuit)
self.assertTrue(built1.result == circuit)
self.assertTrue(built2.result == circuit)
def test_is_built_errback(self):
tor = FakeTorController()
a = FakeRouter('$E11D2B2269CC25E67CA6C9FB5843497539A74FD0', 'a')
tor.routers['$E11D2B2269CC25E67CA6C9FB5843497539A74FD0'] = a
state = TorState(tor)
circuit = Circuit(tor)
circuit.listen(tor)
circuit.update('123 EXTENDED $E11D2B2269CC25E67CA6C9FB5843497539A74FD0=eris PURPOSE=GENERAL'.split())
state.circuit_new(circuit)
d = circuit.when_built()
state.circuit_closed(circuit)
self.assertTrue(d.called)
self.assertTrue(isinstance(d.result, Failure))
| mit | -2,256,933,492,965,641,200 | 40.412371 | 221 | 0.679861 | false |
blckshrk/Weboob | modules/marmiton/backend.py | 1 | 2009 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import ICapRecipe, Recipe
from weboob.tools.backend import BaseBackend
from .browser import MarmitonBrowser
from urllib import quote_plus
__all__ = ['MarmitonBackend']
class MarmitonBackend(BaseBackend, ICapRecipe):
NAME = 'marmiton'
MAINTAINER = u'Julien Veyssier'
EMAIL = '[email protected]'
VERSION = '0.h'
DESCRIPTION = u'Marmiton French recipe website'
LICENSE = 'AGPLv3+'
BROWSER = MarmitonBrowser
def get_recipe(self, id):
return self.browser.get_recipe(id)
def iter_recipes(self, pattern):
return self.browser.iter_recipes(quote_plus(pattern.encode('utf-8')))
def fill_recipe(self, recipe, fields):
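        # these detail fields are only available on the full recipe page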
if 'nb_person' in fields or 'instructions' in fields:
rec = self.get_recipe(recipe.id)
recipe.picture_url = rec.picture_url
recipe.instructions = rec.instructions
recipe.ingredients = rec.ingredients
recipe.comments = rec.comments
recipe.author = rec.author
recipe.nb_person = rec.nb_person
recipe.cooking_time = rec.cooking_time
recipe.preparation_time = rec.preparation_time
return recipe
OBJECTS = {
Recipe: fill_recipe,
}
| agpl-3.0 | -5,020,013,221,003,372,000 | 31.934426 | 77 | 0.689895 | false |
ASCIT/donut-python | donut/modules/rooms/helpers.py | 2 | 3956 | from datetime import datetime, timedelta
from itertools import groupby
import flask
import pymysql.cursors
from donut.auth_utils import get_user_id
def get_rooms():
"""Gets a list of rooms in the form {id, name, title, desc}"""
query = 'SELECT room_id, location, title, description FROM rooms'
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query)
return cursor.fetchall()
def is_room(room_id_string):
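    """Returns True if a room with the given room_id exists"""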
query = "SELECT room_id FROM rooms WHERE room_id = %s"
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [room_id_string])
return cursor.fetchone() is not None
def add_reservation(room, username, reason, start, end):
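    """Inserts a reservation for the given room, user, reason and time window"""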
insertion = """
INSERT INTO room_reservations
(room_id, user_id, reason, start_time, end_time)
VALUES (%s, %s, %s, %s, %s)
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(insertion,
[room, get_user_id(username), reason, start, end])
def get_all_reservations(rooms, start, end):
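    """Returns reservations overlapping [start, end], grouped by day, optionally filtered by room_id"""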
query = """
SELECT reservation_id, location, start_time, end_time
FROM room_reservations NATURAL JOIN rooms
WHERE %s <= end_time AND start_time <= %s
"""
if rooms:
query += " AND room_id IN (" + ",".join("%s" for room in rooms) + ")"
query += " ORDER BY start_time"
with flask.g.pymysql_db.cursor() as cursor:
values = [start, end + timedelta(days=1)]
values.extend(rooms)
cursor.execute(query, values)
reservations = cursor.fetchall()
return [
{
"day": day,
"reservations": list(day_rooms)
}
for day, day_rooms in groupby(
reservations, lambda reservation: reservation["start_time"].date())
]
def split(lst, pred):
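    """Splits lst at the first item failing pred; returns [matching prefix, remainder]"""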
switch_index = len(lst)
for index, item in enumerate(lst):
if not pred(item):
switch_index = index
break
return [lst[:switch_index], lst[switch_index:]]
def get_my_reservations(username):
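    """Returns the user's past (most recent first) and upcoming reservations"""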
query = """
SELECT reservation_id, location, start_time, end_time
FROM room_reservations AS reservation
NATURAL JOIN rooms
NATURAL JOIN users
WHERE username = %s
ORDER BY start_time
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [username])
reservations = cursor.fetchall()
now = datetime.now()
past, upcoming = split(reservations, lambda res: res["start_time"] < now)
return {
"past": past[::-1], #show most recent past first
"upcoming": upcoming
}
def get_reservation(id):
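    """Returns the details of a single reservation, or None if it doesn't exist"""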
query = """
SELECT location, title, full_name, start_time, end_time, reason, username
FROM room_reservations AS reservation
NATURAL JOIN rooms
NATURAL JOIN members_full_name
NATURAL JOIN users
WHERE reservation_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [id])
return cursor.fetchone()
def delete_reservation(id, username):
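    """Deletes the reservation, but only if it belongs to the given username"""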
if username is None:
raise "Not logged in"
query = """
DELETE FROM room_reservations
WHERE reservation_id = %s
AND user_id IN (
SELECT user_id FROM users WHERE username = %s
)
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [id, username])
def conflicts(room, start, end):
"""Returns a list of overlapping [start_time, end_time] tuples"""
query = """
SELECT start_time, end_time FROM room_reservations
WHERE room_id = %s AND %s < end_time AND start_time < %s
ORDER BY start_time
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [room, start, end])
results = cursor.fetchall()
return [(r['start_time'], r['end_time']) for r in results]
| mit | 8,378,764,097,430,169,000 | 29.666667 | 81 | 0.602882 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-documentdb/setup.py | 4 | 2800 | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-documentdb"
PACKAGE_PPRINT_NAME = "DocumentDB Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(),
install_requires=[
'msrestazure~=0.4.7',
'azure-common~=1.1.5',
],
cmdclass=cmdclass
)
| mit | -7,216,384,342,864,207,000 | 31.941176 | 91 | 0.606786 | false |
zzicewind/oslo.log | oslo_log/formatters.py | 3 | 9527 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import logging
import logging.config
import logging.handlers
import sys
import traceback
import six
from six import moves
from oslo_context import context as context_utils
from oslo_serialization import jsonutils
def _dictify_context(context):
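    """Converts a request context into a plain dict (empty dict if None)."""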
if context is None:
return {}
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
# A configuration object is given to us when the application registers
# the logging options.
_CONF = None
def _store_global_conf(conf):
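    """Stores the configuration object for later use by the formatters."""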
global _CONF
_CONF = conf
def _update_record_with_context(record):
"""Given a log record, update it with context information.
The request context, if there is one, will either be in the
extra values for the incoming record or in the global
thread-local store.
"""
context = record.__dict__.get(
'context',
context_utils.get_current()
)
d = _dictify_context(context)
# Copy the context values directly onto the record so they can be
# used by the formatting strings.
for k, v in d.items():
setattr(record, k, v)
return context
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
# Build the extra values that were given to us, including
# the context.
context = _update_record_with_context(record)
if hasattr(record, 'extra'):
extra = record.extra.copy()
else:
extra = {}
for key in getattr(record, 'extra_keys', []):
if key not in extra:
extra[key] = getattr(record, key)
# If we saved a context object, explode it into the extra
# dictionary because the values are more useful than the
# object reference.
if 'context' in extra:
extra.update(_dictify_context(context))
del extra['context']
message['extra'] = extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
self.conf = kwargs.pop('config', _CONF)
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(record.msg, six.text_type):
record.msg = six.text_type(record.msg)
# store project info
record.project = self.project
record.version = self.version
# FIXME(dims): We need a better way to pick up the instance
# or instance_uuid parameters from the kwargs from say
# LOG.info or LOG.warn
instance_extra = ''
instance = getattr(record, 'instance', None)
instance_uuid = getattr(record, 'instance_uuid', None)
context = _update_record_with_context(record)
if instance:
try:
instance_extra = (self.conf.instance_format
% instance)
except TypeError:
instance_extra = instance
elif instance_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': instance_uuid})
elif context:
# FIXME(dhellmann): We should replace these nova-isms with
# more generic handling in the Context class. See the
# app-agnostic-logging-parameters blueprint.
instance = getattr(context, 'instance', None)
instance_uuid = getattr(context, 'instance_uuid', None)
# resource_uuid was introduced in oslo_context's
# RequestContext
resource_uuid = getattr(context, 'resource_uuid', None)
if instance:
instance_extra = (self.conf.instance_format
% {'uuid': instance})
elif instance_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': instance_uuid})
elif resource_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': resource_uuid})
record.instance = instance_extra
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity', 'resource',
'user_name', 'project_name'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = self.conf.logging_context_format_string
else:
fmt = self.conf.logging_default_format_string
if (record.levelno == logging.DEBUG and
self.conf.logging_debug_format_suffix):
fmt += " " + self.conf.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if self.conf.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = self.conf.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
| apache-2.0 | -2,249,657,567,246,892,000 | 36.070039 | 78 | 0.593786 | false |
ripples-alive/Crawler | diyifanwen/diyifanwen/settings.py | 2 | 3239 | # -*- coding: utf-8 -*-
# Scrapy settings for diyifanwen project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'diyifanwen'
SPIDER_MODULES = ['diyifanwen.spiders']
NEWSPIDER_MODULE = 'diyifanwen.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'diyifanwen.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'diyifanwen.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'diyifanwen.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 10.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| mit | -4,219,199,908,389,683,700 | 34.988889 | 135 | 0.767521 | false |
isandlaTech/cohorte-runtime | python/cohorte/composer/top/distributor.py | 2 | 2869 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Top Composer: Node Distributor
Clusters the components of a composition into groups according to several
criteria.
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 3.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Instantiate
# Composer
import cohorte.composer
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_DISTRIBUTOR_NODE)
@Requires('_distance_criteria',
cohorte.composer.SERVICE_TOP_CRITERION_DISTANCE, aggregate=True)
@Instantiate('cohorte-composer-top-distributor')
class NodeDistributor(object):
"""
Clusters components into groups. Each group corresponds to a node.
"""
def __init__(self):
"""
Sets up members
"""
# Distance criteria
self._distance_criteria = []
def distribute(self, composition):
"""
Computes the distribution of the components of the given composition
:param composition: A RawComposition bean
:return: A dictionary: Node name -> set(RawComponent)
"""
return self.redistribute(composition.all_components())
def redistribute(self, components):
"""
Computes the distribution of the given components
:param components: A list of RawComponent beans
:return: A dictionary: Node name -> set(RawComponent)
"""
groups = {}
not_grouped = list(components)
for criterion in self._distance_criteria[:]:
# Group components
grouped, not_grouped = criterion.group(not_grouped, groups)
# Update the distribution
for group, group_components in grouped.items():
groups.setdefault(group, set()).update(group_components)
if not_grouped:
# Some components have not been grouped: use the "undefined" group
groups.setdefault(None, set()).update(not_grouped)
return groups
| apache-2.0 | 1,590,243,616,781,579,000 | 29.849462 | 80 | 0.63855 | false |