repo_name | path | copies | size | content | license
---|---|---|---|---|---
msmolens/VTK | ThirdParty/ZopeInterface/zope/interface/tests/test_interface.py | 30 | 72591 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Interface implementation
"""
import unittest
_marker = object()
class _SilencePy3Deprecations(unittest.TestCase):
# silence deprecation warnings under py3
def failUnless(self, expr):
# St00pid speling.
return self.assertTrue(expr)
def failIf(self, expr):
# St00pid speling.
return self.assertFalse(expr)
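# (Added note, not in the original module: ``failUnless``/``failIf`` are the
# pre-2.7 unittest spellings, deprecated in favour of ``assertTrue`` and
# ``assertFalse``; routing them through the modern names keeps Python 3 runs
# free of DeprecationWarnings while the tests below keep their old wording.)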
class Test_invariant(unittest.TestCase):
def test_w_single(self):
from zope.interface.interface import invariant
from zope.interface.interface import TAGGED_DATA
def _check(*args, **kw):
pass
class Foo(object):
invariant(_check)
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'invariants': [_check]})
def test_w_multiple(self):
from zope.interface.interface import invariant
from zope.interface.interface import TAGGED_DATA
def _check(*args, **kw):
pass
def _another_check(*args, **kw):
pass
class Foo(object):
invariant(_check)
invariant(_another_check)
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'invariants': [_check, _another_check]})
class Test_taggedValue(unittest.TestCase):
def test_w_single(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': ['baz']})
def test_w_multiple(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
taggedValue('qux', 'spam')
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': ['baz'], 'qux': 'spam'})
def test_w_multiple_overwriting(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
taggedValue('qux', 'spam')
taggedValue('bar', 'frob')
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': 'frob', 'qux': 'spam'})
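# Illustrative sketch (added commentary, not part of the original tests): in
# normal use the ``taggedValue`` directive is called inside an ``Interface``
# subclass body rather than a plain class, and the collected TAGGED_DATA ends
# up as the interface's tagged values, e.g.:
#
#     from zope.interface import Interface, taggedValue
#
#     class IExample(Interface):
#         taggedValue('answer', 42)
#
#     IExample.getTaggedValue('answer')   # -> 42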
class ElementTests(unittest.TestCase):
DEFAULT_NAME = 'AnElement'
def _getTargetClass(self):
from zope.interface.interface import Element
return Element
def _makeOne(self, name=None, __doc__=_marker):
if name is None:
name = self.DEFAULT_NAME
if __doc__ is _marker:
return self._getTargetClass()(name)
return self._getTargetClass()(name, __doc__)
def test_ctor_defaults(self):
element = self._makeOne()
self.assertEqual(element.__name__, self.DEFAULT_NAME)
self.assertEqual(element.getName(), self.DEFAULT_NAME)
self.assertEqual(element.__doc__, '')
self.assertEqual(element.getDoc(), '')
self.assertEqual(list(element.getTaggedValueTags()), [])
def test_ctor_no_doc_space_in_name(self):
element = self._makeOne('An Element')
self.assertEqual(element.__name__, None)
self.assertEqual(element.__doc__, 'An Element')
def test_getTaggedValue_miss(self):
element = self._makeOne()
self.assertRaises(KeyError, element.getTaggedValue, 'nonesuch')
def test_queryTaggedValue_miss(self):
element = self._makeOne()
self.assertEqual(element.queryTaggedValue('nonesuch'), None)
def test_queryTaggedValue_miss_w_default(self):
element = self._makeOne()
self.assertEqual(element.queryTaggedValue('nonesuch', 'bar'), 'bar')
def test_setTaggedValue(self):
element = self._makeOne()
element.setTaggedValue('foo', 'bar')
self.assertEqual(list(element.getTaggedValueTags()), ['foo'])
self.assertEqual(element.getTaggedValue('foo'), 'bar')
self.assertEqual(element.queryTaggedValue('foo'), 'bar')
class SpecificationBasePyTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import SpecificationBasePy
return SpecificationBasePy
def _makeOne(self):
return self._getTargetClass()()
def test_providedBy_miss(self):
from zope.interface import interface
from zope.interface.declarations import _empty
sb = self._makeOne()
def _providedBy(obj):
return _empty
with _Monkey(interface, providedBy=_providedBy):
self.failIf(sb.providedBy(object()))
def test_providedBy_hit(self):
from zope.interface import interface
sb = self._makeOne()
class _Decl(object):
_implied = {sb: {},}
def _providedBy(obj):
return _Decl()
with _Monkey(interface, providedBy=_providedBy):
self.failUnless(sb.providedBy(object()))
def test_implementedBy_miss(self):
from zope.interface import interface
from zope.interface.declarations import _empty
sb = self._makeOne()
def _implementedBy(obj):
return _empty
with _Monkey(interface, implementedBy=_implementedBy):
self.failIf(sb.implementedBy(object()))
def test_implementedBy_hit(self):
from zope.interface import interface
sb = self._makeOne()
class _Decl(object):
_implied = {sb: {},}
def _implementedBy(obj):
return _Decl()
with _Monkey(interface, implementedBy=_implementedBy):
self.failUnless(sb.implementedBy(object()))
def test_isOrExtends_miss(self):
sb = self._makeOne()
sb._implied = {} # not defined by SpecificationBasePy
self.failIf(sb.isOrExtends(object()))
def test_isOrExtends_hit(self):
sb = self._makeOne()
testing = object()
sb._implied = {testing: {}} # not defined by SpecificationBasePy
self.failUnless(sb.isOrExtends(testing))
def test___call___miss(self):
sb = self._makeOne()
sb._implied = {} # not defined by SpecificationBasePy
self.failIf(sb(object()))
def test___call___hit(self):
sb = self._makeOne()
testing = object()
sb._implied = {testing: {}} # not defined by SpecificationBasePy
self.failUnless(sb(testing))
class InterfaceBasePyTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import InterfaceBasePy
return InterfaceBasePy
def _makeOne(self, object_should_provide):
class IB(self._getTargetClass()):
def _call_conform(self, conform):
return conform(self)
def providedBy(self, obj):
return object_should_provide
return IB()
def test___call___w___conform___returning_value(self):
ib = self._makeOne(False)
conformed = object()
class _Adapted(object):
def __conform__(self, iface):
return conformed
self.failUnless(ib(_Adapted()) is conformed)
def test___call___w___conform___miss_ob_provides(self):
ib = self._makeOne(True)
class _Adapted(object):
def __conform__(self, iface):
return None
adapted = _Adapted()
self.failUnless(ib(adapted) is adapted)
def test___call___wo___conform___ob_no_provides_w_alternate(self):
ib = self._makeOne(False)
adapted = object()
alternate = object()
self.failUnless(ib(adapted, alternate) is alternate)
def test___call___w___conform___ob_no_provides_wo_alternate(self):
ib = self._makeOne(False)
adapted = object()
self.assertRaises(TypeError, ib, adapted)
def test___adapt___ob_provides(self):
ib = self._makeOne(True)
adapted = object()
self.failUnless(ib.__adapt__(adapted) is adapted)
def test___adapt___ob_no_provides_uses_hooks(self):
from zope.interface import interface
ib = self._makeOne(False)
adapted = object()
_missed = []
def _hook_miss(iface, obj):
_missed.append((iface, obj))
return None
def _hook_hit(iface, obj):
return obj
with _Monkey(interface, adapter_hooks=[_hook_miss, _hook_hit]):
self.failUnless(ib.__adapt__(adapted) is adapted)
self.assertEqual(_missed, [(ib, adapted)])
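# (Added commentary, not in the original module: the cases above exercise the
# PEP 246-style lookup order used when an interface is called like
# ``IFoo(obj, default)``: first ``obj.__conform__(IFoo)`` is tried and any
# non-None result wins; then ``IFoo.__adapt__(obj)`` is tried, which returns
# ``obj`` itself when ``providedBy`` says it already provides the interface
# and otherwise walks ``adapter_hooks``; finally the default is returned, or
# TypeError is raised when no default was given.)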
class SpecificationTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import Specification
return Specification
def _makeOne(self, bases=_marker):
if bases is _marker:
return self._getTargetClass()()
return self._getTargetClass()(bases)
def test_ctor(self):
from zope.interface.interface import Interface
spec = self._makeOne()
self.assertEqual(spec.__bases__, ())
self.assertEqual(len(spec._implied), 2)
self.failUnless(spec in spec._implied)
self.failUnless(Interface in spec._implied)
self.assertEqual(len(spec.dependents), 0)
def test_subscribe_first_time(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
self.assertEqual(len(spec.dependents), 1)
self.assertEqual(spec.dependents[dep], 1)
def test_subscribe_again(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
spec.subscribe(dep)
self.assertEqual(spec.dependents[dep], 2)
def test_unsubscribe_miss(self):
spec = self._makeOne()
dep = DummyDependent()
self.assertRaises(KeyError, spec.unsubscribe, dep)
def test_unsubscribe(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
spec.subscribe(dep)
spec.unsubscribe(dep)
self.assertEqual(spec.dependents[dep], 1)
spec.unsubscribe(dep)
self.failIf(dep in spec.dependents)
def test___setBases_subscribes_bases_and_notifies_dependents(self):
from zope.interface.interface import Interface
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
class I(Interface):
pass
class J(Interface):
pass
spec.__bases__ = (I,)
self.assertEqual(dep._changed, [spec])
self.assertEqual(I.dependents[spec], 1)
spec.__bases__ = (J,)
self.assertEqual(I.dependents.get(spec), None)
self.assertEqual(J.dependents[spec], 1)
def test_changed_clears_volatiles_and_implied(self):
from zope.interface.interface import Interface
class I(Interface):
pass
spec = self._makeOne()
spec._v_attrs = 'Foo'
spec._implied[I] = ()
spec.changed(spec)
self.failUnless(getattr(spec, '_v_attrs', self) is self)
self.failIf(I in spec._implied)
class InterfaceClassTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import InterfaceClass
return InterfaceClass
def _makeOne(self, name='ITest', bases=(), attrs=None, __doc__=None,
__module__=None):
return self._getTargetClass()(name, bases, attrs, __doc__, __module__)
def test_ctor_defaults(self):
klass = self._getTargetClass()
inst = klass('ITesting')
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.getBases(), ())
def test_ctor_bad_bases(self):
klass = self._getTargetClass()
self.assertRaises(TypeError, klass, 'ITesting', (object(),))
def test_ctor_w_attrs_attrib_methods(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.names(), ATTRS.keys())
def test_ctor_attrs_w___locals__(self):
ATTRS = {'__locals__': {}}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.names(), ATTRS.keys())
def test_ctor_attrs_w__decorator_non_return(self):
from zope.interface.interface import _decorator_non_return
ATTRS = {'dropme': _decorator_non_return}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(list(inst.names()), [])
def test_ctor_attrs_w_invalid_attr_type(self):
from zope.interface.exceptions import InvalidInterface
ATTRS = {'invalid': object()}
klass = self._getTargetClass()
self.assertRaises(InvalidInterface, klass, 'ITesting', attrs=ATTRS)
def test_interfaces(self):
iface = self._makeOne()
self.assertEqual(list(iface.interfaces()), [iface])
def test_getBases(self):
iface = self._makeOne()
sub = self._makeOne('ISub', bases=(iface,))
self.assertEqual(sub.getBases(), (iface,))
def test_isEqualOrExtendedBy_identity(self):
iface = self._makeOne()
self.failUnless(iface.isEqualOrExtendedBy(iface))
def test_isEqualOrExtendedBy_subiface(self):
iface = self._makeOne()
sub = self._makeOne('ISub', bases=(iface,))
self.failUnless(iface.isEqualOrExtendedBy(sub))
self.failIf(sub.isEqualOrExtendedBy(iface))
def test_isEqualOrExtendedBy_unrelated(self):
one = self._makeOne('One')
another = self._makeOne('Another')
self.failIf(one.isEqualOrExtendedBy(another))
self.failIf(another.isEqualOrExtendedBy(one))
def test_names_w_all_False_ignores_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=False)), ['baz'])
def test_names_w_all_True_no_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(sorted(one.names(all=True)), ['bar', 'foo'])
def test_names_w_all_True_w_bases_simple(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=True)), ['bar', 'baz', 'foo'])
def test_names_w_all_True_bases_w_same_names(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=True)), ['bar', 'baz', 'foo'])
def test___iter__(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived), ['bar', 'baz', 'foo'])
def test_namesAndDescriptions_w_all_False_ignores_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=False)),
[('baz', DERIVED_ATTRS['baz']),
])
def test_namesAndDescriptions_w_all_True_no_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(sorted(one.namesAndDescriptions(all=True)),
[('bar', ATTRS['bar']),
('foo', ATTRS['foo']),
])
def test_namesAndDescriptions_w_all_True_simple(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=True)),
[('bar', BASE_ATTRS['bar']),
('baz', DERIVED_ATTRS['baz']),
('foo', BASE_ATTRS['foo']),
])
def test_namesAndDescriptions_w_all_True_bases_w_same_names(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=True)),
[('bar', BASE_ATTRS['bar']),
('baz', DERIVED_ATTRS['baz']),
('foo', DERIVED_ATTRS['foo']),
])
def test_getDescriptionFor_miss(self):
one = self._makeOne()
self.assertRaises(KeyError, one.getDescriptionFor, 'nonesuch')
def test_getDescriptionFor_hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(one.getDescriptionFor('foo'), ATTRS['foo'])
self.assertEqual(one.getDescriptionFor('bar'), ATTRS['bar'])
def test___getitem___miss(self):
one = self._makeOne()
def _test():
return one['nonesuch']
self.assertRaises(KeyError, _test)
def test___getitem___hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(one['foo'], ATTRS['foo'])
self.assertEqual(one['bar'], ATTRS['bar'])
def test___contains___miss(self):
one = self._makeOne()
self.failIf('nonesuch' in one)
def test___contains___hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.failUnless('foo' in one)
self.failUnless('bar' in one)
def test_direct_miss(self):
one = self._makeOne()
self.assertEqual(one.direct('nonesuch'), None)
def test_direct_hit_local_miss_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(derived.direct('foo'), DERIVED_ATTRS['foo'])
self.assertEqual(derived.direct('baz'), DERIVED_ATTRS['baz'])
self.assertEqual(derived.direct('bar'), None)
def test_queryDescriptionFor_miss(self):
iface = self._makeOne()
self.assertEqual(iface.queryDescriptionFor('nonesuch'), None)
def test_queryDescriptionFor_hit(self):
from zope.interface import Attribute
ATTRS = {'attr': Attribute('Title', 'Description')}
iface = self._makeOne(attrs=ATTRS)
self.assertEqual(iface.queryDescriptionFor('attr'), ATTRS['attr'])
#TODO (or not: 'deferred' looks like a fossil to me.)
#def test_deferred_cache_hit(self):
#def test_deferred_cache_miss(self):
#def test_deferred_cache_miss_w_bases(self):
def test_validateInvariants_pass(self):
_called_with = []
def _passable(*args, **kw):
_called_with.append((args, kw))
return True
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_passable])
self.assertEqual(iface.validateInvariants(obj), None)
self.assertEqual(_called_with, [((obj,), {})])
def test_validateInvariants_fail_wo_errors_passed(self):
from zope.interface.exceptions import Invalid
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_passable, _fail])
self.assertRaises(Invalid, iface.validateInvariants, obj)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
def test_validateInvariants_fail_w_errors_passed(self):
from zope.interface.exceptions import Invalid
_errors = []
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_fail])
self.assertRaises(Invalid, iface.validateInvariants, obj, _errors)
self.assertEqual(_fail_called_with, [((obj,), {})])
self.assertEqual(len(_errors), 1)
self.failUnless(isinstance(_errors[0], Invalid))
def test_validateInvariants_fail_in_base_wo_errors_passed(self):
from zope.interface.exceptions import Invalid
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
base = self._makeOne('IBase')
derived = self._makeOne('IDerived', (base,))
obj = object()
base.setTaggedValue('invariants', [_fail])
derived.setTaggedValue('invariants', [_passable])
self.assertRaises(Invalid, derived.validateInvariants, obj)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
#TODO
def test_validateInvariants_fail_in_base_w_errors_passed(self):
from zope.interface.exceptions import Invalid
_errors = []
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
base = self._makeOne('IBase')
derived = self._makeOne('IDerived', (base,))
obj = object()
base.setTaggedValue('invariants', [_fail])
derived.setTaggedValue('invariants', [_passable])
self.assertRaises(Invalid, derived.validateInvariants, obj, _errors)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
self.assertEqual(len(_errors), 1)
self.failUnless(isinstance(_errors[0], Invalid))
def test___reduce__(self):
iface = self._makeOne('PickleMe')
self.assertEqual(iface.__reduce__(), 'PickleMe')
def test___hash___normal(self):
iface = self._makeOne('HashMe')
self.assertEqual(hash(iface),
hash((('HashMe',
'zope.interface.tests.test_interface'))))
def test___hash___missing_required_attrs(self):
import warnings
try:
from warnings import catch_warnings
except ImportError: # Python 2.5
return
class Derived(self._getTargetClass()):
def __init__(self):
pass # Don't call base class.
derived = Derived()
with catch_warnings(record=True) as warned:
warnings.simplefilter('always') # see LP #825249
self.assertEqual(hash(derived), 1)
self.assertEqual(len(warned), 1)
self.failUnless(warned[0].category is UserWarning)
self.assertEqual(str(warned[0].message),
'Hashing uninitialized InterfaceClass instance')
def test_comparison_with_None(self):
iface = self._makeOne()
self.failUnless(iface < None)
self.failUnless(iface <= None)
self.failIf(iface == None)
self.failUnless(iface != None)
self.failIf(iface >= None)
self.failIf(iface > None)
self.failIf(None < iface)
self.failIf(None <= iface)
self.failIf(None == iface)
self.failUnless(None != iface)
self.failUnless(None >= iface)
self.failUnless(None > iface)
def test_comparison_with_same_instance(self):
iface = self._makeOne()
self.failIf(iface < iface)
self.failUnless(iface <= iface)
self.failUnless(iface == iface)
self.failIf(iface != iface)
self.failUnless(iface >= iface)
self.failIf(iface > iface)
def test_comparison_with_same_named_instance_in_other_module(self):
one = self._makeOne('IName', __module__='zope.interface.tests.one')
other = self._makeOne('IName', __module__='zope.interface.tests.other')
self.failUnless(one < other)
self.failIf(other < one)
self.failUnless(one <= other)
self.failIf(other <= one)
self.failIf(one == other)
self.failIf(other == one)
self.failUnless(one != other)
self.failUnless(other != one)
self.failIf(one >= other)
self.failUnless(other >= one)
self.failIf(one > other)
self.failUnless(other > one)
class InterfaceTests(_SilencePy3Deprecations):
def test_attributes_link_to_interface(self):
from zope.interface import Interface
from zope.interface import Attribute
class I1(Interface):
attr = Attribute("My attr")
self.failUnless(I1['attr'].interface is I1)
def test_methods_link_to_interface(self):
from zope.interface import Interface
class I1(Interface):
def method(foo, bar, bingo):
pass
self.failUnless(I1['method'].interface is I1)
def test_classImplements_simple(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ICurrent(Interface):
def method1(a, b):
pass
def method2(a, b):
pass
class IOther(Interface):
pass
class Current(object):
__implemented__ = ICurrent
def method1(self, a, b):
return 1
def method2(self, a, b):
return 2
current = Current()
self.failUnless(ICurrent.implementedBy(Current))
self.failIf(IOther.implementedBy(Current))
self.failUnless(ICurrent in implementedBy(Current))
self.failIf(IOther in implementedBy(Current))
self.failUnless(ICurrent in providedBy(current))
self.failIf(IOther in providedBy(current))
def test_classImplements_base_not_derived(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
class Current():
__implemented__ = IBase
def method(self):
pass
current = Current()
self.failUnless(IBase.implementedBy(Current))
self.failIf(IDerived.implementedBy(Current))
self.failUnless(IBase in implementedBy(Current))
self.failIf(IDerived in implementedBy(Current))
self.failUnless(IBase in providedBy(current))
self.failIf(IDerived in providedBy(current))
def test_classImplements_base_and_derived(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
class Current(object):
__implemented__ = IDerived
def method(self):
pass
current = Current()
self.failUnless(IBase.implementedBy(Current))
self.failUnless(IDerived.implementedBy(Current))
self.failIf(IBase in implementedBy(Current))
self.failUnless(IBase in implementedBy(Current).flattened())
self.failUnless(IDerived in implementedBy(Current))
self.failIf(IBase in providedBy(current))
self.failUnless(IBase in providedBy(current).flattened())
self.failUnless(IDerived in providedBy(current))
def test_classImplements_multiple(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ILeft(Interface):
def method():
pass
class IRight(ILeft):
pass
class Left(object):
__implemented__ = ILeft
def method(self):
pass
class Right(object):
__implemented__ = IRight
class Ambi(Left, Right):
pass
ambi = Ambi()
self.failUnless(ILeft.implementedBy(Ambi))
self.failUnless(IRight.implementedBy(Ambi))
self.failUnless(ILeft in implementedBy(Ambi))
self.failUnless(IRight in implementedBy(Ambi))
self.failUnless(ILeft in providedBy(ambi))
self.failUnless(IRight in providedBy(ambi))
def test_classImplements_multiple_w_explicit_implements(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ILeft(Interface):
def method():
pass
class IRight(ILeft):
pass
class IOther(Interface):
pass
class Left():
__implemented__ = ILeft
def method(self):
pass
class Right(object):
__implemented__ = IRight
class Other(object):
__implemented__ = IOther
class Mixed(Left, Right):
__implemented__ = Left.__implemented__, Other.__implemented__
mixed = Mixed()
self.failUnless(ILeft.implementedBy(Mixed))
self.failIf(IRight.implementedBy(Mixed))
self.failUnless(IOther.implementedBy(Mixed))
self.failUnless(ILeft in implementedBy(Mixed))
self.failIf(IRight in implementedBy(Mixed))
self.failUnless(IOther in implementedBy(Mixed))
self.failUnless(ILeft in providedBy(mixed))
self.failIf(IRight in providedBy(mixed))
self.failUnless(IOther in providedBy(mixed))
def test_interface_deferred_class_method_broken(self):
from zope.interface import Interface
from zope.interface.exceptions import BrokenImplementation
class IDeferring(Interface):
def method():
pass
class Deferring(IDeferring.deferred()):
__implemented__ = IDeferring
deferring = Deferring()
self.assertRaises(BrokenImplementation, deferring.method)
def testInterfaceExtendsInterface(self):
from zope.interface import Interface
new = Interface.__class__
FunInterface = new('FunInterface')
BarInterface = new('BarInterface', [FunInterface])
BobInterface = new('BobInterface')
BazInterface = new('BazInterface', [BobInterface, BarInterface])
self.failUnless(BazInterface.extends(BobInterface))
self.failUnless(BazInterface.extends(BarInterface))
self.failUnless(BazInterface.extends(FunInterface))
self.failIf(BobInterface.extends(FunInterface))
self.failIf(BobInterface.extends(BarInterface))
self.failUnless(BarInterface.extends(FunInterface))
self.failIf(BarInterface.extends(BazInterface))
def test_verifyClass(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.verify import verifyClass
from zope.interface._compat import _u
class ICheckMe(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
class CheckMe(object):
__implemented__ = ICheckMe
attr = 'value'
def method(self):
pass
self.failUnless(verifyClass(ICheckMe, CheckMe))
def test_verifyObject(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.verify import verifyObject
from zope.interface._compat import _u
class ICheckMe(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
class CheckMe(object):
__implemented__ = ICheckMe
attr = 'value'
def method(self):
pass
check_me = CheckMe()
self.failUnless(verifyObject(ICheckMe, check_me))
def test_interface_object_provides_Interface(self):
from zope.interface import Interface
class AnInterface(Interface):
pass
self.failUnless(Interface.providedBy(AnInterface))
def test_names_simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
self.assertEqual(sorted(ISimple.names()), ['attr', 'method'])
def test_names_derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
pass
def method2():
pass
self.assertEqual(sorted(IDerived.names()),
['attr2', 'method', 'method2'])
self.assertEqual(sorted(IDerived.names(all=True)),
['attr', 'attr2', 'method', 'method2'])
def test_namesAndDescriptions_simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
name_values = sorted(ISimple.namesAndDescriptions())
self.assertEqual(len(name_values), 2)
self.assertEqual(name_values[0][0], 'attr')
self.failUnless(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr')
self.assertEqual(name_values[0][1].__doc__, 'My attr')
self.assertEqual(name_values[1][0], 'method')
self.failUnless(isinstance(name_values[1][1], Method))
self.assertEqual(name_values[1][1].__name__, 'method')
self.assertEqual(name_values[1][1].__doc__, 'My method')
def test_namesAndDescriptions_derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.interface import Method
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
name_values = sorted(IDerived.namesAndDescriptions())
self.assertEqual(len(name_values), 3)
self.assertEqual(name_values[0][0], 'attr2')
self.failUnless(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr2')
self.assertEqual(name_values[0][1].__doc__, 'My attr2')
self.assertEqual(name_values[1][0], 'method')
self.failUnless(isinstance(name_values[1][1], Method))
self.assertEqual(name_values[1][1].__name__, 'method')
self.assertEqual(name_values[1][1].__doc__, 'My method, overridden')
self.assertEqual(name_values[2][0], 'method2')
self.failUnless(isinstance(name_values[2][1], Method))
self.assertEqual(name_values[2][1].__name__, 'method2')
self.assertEqual(name_values[2][1].__doc__, 'My method2')
name_values = sorted(IDerived.namesAndDescriptions(all=True))
self.assertEqual(len(name_values), 4)
self.assertEqual(name_values[0][0], 'attr')
self.failUnless(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr')
self.assertEqual(name_values[0][1].__doc__, 'My attr')
self.assertEqual(name_values[1][0], 'attr2')
self.failUnless(isinstance(name_values[1][1], Attribute))
self.assertEqual(name_values[1][1].__name__, 'attr2')
self.assertEqual(name_values[1][1].__doc__, 'My attr2')
self.assertEqual(name_values[2][0], 'method')
self.failUnless(isinstance(name_values[2][1], Method))
self.assertEqual(name_values[2][1].__name__, 'method')
self.assertEqual(name_values[2][1].__doc__, 'My method, overridden')
self.assertEqual(name_values[3][0], 'method2')
self.failUnless(isinstance(name_values[3][1], Method))
self.assertEqual(name_values[3][1].__name__, 'method2')
self.assertEqual(name_values[3][1].__doc__, 'My method2')
def test_getDescriptionFor_nonesuch_no_default(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertRaises(KeyError, IEmpty.getDescriptionFor, 'nonesuch')
def test_getDescriptionFor_simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
a_desc = ISimple.getDescriptionFor('attr')
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = ISimple.getDescriptionFor('method')
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method')
def test_getDescriptionFor_derived(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
a_desc = IDerived.getDescriptionFor('attr')
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = IDerived.getDescriptionFor('method')
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method, overridden')
a2_desc = IDerived.getDescriptionFor('attr2')
self.failUnless(isinstance(a2_desc, Attribute))
self.assertEqual(a2_desc.__name__, 'attr2')
self.assertEqual(a2_desc.__doc__, 'My attr2')
m2_desc = IDerived.getDescriptionFor('method2')
self.failUnless(isinstance(m2_desc, Method))
self.assertEqual(m2_desc.__name__, 'method2')
self.assertEqual(m2_desc.__doc__, 'My method2')
def test___getitem__nonesuch(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertRaises(KeyError, IEmpty.__getitem__, 'nonesuch')
def test___getitem__simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
a_desc = ISimple['attr']
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = ISimple['method']
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method')
def test___getitem___derived(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
a_desc = IDerived['attr']
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = IDerived['method']
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method, overridden')
a2_desc = IDerived['attr2']
self.failUnless(isinstance(a2_desc, Attribute))
self.assertEqual(a2_desc.__name__, 'attr2')
self.assertEqual(a2_desc.__doc__, 'My attr2')
m2_desc = IDerived['method2']
self.failUnless(isinstance(m2_desc, Method))
self.assertEqual(m2_desc.__name__, 'method2')
self.assertEqual(m2_desc.__doc__, 'My method2')
def test___contains__nonesuch(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.failIf('nonesuch' in IEmpty)
def test___contains__simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
self.failUnless('attr' in ISimple)
self.failUnless('method' in ISimple)
def test___contains__derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
self.failUnless('attr' in IDerived)
self.failUnless('method' in IDerived)
self.failUnless('attr2' in IDerived)
self.failUnless('method2' in IDerived)
def test___iter__empty(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertEqual(list(IEmpty), [])
def test___iter__simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
self.assertEqual(sorted(list(ISimple)), ['attr', 'method'])
def test___iter__derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
self.assertEqual(sorted(list(IDerived)),
['attr', 'attr2', 'method', 'method2'])
def test_function_attributes_become_tagged_values(self):
from zope.interface import Interface
class ITagMe(Interface):
def method():
pass
method.optional = 1
method = ITagMe['method']
self.assertEqual(method.getTaggedValue('optional'), 1)
def test___doc___non_element(self):
from zope.interface import Interface
class IHaveADocString(Interface):
"xxx"
self.assertEqual(IHaveADocString.__doc__, "xxx")
self.assertEqual(list(IHaveADocString), [])
def test___doc___as_element(self):
from zope.interface import Attribute
from zope.interface import Interface
class IHaveADocString(Interface):
"xxx"
__doc__ = Attribute('the doc')
self.assertEqual(IHaveADocString.__doc__, "")
self.assertEqual(list(IHaveADocString), ['__doc__'])
def _errorsEqual(self, has_invariant, error_len, error_msgs, iface):
from zope.interface.exceptions import Invalid
self.assertRaises(Invalid, iface.validateInvariants, has_invariant)
e = []
try:
iface.validateInvariants(has_invariant, e)
except Invalid as error:
self.assertEqual(error.args[0], e)
else:
self.fail('validateInvariants should always raise Invalid')
self.assertEqual(len(e), error_len)
msgs = [error.args[0] for error in e]
msgs.sort()
for msg in msgs:
self.assertEqual(msg, error_msgs.pop(0))
def test_invariant_simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class HasInvariant(object):
pass
# set up
has_invariant = HasInvariant()
directlyProvides(has_invariant, IInvariant)
# the tests
self.assertEqual(IInvariant.getTaggedValue('invariants'),
[_ifFooThenBar])
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
has_invariant.bar = 27
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
has_invariant.foo = 42
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
del has_invariant.bar
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
IInvariant)
def test_invariant_nested(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class ISubInvariant(IInvariant):
invariant(_barGreaterThanFoo)
class HasInvariant(object):
pass
# nested interfaces with invariants:
self.assertEqual(ISubInvariant.getTaggedValue('invariants'),
[_barGreaterThanFoo])
has_invariant = HasInvariant()
directlyProvides(has_invariant, ISubInvariant)
has_invariant.foo = 42
# even though ISubInvariant picks up IInvariant's invariant as well, we
# should still only have one error.
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
ISubInvariant)
# however, if we set foo and bar to values where bar is no longer greater
# than foo, we'll get the other error instead
has_invariant.foo = 2
has_invariant.bar = 1
self._errorsEqual(has_invariant, 1,
['Please, Boo MUST be greater than Foo!'],
ISubInvariant)
# and if we set foo to a positive number and bar to 0, we'll
# get both errors!
has_invariant.foo = 1
has_invariant.bar = 0
self._errorsEqual(has_invariant, 2,
['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'],
ISubInvariant)
# for a happy ending, we'll make the invariants happy
has_invariant.foo = 1
has_invariant.bar = 2
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
def test_invariant_mutandis(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class HasInvariant(object):
pass
# now we'll do two invariants on the same interface,
# just to make sure that a small
# multi-invariant interface is at least minimally tested.
has_invariant = HasInvariant()
directlyProvides(has_invariant, IInvariant)
has_invariant.foo = 42
# if you really need to mutate, then this would be the way to do it.
# Probably a bad idea, though. :-)
old_invariants = IInvariant.getTaggedValue('invariants')
invariants = old_invariants[:]
invariants.append(_barGreaterThanFoo)
IInvariant.setTaggedValue('invariants', invariants)
# even though the interface has changed, we should still only have one
# error.
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
IInvariant)
# however, if we set foo and bar to values where bar is no longer greater
# than foo, we'll get the new error
has_invariant.foo = 2
has_invariant.bar = 1
self._errorsEqual(has_invariant, 1,
['Please, Boo MUST be greater than Foo!'], IInvariant)
# and if we set foo to a positive number and bar to 0, we'll
# get both errors!
has_invariant.foo = 1
has_invariant.bar = 0
self._errorsEqual(has_invariant, 2,
['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'],
IInvariant)
# for another happy ending, we'll make the invariants happy again
has_invariant.foo = 1
has_invariant.bar = 2
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
# clean up
IInvariant.setTaggedValue('invariants', old_invariants)
def test___doc___element(self):
from zope.interface import Interface
from zope.interface import Attribute
class I(Interface):
"xxx"
self.assertEqual(I.__doc__, "xxx")
self.assertEqual(list(I), [])
class I(Interface):
"xxx"
__doc__ = Attribute('the doc')
self.assertEqual(I.__doc__, "")
self.assertEqual(list(I), ['__doc__'])
def testIssue228(self):
# Test for http://collector.zope.org/Zope3-dev/228
# Old style classes don't have a '__class__' attribute
import sys
if sys.version[0] < '3':
# No old style classes in Python 3, so the test becomes moot.
from zope.interface import Interface
class I(Interface):
"xxx"
class OldStyle:
__providedBy__ = None
self.assertRaises(AttributeError, I.providedBy, OldStyle)
def test_invariant_as_decorator(self):
from zope.interface import Interface
from zope.interface import Attribute
from zope.interface import implementer
from zope.interface import invariant
from zope.interface.exceptions import Invalid
class IRange(Interface):
min = Attribute("Lower bound")
max = Attribute("Upper bound")
@invariant
def range_invariant(ob):
if ob.max < ob.min:
raise Invalid('max < min')
@implementer(IRange)
class Range(object):
def __init__(self, min, max):
self.min, self.max = min, max
IRange.validateInvariants(Range(1,2))
IRange.validateInvariants(Range(1,1))
try:
IRange.validateInvariants(Range(2,1))
except Invalid as e:
self.assertEqual(str(e), 'max < min')
def test_taggedValue(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import taggedValue
class ITagged(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
taggedValue('qux', 'Spam')
class HasInvariant(object):
pass
self.assertEqual(ITagged.getTaggedValue('qux'), 'Spam')
self.failUnless('qux' in ITagged.getTaggedValueTags())
def test_description_cache_management(self):
# See https://bugs.launchpad.net/zope.interface/+bug/185974
# There was a bug where the cache used by Specification.get() was not
# cleared when the bases were changed.
from zope.interface import Interface
from zope.interface import Attribute
class I1(Interface):
a = Attribute('a')
class I2(I1):
pass
class I3(I2):
pass
self.failUnless(I3.get('a') is I1.get('a'))
I2.__bases__ = (Interface,)
self.failUnless(I3.get('a') is None)
def test___call___defers_to___conform___(self):
from zope.interface import Interface
from zope.interface import implementer
class I(Interface):
pass
@implementer(I)
class C(object):
def __conform__(self, proto):
return 0
self.assertEqual(I(C()), 0)
def test___call___object_implements(self):
from zope.interface import Interface
from zope.interface import implementer
class I(Interface):
pass
@implementer(I)
class C(object):
pass
c = C()
self.failUnless(I(c) is c)
def test___call___miss_wo_alternate(self):
from zope.interface import Interface
class I(Interface):
pass
class C(object):
pass
c = C()
self.assertRaises(TypeError, I, c)
def test___call___miss_w_alternate(self):
from zope.interface import Interface
class I(Interface):
pass
class C(object):
pass
c = C()
self.failUnless(I(c, self) is self)
def test___call___w_adapter_hook(self):
from zope.interface import Interface
from zope.interface.interface import adapter_hooks
old_hooks = adapter_hooks[:]
def _miss(iface, obj):
pass
def _hit(iface, obj):
return self
class I(Interface):
pass
class C(object):
pass
c = C()
old_adapter_hooks = adapter_hooks[:]
adapter_hooks[:] = [_miss, _hit]
try:
self.failUnless(I(c) is self)
finally:
adapter_hooks[:] = old_adapter_hooks
class AttributeTests(ElementTests):
DEFAULT_NAME = 'TestAttribute'
def _getTargetClass(self):
from zope.interface.interface import Attribute
return Attribute
class MethodTests(AttributeTests):
DEFAULT_NAME = 'TestMethod'
def _getTargetClass(self):
from zope.interface.interface import Method
return Method
def test_optional_as_property(self):
method = self._makeOne()
self.assertEqual(method.optional, {})
method.optional = {'foo': 'bar'}
self.assertEqual(method.optional, {'foo': 'bar'})
del method.optional
self.assertEqual(method.optional, {})
def test___call___raises_BrokenImplementation(self):
from zope.interface.exceptions import BrokenImplementation
method = self._makeOne()
try:
method()
except BrokenImplementation as e:
self.assertEqual(e.interface, None)
self.assertEqual(e.name, self.DEFAULT_NAME)
else:
self.fail('__call__ should raise BrokenImplementation')
def test_getSignatureInfo_bare(self):
method = self._makeOne()
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_getSignatureString_bare(self):
method = self._makeOne()
self.assertEqual(method.getSignatureString(), '()')
def test_getSignatureString_w_only_required(self):
method = self._makeOne()
method.positional = method.required = ['foo']
self.assertEqual(method.getSignatureString(), '(foo)')
def test_getSignatureString_w_optional(self):
method = self._makeOne()
method.positional = method.required = ['foo']
method.optional = {'foo': 'bar'}
self.assertEqual(method.getSignatureString(), "(foo='bar')")
def test_getSignatureString_w_varargs(self):
method = self._makeOne()
method.varargs = 'args'
self.assertEqual(method.getSignatureString(), "(*args)")
def test_getSignatureString_w_kwargs(self):
method = self._makeOne()
method.kwargs = 'kw'
self.assertEqual(method.getSignatureString(), "(**kw)")
class Test_fromFunction(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.interface.interface import fromFunction
return fromFunction(*args, **kw)
def test_bare(self):
def _func():
"DOCSTRING"
method = self._callFUT(_func)
self.assertEqual(method.getName(), '_func')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_interface(self):
from zope.interface.interface import InterfaceClass
class IFoo(InterfaceClass):
pass
def _func():
"DOCSTRING"
method = self._callFUT(_func, interface=IFoo)
self.assertEqual(method.interface, IFoo)
def test_w_name(self):
def _func():
"DOCSTRING"
method = self._callFUT(_func, name='anotherName')
self.assertEqual(method.getName(), 'anotherName')
def test_w_only_required(self):
def _func(foo):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_optional(self):
def _func(foo='bar'):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo'])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {'foo': 'bar'})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_optional_self(self):
# XXX This is a weird case, trying to cover the following code in
# FUT::
#
# nr = na-len(defaults)
# if nr < 0:
# defaults=defaults[-nr:]
# nr = 0
def _func(self='bar'):
"DOCSTRING"
method = self._callFUT(_func, imlevel=1)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
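# (Added commentary, not in the original module: with ``imlevel=1`` the
# implicit ``self`` is stripped, so for ``_func(self='bar')`` the positional
# argument count na becomes 0 while one default value remains.  Assuming
# fromFunction computes ``nr = na - len(defaults)``, that gives nr = -1 < 0,
# so the surplus default is discarded via ``defaults = defaults[-nr:]`` and
# nr is reset to 0, which is why positional, required and optional all come
# back empty above.)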
def test_w_varargs(self):
def _func(*args):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], None)
def test_w_kwargs(self):
def _func(**kw):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], 'kw')
def test_full_spectrum(self):
def _func(foo, bar='baz', *args, **kw):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo', 'bar'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {'bar': 'baz'})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], 'kw')
class Test_fromMethod(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.interface.interface import fromMethod
return fromMethod(*args, **kw)
def test_no_args(self):
class Foo(object):
def bar(self):
"DOCSTRING"
method = self._callFUT(Foo.bar)
self.assertEqual(method.getName(), 'bar')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_full_spectrum(self):
class Foo(object):
def bar(self, foo, bar='baz', *args, **kw):
"DOCSTRING"
method = self._callFUT(Foo.bar)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo', 'bar'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {'bar': 'baz'})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], 'kw')
def test_w_non_method(self):
def foo():
"DOCSTRING"
method = self._callFUT(foo)
self.assertEqual(method.getName(), 'foo')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
class DummyDependent(object):
def __init__(self):
self._changed = []
def changed(self, originally_changed):
self._changed.append(originally_changed)
def _barGreaterThanFoo(obj):
from zope.interface.exceptions import Invalid
foo = getattr(obj, 'foo', None)
bar = getattr(obj, 'bar', None)
if foo is not None and isinstance(foo, type(bar)):
# type checking should be handled elsewhere (like, say,
# schema); these invariants should be intra-interface
# constraints. This is a hacky way to do it, maybe, but you
# get the idea
if not bar > foo:
raise Invalid('Please, Boo MUST be greater than Foo!')
def _ifFooThenBar(obj):
from zope.interface.exceptions import Invalid
if getattr(obj, 'foo', None) and not getattr(obj, 'bar', None):
raise Invalid('If Foo, then Bar!')
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
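# Illustrative sketch (added, not part of the original module): how _Monkey is
# meant to be used.  The replaced attribute is saved in __init__ and put back
# in __exit__, so the patch only lasts for the ``with`` block; the attribute
# name patched here mirrors the tests above.
def _monkey_usage_sketch():
    from zope.interface import interface

    def _fake_provided_by(obj):
        return None

    with _Monkey(interface, providedBy=_fake_provided_by):
        # inside the block the module-level name is replaced
        assert interface.providedBy is _fake_provided_by
    # after the block the saved original has been restored
    assert interface.providedBy is not _fake_provided_by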
def test_suite():
import doctest
return unittest.TestSuite((
unittest.makeSuite(ElementTests),
unittest.makeSuite(SpecificationBasePyTests),
unittest.makeSuite(InterfaceBasePyTests),
unittest.makeSuite(SpecificationTests),
unittest.makeSuite(InterfaceTests),
unittest.makeSuite(AttributeTests),
unittest.makeSuite(MethodTests),
unittest.makeSuite(Test_fromFunction),
        unittest.makeSuite(Test_fromMethod),
doctest.DocTestSuite(),
doctest.DocTestSuite("zope.interface.interface"),
))
| bsd-3-clause |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/apps/headphones/lib/requests/auth.py | 120 | 6669 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
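# Rough usage sketch (illustrative only): the header value is just the
# base64-encoded "username:password" pair, e.g.
#
#   _basic_auth_str('user', 'pass')  ->  'Basic dXNlcjpwYXNz'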
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
self.num_401_calls = 1
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        hash_utf8 = None
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
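    # Sketch of the computation above for the common qop="auth" case
    # (illustrative summary of RFC 2617):
    #   HA1      = H(username ":" realm ":" password)
    #   HA2      = H(method ":" request-uri)
    #   response = H(HA1 ":" nonce ":" nc ":" cnonce ":" qop ":" HA2)
    # where H is MD5 by default, or SHA-1 when the server sends algorithm=SHA.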
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
self.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self.num_401_calls = 1
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r
| gpl-2.0 |
unatv2/unatv2 | qa/rpc-tests/listtransactions.py | 145 | 6081 | #!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
def run_test(nodes):
# Simple send, 0 to 1:
txid = nodes[0].sendtoaddress(nodes[1].getnewaddress(), 0.1)
sync_mempools(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
nodes[0].setgenerate(True, 1)
sync_blocks(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = nodes[0].sendtoaddress(nodes[0].getnewaddress(), 0.2)
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
    # sendmany from node1: twice to self, twice to node0:
send_to = { nodes[0].getnewaddress() : 0.11, nodes[1].getnewaddress() : 0.22,
nodes[0].getaccountaddress("from1") : 0.33, nodes[1].getaccountaddress("toself") : 0.44 }
txid = nodes[1].sendmany("", send_to)
sync_mempools(nodes)
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_bitcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| mit |
saketkc/statsmodels | tools/backport_pr.py | 30 | 5263 | #!/usr/bin/env python
"""
Backport pull requests to a particular branch.
Usage: backport_pr.py branch [PR]
e.g.:
python tools/backport_pr.py 0.13.1 123
to backport PR #123 onto branch 0.13.1
or
python tools/backport_pr.py 1.x
to see what PRs are marked for backport that have yet to be applied.
Copied from IPython 9e82bc5
https://github.com/ipython/ipython/blob/master/tools/backport_pr.py
"""
from __future__ import print_function
import os
import re
import sys
from subprocess import Popen, PIPE, check_call, check_output
from urllib import urlopen
from gh_api import (
get_issues_list,
get_pull_request,
get_pull_request_files,
is_pull_request,
get_milestone_id,
)
from pandas import Series
def find_rejects(root='.'):
for dirname, dirs, files in os.walk(root):
for fname in files:
if fname.endswith('.rej'):
yield os.path.join(dirname, fname)
def get_current_branch():
branches = check_output(['git', 'branch'])
for branch in branches.splitlines():
if branch.startswith('*'):
return branch[1:].strip()
def backport_pr(branch, num, project='statsmodels/statsmodels'):
current_branch = get_current_branch()
if branch != current_branch:
check_call(['git', 'checkout', branch])
check_call(['git', 'pull'])
pr = get_pull_request(project, num, auth=True)
files = get_pull_request_files(project, num, auth=True)
patch_url = pr['patch_url']
title = pr['title']
description = pr['body']
fname = "PR%i.patch" % num
if os.path.exists(fname):
print("using patch from {fname}".format(**locals()))
with open(fname) as f:
patch = f.read()
else:
req = urlopen(patch_url)
patch = req.read()
msg = "Backport PR #%i: %s" % (num, title) + '\n\n' + description
check = Popen(['git', 'apply', '--check', '--verbose'], stdin=PIPE)
a,b = check.communicate(patch)
if check.returncode:
print("patch did not apply, saving to {fname}".format(**locals()))
print("edit {fname} until `cat {fname} | git apply --check` succeeds".format(**locals()))
print("then run tools/backport_pr.py {num} again".format(**locals()))
if not os.path.exists(fname):
with open(fname, 'wb') as f:
f.write(patch)
return 1
p = Popen(['git', 'apply'], stdin=PIPE)
a,b = p.communicate(patch)
filenames = [ f['filename'] for f in files ]
check_call(['git', 'add'] + filenames)
check_call(['git', 'commit', '-m', msg])
print("PR #%i applied, with msg:" % num)
print()
print(msg)
print()
if branch != current_branch:
check_call(['git', 'checkout', current_branch])
return 0
backport_re = re.compile(r"[Bb]ackport.*?(\d+)")
def already_backported(branch, since_tag=None):
"""return set of PRs that have been backported already"""
if since_tag is None:
since_tag = check_output(['git','describe', branch, '--abbrev=0']).decode('utf8').strip()
cmd = ['git', 'log', '%s..%s' % (since_tag, branch), '--oneline']
lines = check_output(cmd).decode('utf8')
return set(int(num) for num in backport_re.findall(lines))
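# Illustrative behaviour of backport_re (hypothetical log lines):
#
#   backport_re.findall("abc1234 Backport PR #123: fix foo\ndef5678 backport #45")
#   ->  ['123', '45']
#
# already_backported() turns these captures into a set of ints.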
def should_backport(labels=None, milestone=None):
"""return set of PRs marked for backport"""
if labels is None and milestone is None:
raise ValueError("Specify one of labels or milestone.")
elif labels is not None and milestone is not None:
raise ValueError("Specify only one of labels or milestone.")
if labels is not None:
issues = get_issues_list("statsmodels/statsmodels",
labels=labels,
state='closed',
auth=True,
)
else:
milestone_id = get_milestone_id("statsmodels/statsmodels", milestone,
auth=True)
issues = get_issues_list("statsmodels/statsmodels",
milestone=milestone_id,
state='closed',
auth=True,
)
should_backport = []
merged_dates = []
for issue in issues:
if not is_pull_request(issue):
continue
pr = get_pull_request("statsmodels/statsmodels", issue['number'],
auth=True)
if not pr['merged']:
print ("Marked PR closed without merge: %i" % pr['number'])
continue
if pr['number'] not in should_backport:
merged_dates.append(pr['merged_at'])
should_backport.append(pr['number'])
return Series(merged_dates, index=should_backport)
if __name__ == '__main__':
if len(sys.argv) < 2:
print(__doc__)
sys.exit(1)
if len(sys.argv) < 3:
branch = sys.argv[1]
already = already_backported(branch)
#NOTE: change this to the label you've used for marking a backport
should = should_backport(milestone="0.5.1")
print ("The following PRs should be backported:")
to_backport = []
if already:
should = should.ix[set(should.index).difference(already)]
should.sort()
for pr, date in should.iteritems():
print (pr)
sys.exit(0)
sys.exit(backport_pr(sys.argv[1], int(sys.argv[2])))
| bsd-3-clause |
PulsePod/evepod | lib/python2.7/site-packages/pip/vendor/html5lib/filters/whitespace.py | 1730 | 1142 | from __future__ import absolute_import, division, unicode_literals
import re
from . import _base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(_base.Filter):
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
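# Illustrative behaviour: runs of HTML whitespace collapse to a single space,
# e.g. collapse_spaces("foo \t\n bar") -> "foo bar", while content inside
# space-preserving elements such as <pre> and <textarea> passes through the
# Filter above untouched.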
| apache-2.0 |
sergev/mraa | examples/python/rgblcd.py | 43 | 1470 | #!/usr/bin/env python
# Author: Brendan Le Foll <[email protected]>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import mraa
# This example will change the LCD backlight on the Grove-LCD RGB backlight
# to a nice shade of purple
x = mraa.I2c(0)
x.address(0x62)
# initialise device
x.writeReg(0, 0)
x.writeReg(1, 0)
# sent RGB color data
x.writeReg(0x08, 0xAA)
x.writeReg(0x04, 255)
x.writeReg(0x02, 255)
| mit |
pypa/setuptools | setuptools/command/setopt.py | 6 | 5051 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
import configparser
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind == 'local':
return 'setup.cfg'
if kind == 'global':
return os.path.join(
os.path.dirname(distutils.__file__), 'distutils.cfg'
)
if kind == 'user':
dot = os.name == 'posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
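# Rough illustration (typical POSIX results; paths may differ per platform):
#   config_file('local')  -> 'setup.cfg'
#   config_file('user')   -> os.path.expanduser('~/.pydistutils.cfg')
#   config_file('global') -> <directory of distutils>/distutils.cfg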
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
log.debug("Reading configuration from %s", filename)
opts = configparser.RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option, value in options.items():
if value is None:
log.debug(
"Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section, option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section, option, value)
log.info("Writing %s", filename)
if not dry_run:
with open(filename, 'w') as f:
opts.write(f)
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames) > 1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-', '_'): self.set_value}
},
self.dry_run
)
| mit |
gonesurfing/Quisk_rpi_remote | hiqsdr/quisk_hardware.py | 1 | 13927 | # This is a sample hardware file for UDP control. Use this file for my 2010 transceiver
# described in QEX and for the improved version HiQSDR. To turn on the extended
# features in HiQSDR, update your FPGA firmware to version 1.1 or later and use use_rx_udp = 2.
from __future__ import print_function
import struct, socket, math, traceback
import _quisk as QS
from quisk_hardware_model import Hardware as BaseHardware
DEBUG = 0
class Hardware(BaseHardware):
def __init__(self, app, conf):
BaseHardware.__init__(self, app, conf)
self.use_sidetone = 1
self.got_udp_status = '' # status from UDP receiver
# want_udp_status is a 14-byte string with numbers in little-endian order:
# [0:2] 'St'
# [2:6] Rx tune phase
# [6:10] Tx tune phase
# [10] Tx output level 0 to 255
# [11] Tx control bits:
# 0x01 Enable CW transmit
# 0x02 Enable all other transmit
# 0x04 Use the HiQSDR extended IO pins not present in the 2010 QEX ver 1.0
# 0x08 The key is down (software key)
# [12] Rx control bits
# Second stage decimation less one, 1-39, six bits
# [13] zero or firmware version number
# The above is used for firmware version 1.0.
    # Version 1.1 adds eight more bytes for the HiQSDR control ports:
# [14] X1 connector: Preselect pins 69, 68, 65, 64; Preamp pin 63, Tx LED pin 57
# [15] Attenuator pins 84, 83, 82, 81, 80
# [16] More bits: AntSwitch pin 41 is 0x01
# [17:22] The remaining five bytes are sent as zero.
# Version 1.2 uses the same format as 1.1, but adds the "Qs" command (see below).
# Version 1.3 adds features needed by the new quisk_vna.py program:
# [17] This one byte must be zero
# [18:20] This is vna_count, the number of VNA data points; or zero for normal operation
# [20:22] These two bytes must be zero
# The "Qs" command is a two-byte UDP packet sent to the control port. It returns the hardware status
# as the above string, except that the string starts with "Qs" instead of "St". Do not send the "Qs" command
# from Quisk, as it interferes with the "St" command. The "Qs" command is meant to be used from an
# external program, such as HamLib or a logging program.
# When vna_count != 0, we are in VNA mode. The start frequency is rx_phase, and for each point tx_phase is added
# to advance the frequency. A zero sample is added to mark the blocks. The samples are I and Q averaged at DC.
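    # Illustrative packing (firmware version 0 layout, assuming a 122.88 MHz
    # clock and a 7.0 MHz VFO):
    #   phase = int(7.0e6 / 122.88e6 * 2.0**32 + 0.5) & 0xFFFFFFFF
    #   pkt = 'St' + struct.pack("<L", phase) + struct.pack("<L", phase) \
    #         + chr(tx_level) + chr(tx_control) + chr(rx_control) + chr(0)
    # This mirrors the string later assembled in NewUdpStatus().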
self.rx_phase = 0
self.tx_phase = 0
self.tx_level = 0
self.tx_control = 0
self.rx_control = 0
self.vna_count = 0 # VNA scan count; MUST be zero for non-VNA operation
self.index = 0
self.mode = None
self.band = None
self.rf_gain = 0
self.HiQSDR_Connector_X1 = 0
self.HiQSDR_Attenuator = 0
self.HiQSDR_Bits = 0
if conf.use_rx_udp == 2: # Set to 2 for the HiQSDR
self.rf_gain_labels = ('RF 0 dB', 'RF +10', 'RF -10', 'RF -20', 'RF -30')
self.antenna_labels = ('Ant 1', 'Ant 2')
self.firmware_version = None # firmware version is initially unknown
self.rx_udp_socket = None
self.vfo_frequency = 0 # current vfo frequency
self.tx_frequency = 0
self.decimations = [] # supported decimation rates
for dec in (40, 20, 10, 8, 5, 4, 2):
self.decimations.append(dec * 64)
if self.conf.fft_size_multiplier == 0:
self.conf.fft_size_multiplier = 6 # Set size needed by VarDecim
def open(self):
# Create the proper broadcast address for rx_udp_ip.
nm = self.conf.rx_udp_ip_netmask.split('.')
ip = self.conf.rx_udp_ip.split('.')
nm = map(int, nm)
ip = map(int, ip)
bc = ''
for i in range(4):
x = (ip[i] | ~ nm[i]) & 0xFF
bc = bc + str(x) + '.'
self.broadcast_addr = bc[:-1]
# This socket is used for the Simple Network Discovery Protocol by AE4JY
self.socket_sndp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket_sndp.setblocking(0)
self.socket_sndp.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sndp_request = chr(56) + chr(0) + chr(0x5A) + chr(0xA5) + chr(0) * 52
self.sndp_active = self.conf.sndp_active
# conf.rx_udp_port is used for returning ADC samples
# conf.rx_udp_port + 1 is used for control
self.rx_udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rx_udp_socket.setblocking(0)
self.rx_udp_socket.connect((self.conf.rx_udp_ip, self.conf.rx_udp_port + 1))
return QS.open_rx_udp(self.conf.rx_udp_ip, self.conf.rx_udp_port)
def close(self):
if self.rx_udp_socket:
self.rx_udp_socket.close()
self.rx_udp_socket = None
def ReturnFrequency(self): # Return the current tuning and VFO frequency
return None, None # frequencies have not changed
def ReturnVfoFloat(self): # Return the accurate VFO as a float
return float(self.rx_phase) * self.conf.rx_udp_clock / 2.0**32
def ChangeFrequency(self, tx_freq, vfo_freq, source='', band='', event=None):
if vfo_freq != self.vfo_frequency:
self.vfo_frequency = vfo_freq
self.rx_phase = int(float(vfo_freq) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
if tx_freq and tx_freq > 0:
self.tx_frequency = tx_freq
tx = tx_freq
self.tx_phase = int(float(tx) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.NewUdpStatus()
return tx_freq, vfo_freq
def ChangeMode(self, mode):
# mode is a string: "USB", "AM", etc.
self.mode = mode
self.tx_control &= ~0x03 # Erase last two bits
if self.vna_count:
pass
elif mode in ("CWL", "CWU"):
self.tx_control |= 0x01
elif mode in ("USB", "LSB", "AM", "FM"):
self.tx_control |= 0x02
elif mode[0:4] == 'DGT-':
self.tx_control |= 0x02
elif mode[0:3] == 'IMD':
self.tx_control |= 0x02
self.SetTxLevel()
def ChangeBand(self, band):
# band is a string: "60", "40", "WWV", etc.
self.band = band
self.HiQSDR_Connector_X1 &= ~0x0F # Mask in the last four bits
self.HiQSDR_Connector_X1 |= self.conf.HiQSDR_BandDict.get(band, 0) & 0x0F
self.SetTxLevel()
def SetTxLevel(self):
# As tx_level varies from 50 to 200, the output level changes from 263 to 752 mV
# So 0 to 255 is 100 to 931, or 1.0 to 9.31; v = 1.0 + 0.0326 * level
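    # Worked example: tx_level 100 gives v = 1.0 + 0.0326 * 100 = 4.26,
    # i.e. roughly 426 mV of drive on the scale described above.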
if not self.vna_count:
try:
self.tx_level = self.conf.tx_level[self.band]
except KeyError:
self.tx_level = self.conf.tx_level[None] # The default
if self.mode[0:4] == 'DGT-':
reduc = self.application.digital_tx_level
else:
reduc = self.application.tx_level
if reduc < 100: # reduce power by a percentage
level = 1.0 + self.tx_level * 0.0326
level *= math.sqrt(reduc / 100.0) # Convert from a power to an amplitude
self.tx_level = int((level - 1.0) / 0.0326 + 0.5)
if self.tx_level < 0:
self.tx_level = 0
self.NewUdpStatus()
def OnButtonRfGain(self, event):
# The HiQSDR attenuator is five bits: 2, 4, 8, 10, 20 dB
btn = event.GetEventObject()
n = btn.index
self.HiQSDR_Connector_X1 &= ~0x10 # Mask in the preamp bit
if n == 0: # 0dB
self.HiQSDR_Attenuator = 0
self.rf_gain = 0
elif n == 1: # +10
self.HiQSDR_Attenuator = 0
self.HiQSDR_Connector_X1 |= 0x10
self.rf_gain = 10
elif n == 2: # -10
self.HiQSDR_Attenuator = 0x08
self.rf_gain = -10
elif n == 3: # -20
self.HiQSDR_Attenuator = 0x10
self.rf_gain = -20
elif n == 4: # -30
self.HiQSDR_Attenuator = 0x18
self.rf_gain = -30
else:
self.HiQSDR_Attenuator = 0
self.rf_gain = 0
print ('Unknown RfGain')
self.NewUdpStatus()
def OnButtonPTT(self, event):
# This feature requires firmware version 1.1 or higher
if self.firmware_version:
btn = event.GetEventObject()
if btn.GetValue(): # Turn the software key bit on or off
self.tx_control |= 0x08
else:
self.tx_control &= ~0x08
self.NewUdpStatus(True) # Prompt update for PTT
def OnButtonAntenna(self, event):
# This feature requires extended IO
btn = event.GetEventObject()
if btn.index:
self.HiQSDR_Bits |= 0x01
else:
self.HiQSDR_Bits &= ~0x01
self.NewUdpStatus()
def HeartBeat(self):
if self.sndp_active: # AE4JY Simple Network Discovery Protocol - attempt to set the FPGA IP address
try:
self.socket_sndp.sendto(self.sndp_request, (self.broadcast_addr, 48321))
data = self.socket_sndp.recv(1024)
# print(repr(data))
except:
# traceback.print_exc()
pass
else:
if len(data) == 56 and data[5:14] == 'HiQSDR-v1':
ip = self.conf.rx_udp_ip.split('.')
t = (data[0:4] + chr(2) + data[5:37] + chr(int(ip[3])) + chr(int(ip[2])) + chr(int(ip[1])) + chr(int(ip[0]))
+ chr(0) * 12 + chr(self.conf.rx_udp_port & 0xFF) + chr(self.conf.rx_udp_port >> 8) + chr(0))
# print(repr(t))
self.socket_sndp.sendto(t, (self.broadcast_addr, 48321))
try: # receive the old status if any
data = self.rx_udp_socket.recv(1024)
if DEBUG:
self.PrintStatus(' got ', data)
except:
pass
else:
if data[0:2] == 'St':
self.got_udp_status = data
if self.firmware_version is None: # get the firmware version
if self.want_udp_status[0:13] != self.got_udp_status[0:13]:
try:
self.rx_udp_socket.send(self.want_udp_status)
if DEBUG:
self.PrintStatus('Start', self.want_udp_status)
except:
pass
else: # We got a correct response.
self.firmware_version = ord(self.got_udp_status[13]) # Firmware version is returned here
if DEBUG:
print ('Got version', self.firmware_version)
if self.firmware_version > 0 and self.conf.use_rx_udp == 2:
self.tx_control |= 0x04 # Use extra control bytes
self.sndp_active = False
self.NewUdpStatus()
else:
if self.want_udp_status != self.got_udp_status:
if DEBUG:
self.PrintStatus('Have ', self.got_udp_status)
self.PrintStatus(' send', self.want_udp_status)
try:
self.rx_udp_socket.send(self.want_udp_status)
except:
pass
elif DEBUG:
self.rx_udp_socket.send('Qs')
def PrintStatus(self, msg, string):
print (msg, ' ', end=' ')
print (string[0:2], end=' ')
for c in string[2:]:
print ("%2X" % ord(c), end=' ')
print ()
def GetFirmwareVersion(self):
return self.firmware_version
def OnSpot(self, level):
pass
def OnBtnFDX(self, is_fdx): # Status of FDX button, 0 or 1
if is_fdx:
self.HiQSDR_Connector_X1 |= 0x20 # Mask in the FDX bit
else:
self.HiQSDR_Connector_X1 &= ~0x20
self.NewUdpStatus()
def VarDecimGetChoices(self): # return text labels for the control
clock = self.conf.rx_udp_clock
l = [] # a list of sample rates
for dec in self.decimations:
l.append(str(int(float(clock) / dec / 1e3 + 0.5)))
return l
def VarDecimGetLabel(self): # return a text label for the control
return "Sample rate ksps"
def VarDecimGetIndex(self): # return the current index
return self.index
def VarDecimSet(self, index=None): # set decimation, return sample rate
if index is None: # initial call to set decimation before the call to open()
rate = self.application.vardecim_set # May be None or from different hardware
try:
dec = int(float(self.conf.rx_udp_clock // rate + 0.5))
self.index = self.decimations.index(dec)
except:
try:
self.index = self.decimations.index(self.conf.rx_udp_decimation)
except:
self.index = 0
else:
self.index = index
dec = self.decimations[self.index]
self.rx_control = dec // 64 - 1 # Second stage decimation less one
self.NewUdpStatus()
return int(float(self.conf.rx_udp_clock) / dec + 0.5)
def VarDecimRange(self):
return (48000, 960000)
def NewUdpStatus(self, do_tx=False):
s = "St"
s = s + struct.pack("<L", self.rx_phase)
s = s + struct.pack("<L", self.tx_phase)
s = s + chr(self.tx_level) + chr(self.tx_control)
s = s + chr(self.rx_control)
if self.firmware_version: # Add the version
s = s + chr(self.firmware_version) # The firmware version will be returned
if self.tx_control & 0x04: # Use extra HiQSDR control bytes
s = s + chr(self.HiQSDR_Connector_X1)
s = s + chr(self.HiQSDR_Attenuator)
s = s + chr(self.HiQSDR_Bits)
s = s + chr(0)
else:
s = s + chr(0) * 4
s = s + struct.pack("<H", self.vna_count)
s = s + chr(0) * 2
else: # firmware version 0 or None
s = s + chr(0) # assume version 0
self.want_udp_status = s
if do_tx:
try:
self.rx_udp_socket.send(s)
except:
pass
def SetVNA(self, key_down=None, vna_start=None, vna_stop=None, vna_count=None, do_tx=False):
if key_down is None:
pass
elif key_down:
self.tx_control |= 0x08
else:
self.tx_control &= ~0x08
if vna_count is not None:
self.vna_count = vna_count # Number of scan points
if vna_start is not None: # Set the start and stop frequencies. The tx_phase is the frequency delta.
self.rx_phase = int(float(vna_start) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.tx_phase = int(float(vna_stop - vna_start) / self.vna_count / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.tx_control &= ~0x03 # Erase last two bits
self.rx_control = 40 - 1
self.tx_level = 255
self.NewUdpStatus(do_tx)
start = int(float(self.rx_phase) * self.conf.rx_udp_clock / 2.0**32 + 0.5)
stop = int(start + float(self.tx_phase) * self.vna_count * self.conf.rx_udp_clock / 2.0**32 + 0.5)
return start, stop # return the start and stop frequencies after integer rounding
| gpl-2.0 |
jeezybrick/django | django/conf/locale/lt/formats.py | 504 | 1830 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'Y \m. E j \d.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'Y \m. E j \d., H:i'
YEAR_MONTH_FORMAT = r'Y \m. F'
MONTH_DAY_FORMAT = r'E j \d.'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
]
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
'%H.%M.%S', # '14.30.59'
'%H.%M.%S.%f', # '14.30.59.000200'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
'%d.%m.%y %H.%M.%S.%f', # '25.10.06 14.30.59.000200'
'%d.%m.%y %H.%M', # '25.10.06 14.30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
dmoliveira/networkx | networkx/generators/community.py | 30 | 11963 | """Generators for classes of graphs used in studying social networks."""
import itertools
import math
import random
import networkx as nx
# Copyright(C) 2011 by
# Ben Edwards <[email protected]>
# Aric Hagberg <[email protected]>
# All rights reserved.
# BSD license.
__author__ = """\n""".join(['Ben Edwards ([email protected])',
'Aric Hagberg ([email protected])'])
__all__ = ['caveman_graph', 'connected_caveman_graph',
'relaxed_caveman_graph', 'random_partition_graph',
'planted_partition_graph', 'gaussian_random_partition_graph']
def caveman_graph(l, k):
"""Returns a caveman graph of ``l`` cliques of size ``k``.
Parameters
----------
l : int
Number of cliques
k : int
Size of cliques
Returns
-------
G : NetworkX Graph
caveman graph
Notes
-----
This returns an undirected graph, it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.caveman_graph(3, 3)
See also
--------
connected_caveman_graph
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
# l disjoint cliques of size k
G = nx.empty_graph(l*k)
G.name = "caveman_graph(%s,%s)" % (l*k, k)
if k > 1:
for start in range(0, l*k, k):
edges = itertools.combinations(range(start, start+k), 2)
G.add_edges_from(edges)
return G
def connected_caveman_graph(l, k):
"""Returns a connected caveman graph of ``l`` cliques of size ``k``.
    The connected caveman graph is formed by creating ``l`` cliques of size
``k``, then a single edge in each clique is rewired to a node in an
adjacent clique.
Parameters
----------
l : int
number of cliques
k : int
size of cliques
Returns
-------
G : NetworkX Graph
connected caveman graph
Notes
-----
This returns an undirected graph, it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.connected_caveman_graph(3, 3)
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
G = nx.caveman_graph(l, k)
G.name = "connected_caveman_graph(%s,%s)" % (l, k)
for start in range(0, l*k, k):
G.remove_edge(start, start+1)
G.add_edge(start, (start-1) % (l*k))
return G
def relaxed_caveman_graph(l, k, p, seed=None):
"""Return a relaxed caveman graph.
A relaxed caveman graph starts with ``l`` cliques of size ``k``. Edges are
then randomly rewired with probability ``p`` to link different cliques.
Parameters
----------
l : int
Number of groups
k : int
Size of cliques
p : float
      Probability of rewiring each edge.
seed : int,optional
Seed for random number generator(default=None)
Returns
-------
G : NetworkX Graph
Relaxed Caveman Graph
Raises
------
NetworkXError:
If p is not in [0,1]
Examples
--------
>>> G = nx.relaxed_caveman_graph(2, 3, 0.1, seed=42)
References
----------
.. [1] Santo Fortunato, Community Detection in Graphs,
Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
http://arxiv.org/abs/0906.0612
"""
if not seed is None:
random.seed(seed)
G = nx.caveman_graph(l, k)
nodes = G.nodes()
G.name = "relaxed_caveman_graph (%s,%s,%s)" % (l, k, p)
for (u, v) in G.edges():
if random.random() < p: # rewire the edge
x = random.choice(nodes)
if G.has_edge(u, x):
continue
G.remove_edge(u, v)
G.add_edge(u, x)
return G
def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
"""Return the random partition graph with a partition of sizes.
A partition graph is a graph of communities with sizes defined by
s in sizes. Nodes in the same group are connected with probability
p_in and nodes of different groups are connected with probability
p_out.
Parameters
----------
sizes : list of ints
Sizes of groups
p_in : float
      probability of edges within groups
p_out : float
probability of edges between groups
directed : boolean optional, default=False
Whether to create a directed graph
seed : int optional, default None
A seed for the random number generator
Returns
-------
G : NetworkX Graph or DiGraph
random partition graph of size sum(gs)
Raises
------
NetworkXError
If p_in or p_out is not in [0,1]
Examples
--------
>>> G = nx.random_partition_graph([10,10,10],.25,.01)
>>> len(G)
30
>>> partition = G.graph['partition']
>>> len(partition)
3
Notes
-----
This is a generalization of the planted-l-partition described in
[1]_. It allows for the creation of groups of any size.
    The partition is stored as a graph attribute 'partition'.
References
----------
.. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports
Volume 486, Issue 3-5 p. 75-174. http://arxiv.org/abs/0906.0612
"""
# Use geometric method for O(n+m) complexity algorithm
# partition=nx.community_sets(nx.get_node_attributes(G,'affiliation'))
if not seed is None:
random.seed(seed)
if not 0.0 <= p_in <= 1.0:
raise nx.NetworkXError("p_in must be in [0,1]")
if not 0.0 <= p_out <= 1.0:
raise nx.NetworkXError("p_out must be in [0,1]")
if directed:
G = nx.DiGraph()
else:
G = nx.Graph()
G.graph['partition'] = []
n = sum(sizes)
G.add_nodes_from(range(n))
# start with len(sizes) groups of gnp random graphs with parameter p_in
# graphs are unioned together with node labels starting at
# 0, sizes[0], sizes[0]+sizes[1], ...
next_group = {} # maps node key (int) to first node in next group
start = 0
group = 0
for n in sizes:
edges = ((u+start, v+start)
for u, v in
nx.fast_gnp_random_graph(n, p_in, directed=directed).edges())
G.add_edges_from(edges)
next_group.update(dict.fromkeys(range(start, start+n), start+n))
G.graph['partition'].append(set(range(start, start+n)))
group += 1
start += n
# handle edge cases
if p_out == 0:
return G
if p_out == 1:
for n in next_group:
targets = range(next_group[n], len(G))
G.add_edges_from(zip([n]*len(targets), targets))
if directed:
G.add_edges_from(zip(targets, [n]*len(targets)))
return G
# connect each node in group randomly with the nodes not in group
# use geometric method like fast_gnp_random_graph()
lp = math.log(1.0 - p_out)
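    # Illustrative note: rather than testing every possible inter-group edge,
    # the loops below jump ahead by geometrically distributed gaps,
    # v += int(log(1 - r) / log(1 - p_out)), so only the successful edges are
    # visited (same trick as fast_gnp_random_graph).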
n = len(G)
if directed:
for u in range(n):
v = 0
while v < n:
lr = math.log(1.0 - random.random())
v += int(lr/lp)
# skip over nodes in the same group as v, including self loops
if next_group.get(v, n) == next_group[u]:
v = next_group[u]
if v < n:
G.add_edge(u, v)
v += 1
else:
for u in range(n-1):
v = next_group[u] # start with next node not in this group
while v < n:
lr = math.log(1.0 - random.random())
v += int(lr/lp)
if v < n:
G.add_edge(u, v)
v += 1
return G
def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
"""Return the planted l-partition graph.
This model partitions a graph with n=l*k vertices in
l groups with k vertices each. Vertices of the same
group are linked with a probability p_in, and vertices
of different groups are linked with probability p_out.
Parameters
----------
l : int
Number of groups
k : int
Number of vertices in each group
p_in : float
probability of connecting vertices within a group
p_out : float
probability of connected vertices between groups
seed : int,optional
Seed for random number generator(default=None)
directed : bool,optional (default=False)
If True return a directed graph
Returns
-------
G : NetworkX Graph or DiGraph
planted l-partition graph
Raises
------
NetworkXError:
      If p_in or p_out is not in [0,1]
Examples
--------
>>> G = nx.planted_partition_graph(4, 3, 0.5, 0.1,seed=42)
See Also
--------
    random_partition_graph
References
----------
.. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
on the planted partition model,
Random Struct. Algor. 18 (2001) 116-140.
.. [2] Santo Fortunato 'Community Detection in Graphs' Physical Reports
Volume 486, Issue 3-5 p. 75-174. http://arxiv.org/abs/0906.0612
"""
return random_partition_graph([k]*l, p_in, p_out, seed, directed)
def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
seed=None):
"""Generate a Gaussian random partition graph.
A Gaussian random partition graph is created by creating k partitions
each with a size drawn from a normal distribution with mean s and variance
s/v. Nodes are connected within clusters with probability p_in and
between clusters with probability p_out[1]
Parameters
----------
n : int
Number of nodes in the graph
s : float
Mean cluster size
v : float
Shape parameter. The variance of cluster size distribution is s/v.
p_in : float
      Probability of intra cluster connection.
p_out : float
Probability of inter cluster connection.
directed : boolean, optional default=False
Whether to create a directed graph or not
seed : int
Seed value for random number generator
Returns
-------
G : NetworkX Graph or DiGraph
gaussian random partition graph
Raises
------
NetworkXError
If s is > n
If p_in or p_out is not in [0,1]
Notes
-----
Note the number of partitions is dependent on s,v and n, and that the
last partition may be considerably smaller, as it is sized to simply
fill out the nodes [1]
See Also
--------
random_partition_graph
Examples
--------
>>> G = nx.gaussian_random_partition_graph(100,10,10,.25,.1)
>>> len(G)
100
References
----------
.. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
Experiments on Graph Clustering Algorithms,
In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
"""
if s > n:
raise nx.NetworkXError("s must be <= n")
assigned = 0
sizes = []
while True:
size = int(random.normalvariate(s, float(s) / v + 0.5))
if size < 1: # how to handle 0 or negative sizes?
continue
if assigned + size >= n:
sizes.append(n-assigned)
break
assigned += size
sizes.append(size)
return random_partition_graph(sizes, p_in, p_out, directed, seed)
| bsd-3-clause |
hjarmstrong/Odme-plusplus | 3rd/build/tools/build/v2/test/TestCmd.py | 44 | 20065 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing of
executable commands and scripts (in any language, not just Python), especially
commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd module
manages and cleans up one or more temporary workspace directories, and provides
methods for creating files and directories in those workspace directories from
in-line data (here-documents), allowing tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
test = TestCmd()
The TestCmd module provides pass_test(), fail_test(), and no_result() unbound
methods that report test results for use with the Aegis change management
system. These methods terminate the test immediately, reporting PASSED, FAILED
or NO RESULT respectively and exiting with status 0 (success), 1 or 2
respectively. This allows for a distinction between an actual failed test and a
test that could not be properly evaluated because of an external condition (such
as a full file system or incorrect permissions).
"""
# Copyright 2000 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# Copyright 2002-2003 Vladimir Prus.
# Copyright 2002-2003 Dave Abrahams.
# Copyright 2006 Rene Rivera.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from string import join, split
__author__ = "Steven Knight <[email protected]>"
__revision__ = "TestCmd.py 0.D002 2001/08/31 14:56:12 software"
__version__ = "0.02"
from types import *
import os
import os.path
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import traceback
tempfile.template = 'testcmd.'
_Cleanup = []
def _clean():
global _Cleanup
list = _Cleanup[:]
_Cleanup = []
list.reverse()
for test in list:
test.cleanup()
sys.exitfunc = _clean
def caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name == "?":
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self=None, condition=True, function=None, skip=0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED and exits
with a status of 1. If a condition argument is supplied, the test fails
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + join(self.program, " ")
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at + """
in directory: """ + os.getcwd() )
sys.exit(1)
def no_result(self=None, condition=True, function=None, skip=0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test and
exits with a status of 2. If a condition argument is supplied, the test
fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
sys.exit(2)
def pass_test(self=None, condition=True, function=None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test and exits
with a status of 0. If a condition argument is supplied, the test passes
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines=None, matches=None):
"""
Returns whether the given lists or strings containing lines separated
using newline characters contain exactly the same data.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(matches) is ListType:
matches = split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines=None, res=None):
"""
    Given lists or strings containing lines separated using newline characters,
    this function matches those lines one by one, interpreting the lines in the
    res parameter as regular expressions.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(res) is ListType:
res = split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
if not re.compile("^" + res[i] + "$").search(lines[i]):
return
return 1
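# Illustrative calls: both helpers accept either newline-joined text or a list
# of lines, and return None as soon as any line fails to match.
#
#   match_exact("a\nb", ["a", "b"])             ->  1
#   match_re(["abc", "123"], ["a.c", r"\d+"])   ->  1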
class TestCmd:
def __init__(self, description=None, program=None, workdir=None,
subdir=None, verbose=False, match=None, inpath=None):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program, inpath)
self.verbose_set(verbose)
if match is None:
self.match_func = match_re
else:
self.match_func = match
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
env = os.environ.get('PRESERVE')
if env:
self._preserve['pass_test'] = env
self._preserve['fail_test'] = env
self._preserve['no_result'] = env
else:
env = os.environ.get('PRESERVE_PASS')
if env is not None:
self._preserve['pass_test'] = env
env = os.environ.get('PRESERVE_FAIL')
if env is not None:
self._preserve['fail_test'] = env
            env = os.environ.get('PRESERVE_NO_RESULT')
            if env is not None:
                self._preserve['no_result'] = env
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
def cleanup(self, condition=None):
"""
Removes any temporary working directories for the specified TestCmd
environment. If the environment variable PRESERVE was set when the
TestCmd environment was created, temporary working directories are not
removed. If any of the environment variables PRESERVE_PASS,
PRESERVE_FAIL or PRESERVE_NO_RESULT were set when the TestCmd
environment was created, then temporary working directories are not
removed if the test passed, failed or had no result, respectively.
Temporary working directories are also preserved for conditions
specified via the preserve method.
Typically, this method is not called directly, but is used when the
script exits to clean up temporary working directories as appropriate
for the exit status.
"""
if not self._dirlist:
return
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print("Preserved directory %s" % dir)
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors=1)
self._dirlist = []
self.workdir = None
os.chdir(self._cwd)
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def description_set(self, description):
"""Set the description of the functionality being tested."""
self.description = description
def fail_test(self, condition=True, function=None, skip=0):
"""Cause the test to fail."""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def match(self, lines, matches):
"""Compare actual and expected file contents."""
return self.match_func(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file content exactly."""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare file content with a regular expression."""
return match_re(lines, res)
def no_result(self, condition=True, function=None, skip=0):
"""Report that the test could not be run."""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition=True, function=None):
"""Cause the test to pass."""
if not condition:
return
self.condition = 'pass_test'
pass_test(self, condition, function)
def preserve(self, *conditions):
"""
Arrange for the temporary working directories for the specified
TestCmd environment to be preserved for one or more conditions. If no
conditions are specified, arranges for the temporary working
directories to be preserved for all conditions.
"""
if conditions is ():
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program, inpath):
"""Set the executable program or script to be tested."""
if not inpath and program and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
self.program = program
def read(self, file, mode='rb'):
"""
Reads and returns the contents of the specified file name. The file
name may be a list, in which case the elements are concatenated with
the os.path.join() method. The file is assumed to be under the
temporary working directory unless it is an absolute path name. The I/O
mode for the file may be specified and must begin with an 'r'. The
default is 'rb' (binary read).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
return open(file, mode).read()
def run(self, program=None, arguments=None, chdir=None, stdin=None,
universal_newlines=True):
"""
Runs a test of the program or script for the test environment.
Standard output and error output are saved for future retrieval via the
stdout() and stderr() methods.
'universal_newlines' parameter controls how the child process
input/output streams are opened as defined for the same named Python
subprocess.POpen constructor parameter.
"""
if chdir:
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
else:
chdir = self.workdir
cmd = []
if program and program[0]:
if program[0] != self.program[0] and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
cmd += program
else:
cmd += self.program
if arguments:
cmd += arguments.split(" ")
if self.verbose:
sys.stderr.write(join(cmd, " ") + "\n")
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=chdir,
universal_newlines=universal_newlines)
        if stdin:
            # subprocess.Popen exposes the child's stdin pipe as p.stdin
            # (the old popen2-style 'tochild' attribute does not exist here)
            if type(stdin) is ListType:
                for line in stdin:
                    p.stdin.write(line)
            else:
                p.stdin.write(stdin)
out, err = p.communicate()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.returncode
if self.verbose:
sys.stdout.write(self._stdout[-1])
sys.stderr.write(self._stderr[-1])
def stderr(self, run=None):
"""
Returns the error output from the specified run number. If there is
no specified run number, then returns the error output of the last run.
If the run number is less than zero, then returns the error output from
that many runs back from the current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run -= 1
if run < 0:
return ''
return self._stderr[run]
def stdout(self, run=None):
"""
Returns the standard output from the specified run number. If there
is no specified run number, then returns the standard output of the
last run. If the run number is less than zero, then returns the
standard output from that many runs back from the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run -= 1
if run < 0:
return ''
return self._stdout[run]
def subdir(self, *subdirs):
"""
Create new subdirectories under the temporary working directory, one
for each argument. An argument may be a list, in which case the list
elements are concatenated using the os.path.join() method.
Subdirectories multiple levels deep must be created using a separate
argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if type(sub) is ListType:
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except:
pass
else:
count += 1
return count
def unlink(self, file):
"""
Unlinks the specified file name. The file name may be a list, in
which case the elements are concatenated using the os.path.join()
method. The file is assumed to be under the temporary working directory
unless it is an absolute path name.
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level."""
self.verbose = verbose
def workdir_set(self, path):
"""
Creates a temporary working directory with the specified path name.
If the path is a null string (''), a unique directory name is created.
"""
if os.path.isabs(path):
self.workdir = path
else:
if path != None:
if path == '':
path = tempfile.mktemp()
if path != None:
os.mkdir(path)
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
# We would like to set self.workdir like this:
# self.workdir = path
# But symlinks in the path will report things differently from
# os.getcwd(), so chdir there and back to fetch the canonical
# path.
cwd = os.getcwd()
os.chdir(path)
self.workdir = os.getcwd()
os.chdir(cwd)
else:
self.workdir = None
def workpath(self, *args):
"""
Returns the absolute path name to a subdirectory or file within the
current temporary working directory. Concatenates the temporary working
directory name with the specified arguments using os.path.join().
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def writable(self, top, write):
"""
Make the specified directory tree writable (write == 1) or not
(write == None).
"""
def _walk_chmod(arg, dirname, names):
st = os.stat(dirname)
os.chmod(dirname, arg(st[stat.ST_MODE]))
for name in names:
fullname = os.path.join(dirname, name)
st = os.stat(fullname)
os.chmod(fullname, arg(st[stat.ST_MODE]))
_mode_writable = lambda mode: stat.S_IMODE(mode|0200)
_mode_non_writable = lambda mode: stat.S_IMODE(mode&~0200)
if write:
f = _mode_writable
else:
f = _mode_non_writable
try:
os.path.walk(top, _walk_chmod, f)
except:
pass # Ignore any problems changing modes.
def write(self, file, content, mode='wb'):
"""
Writes the specified content text (second argument) to the specified
file name (first argument). The file name may be a list, in which case
the elements are concatenated using the os.path.join() method. The file
is created under the temporary working directory. Any subdirectories in
the path must already exist. The I/O mode for the file may be specified
and must begin with a 'w'. The default is 'wb' (binary write).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
open(file, mode).write(content)
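    # Illustrative usage sketch (not part of the original module; it assumes
    # the enclosing class is instantiated in the usual TestCmd style, e.g.
    # test = TestCmd(program='my_program', workdir='')):
    #
    #     test.write('input.txt', "hello\n")
    #     test.run(arguments='input.txt')
    #     if test.stdout() != "expected output\n":
    #         test.fail_test()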
| mit |
staranjeet/fjord | vendor/packages/translate-toolkit/translate/search/indexing/CommonIndexer.py | 3 | 25337 | # -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""
base class for interfaces to indexing engines for pootle
"""
import os
import translate.lang.data
def is_available():
"""Check if this indexing engine interface is usable.
This function must exist in every module that contains indexing engine
interfaces.
:return: is this interface usable?
:rtype: bool
"""
return False
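# Illustrative note (not part of the original module): a concrete backend
# module - e.g. a hypothetical XapianIndexer.py - would override this to
# probe for its engine:
#
#     def is_available():
#         try:
#             import xapian
#             return True
#         except ImportError:
#             return False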
class CommonDatabase(object):
"""Base class for indexing support.
Any real implementation must override most methods of this class.
"""
field_analyzers = {}
"""mapping of field names and analyzers - see
:meth:`~.CommonDatabase.set_field_analyzers`"""
ANALYZER_EXACT = 0
"""exact matching: the query string must equal the whole term string"""
ANALYZER_PARTIAL = 1 << 1
"""partial matching: a document matches, even if the query string only
matches the beginning of the term value."""
ANALYZER_TOKENIZE = 1 << 2
"""tokenize terms and queries automatically"""
ANALYZER_DEFAULT = ANALYZER_TOKENIZE | ANALYZER_PARTIAL
"""the default analyzer to be used if nothing is configured"""
QUERY_TYPE = None
"""override this with the query class of the implementation"""
INDEX_DIRECTORY_NAME = None
"""override this with a string to be used as the name of the indexing
directory/file in the filesystem
"""
def __init__(self, basedir, analyzer=None, create_allowed=True):
"""initialize or open an indexing database
Any derived class must override ``__init__``.
Any implementation can rely on the "self.location" attribute to be set
by the ``__init__`` function of the super class.
:raise ValueError: the given location exists, but the database type
is incompatible (e.g. created by a different
indexing engine)
:raise OSError: the database failed to initialize
:param basedir: the parent directory of the database
:type basedir: str
:param analyzer: bitwise combination of possible analyzer flags
to be used as the default analyzer for this
database. Leave it empty to use the system
default analyzer (``self.ANALYZER_DEFAULT``).
see :attr:`CommonDatabase.ANALYZER_TOKENIZE`,
:attr:`CommonDatabase.ANALYZER_PARTIAL`, ...
:type analyzer: int
:param create_allowed: create the database, if necessary.
:type create_allowed: bool
"""
# just do some checks
if self.QUERY_TYPE is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'QUERY_TYPE' is undefined")
if self.INDEX_DIRECTORY_NAME is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'INDEX_DIRECTORY_NAME' is undefined")
self.location = os.path.join(basedir, self.INDEX_DIRECTORY_NAME)
if (not create_allowed) and (not os.path.exists(self.location)):
raise OSError("Indexer: the database does not exist - and I am" \
+ " not configured to create it.")
if analyzer is None:
self.analyzer = self.ANALYZER_DEFAULT
else:
self.analyzer = analyzer
self.field_analyzers = {}
def flush(self, optimize=False):
"""Flush the content of the database - to force changes to be written
to disk.
Some databases also support index optimization.
:param optimize: should the index be optimized if possible?
:type optimize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'flush' is missing")
def make_query(self, args, require_all=True, analyzer=None):
"""Create simple queries (strings or field searches) or
combine multiple queries (AND/OR).
        To specify rules for field searches, you may want to take a look at
:meth:`~.CommonDatabase.set_field_analyzers`. The parameter
'match_text_partial' can override the previously defined
default setting.
:param args: queries or search string or description of field query
examples::
[xapian.Query("foo"), xapian.Query("bar")]
xapian.Query("foo")
"bar"
{"foo": "bar", "foobar": "foo"}
:type args: list of queries | single query | str | dict
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: boolean
:param analyzer: (only applicable for 'dict' or 'str')
Define query options (partial matching, exact
matching, tokenizing, ...) as bitwise
combinations of *CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is ``None`` (default), then the
configured analyzer for the field is used.
:type analyzer: int
:return: the combined query
:rtype: query type of the specific implementation
"""
# turn a dict into a list if necessary
if isinstance(args, dict):
args = args.items()
# turn 'args' into a list if necessary
if not isinstance(args, list):
args = [args]
# combine all given queries
result = []
for query in args:
# just add precompiled queries
if isinstance(query, self.QUERY_TYPE):
result.append(self._create_query_for_query(query))
# create field/value queries out of a tuple
elif isinstance(query, tuple):
field, value = query
# perform unicode normalization
field = translate.lang.data.normalize(unicode(field))
value = translate.lang.data.normalize(unicode(value))
                # check for the chosen match type
if analyzer is None:
analyzer = self.get_field_analyzers(field)
result.append(self._create_query_for_field(field, value,
analyzer=analyzer))
# parse plaintext queries
elif isinstance(query, basestring):
if analyzer is None:
analyzer = self.analyzer
# perform unicode normalization
query = translate.lang.data.normalize(unicode(query))
result.append(self._create_query_for_string(query,
require_all=require_all, analyzer=analyzer))
else:
# other types of queries are not supported
raise ValueError("Unable to handle query type: %s" \
% str(type(query)))
# return the combined query
return self._create_query_combined(result, require_all)
def _create_query_for_query(self, query):
"""Generate a query based on an existing query object.
Basically this function should just create a copy of the original.
:param query: the original query object
:type query: ``xapian.Query``
:return: the resulting query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_query' is missing")
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""Generate a query for a plain term of a string query.
Basically this function parses the string and returns the resulting
query.
:param text: the query string
:type text: str
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_string' is missing")
def _create_query_for_field(self, field, value, analyzer=None):
"""Generate a field query.
This functions creates a field->value query.
:param field: the fieldname to be used
:type field: str
:param value: the wanted value of the field
:type value: str
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_field' is missing")
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
:param queries: list of the original queries
:type queries: list of xapian.Query
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:return: the resulting combined query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_combined' is missing")
def index_document(self, data):
"""Add the given data to the database.
:param data: the data to be indexed.
A dictionary will be treated as ``fieldname:value``
combinations.
If the fieldname is None then the value will be
interpreted as a plain term or as a list of plain terms.
Lists of terms are indexed separately.
Lists of strings are treated as plain terms.
:type data: dict | list of str
"""
doc = self._create_empty_document()
if isinstance(data, dict):
data = data.items()
# add all data
for dataset in data:
if isinstance(dataset, tuple):
# the dataset tuple consists of '(key, value)'
key, value = dataset
if key is None:
if isinstance(value, list):
terms = value[:]
elif isinstance(value, basestring):
terms = [value]
else:
raise ValueError("Invalid data type to be indexed: %s" \
% str(type(data)))
for one_term in terms:
self._add_plain_term(doc, self._decode(one_term),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
analyze_settings = self.get_field_analyzers(key)
# handle multiple terms
if not isinstance(value, list):
value = [value]
for one_term in value:
self._add_field_term(doc, key, self._decode(one_term),
(analyze_settings & self.ANALYZER_TOKENIZE > 0))
elif isinstance(dataset, basestring):
self._add_plain_term(doc, self._decode(dataset),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
raise ValueError("Invalid data type to be indexed: %s" \
% str(type(data)))
self._add_document_to_index(doc)
def _create_empty_document(self):
"""Create an empty document to be filled and added to the index later.
:return: the new document object
:rtype: ``xapian.Document`` | ``PyLucene.Document``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_empty_document' is missing")
def _add_plain_term(self, document, term, tokenize=True):
"""Add a term to a document.
:param document: the document to be changed
:type document: ``xapian.Document`` | ``PyLucene.Document``
:param term: a single term to be added
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_plain_term' is missing")
def _add_field_term(self, document, field, term, tokenize=True):
"""Add a field term to a document.
:param document: the document to be changed
:type document: ``xapian.Document`` | ``PyLucene.Document``
:param field: name of the field
:type field: str
:param term: term to be associated to the field
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_field_term' is missing")
def _add_document_to_index(self, document):
"""Add a prepared document to the index database.
:param document: the document to be added
:type document: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_document_to_index' is missing")
def begin_transaction(self):
"""begin a transaction
You can group multiple modifications of a database as a transaction.
This prevents time-consuming database flushing and helps, if you want
that a changeset is committed either completely or not at all.
No changes will be written to disk until 'commit_transaction'.
'cancel_transaction' can be used to revert an ongoing transaction.
Database types that do not support transactions may silently ignore it.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'begin_transaction' is missing")
def cancel_transaction(self):
"""cancel an ongoing transaction
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'cancel_transaction' is missing")
def commit_transaction(self):
"""Submit the currently ongoing transaction and write changes to disk.
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'commit_transaction' is missing")
def get_query_result(self, query):
"""return an object containing the results of a query
:param query: a pre-compiled query
:type query: a query object of the real implementation
:return: an object that allows access to the results
:rtype: subclass of CommonEnquire
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'get_query_result' is missing")
def delete_document_by_id(self, docid):
"""Delete a specified document.
:param docid: the document ID to be deleted
:type docid: int
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'delete_document_by_id' is missing")
def search(self, query, fieldnames):
"""Return a list of the contents of specified fields for all
matches of a query.
:param query: the query to be issued
:type query: a query object of the real implementation
:param fieldnames: the name(s) of a field of the document content
:type fieldnames: string | list of strings
:return: a list of dicts containing the specified field(s)
:rtype: list of dicts
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'search' is missing")
def delete_doc(self, ident):
"""Delete the documents returned by a query.
:param ident: [list of] document IDs | dict describing a query | query
:type ident: int | list of tuples | dict | list of dicts |
query (e.g. xapian.Query) | list of queries
"""
# turn a doc-ID into a list of doc-IDs
if isinstance(ident, list):
# it is already a list
ident_list = ident
else:
ident_list = [ident]
if len(ident_list) == 0:
# no matching items
return 0
if isinstance(ident_list[0], int) or isinstance(ident_list[0], long):
# create a list of IDs of all successfully removed documents
success_delete = [match for match in ident_list
if self.delete_document_by_id(match)]
return len(success_delete)
if isinstance(ident_list[0], dict):
# something like: { "msgid": "foobar" }
# assemble all queries
query = self.make_query([self.make_query(query_dict,
require_all=True) for query_dict in ident_list],
require_all=True)
elif isinstance(ident_list[0], object):
# assume a query object (with 'AND')
query = self.make_query(ident_list, require_all=True)
else:
# invalid element type in list (not necessarily caught in the
# lines above)
raise TypeError("description of documents to-be-deleted is not " \
+ "supported: list of %s" % type(ident_list[0]))
# we successfully created a query - now iterate through the result
# no documents deleted so far ...
remove_list = []
# delete all resulting documents step by step
def add_docid_to_list(match):
"""Collect every document ID."""
remove_list.append(match["docid"])
self._walk_matches(query, add_docid_to_list)
return self.delete_doc(remove_list)
def _walk_matches(self, query, function, arg_for_function=None):
"""Use this function if you want to do something with every single match
of a query.
Example::
self._walk_matches(query, function_for_match, arg_for_func)
*function_for_match* expects only one argument: the matched object
:param query: a query object of the real implementation
:type query: xapian.Query | PyLucene.Query
:param function: the function to execute with every match
:type function: function
:param arg_for_function: an optional argument for the function
:type arg_for_function: anything
"""
# execute the query
enquire = self.get_query_result(query)
# start with the first element
start = 0
# do the loop at least once
size, avail = (0, 1)
# how many results per 'get_matches'?
steps = 2
while start < avail:
(size, avail, matches) = enquire.get_matches(start, steps)
for match in matches:
if arg_for_function is None:
function(match)
else:
function(match, arg_for_function)
start += size
def set_field_analyzers(self, field_analyzers):
"""Set the analyzers for different fields of the database documents.
All bitwise combinations of *CommonIndexer.ANALYZER_???* are possible.
:param field_analyzers: mapping of field names and analyzers
:type field_analyzers: dict containing field names and analyzers
:raise TypeError: invalid values in *field_analyzers*
"""
for field, analyzer in field_analyzers.items():
            # check for invalid input types
if not isinstance(field, (str, unicode)):
raise TypeError("field name must be a string")
if not isinstance(analyzer, int):
raise TypeError("the analyzer must be a whole number (int)")
# map the analyzer to the field name
self.field_analyzers[field] = analyzer
def get_field_analyzers(self, fieldnames=None):
"""Return the analyzer that was mapped to a specific field.
See :meth:`~.CommonDatabase.set_field_analyzers` for details.
:param fieldnames: the analyzer of this field (or all/multiple fields)
is requested; leave empty (or *None*) to
request all fields.
:type fieldnames: str | list of str | None
:return: The analyzer setting of the field - see
*CommonDatabase.ANALYZER_???* or a dict of field names
and analyzers
:rtype: int | dict
"""
# all field analyzers are requested
if fieldnames is None:
# return a copy
return dict(self.field_analyzers)
# one field is requested
if isinstance(fieldnames, (str, unicode)):
if fieldnames in self.field_analyzers:
return self.field_analyzers[fieldnames]
else:
return self.analyzer
# a list of fields is requested
if isinstance(fieldnames, list):
result = {}
for field in fieldnames:
result[field] = self.get_field_analyzers(field)
return result
return self.analyzer
    def _decode(self, text):
        """Decode the string from utf-8 or charmap and perform
        unicode normalization."""
if isinstance(text, str):
try:
result = unicode(text.decode("UTF-8"))
            except UnicodeDecodeError, e:
result = unicode(text.decode("charmap"))
elif not isinstance(text, unicode):
result = unicode(text)
else:
result = text
# perform unicode normalization
return translate.lang.data.normalize(result)
class CommonEnquire(object):
"""An enquire object contains the information about the result of a request.
"""
    def __init__(self, enquire):
        """Initialization of a wrapper around enquires of different backends
:param enquire: a previous enquire
:type enquire: xapian.Enquire | pylucene-enquire
"""
self.enquire = enquire
def get_matches(self, start, number):
"""Return a specified number of qualified matches of a previous query.
:param start: index of the first match to return (starting from zero)
:type start: int
:param number: the number of matching entries to return
:type number: int
:return: a set of matching entries and some statistics
:rtype: tuple of (returned number, available number, matches)
"matches" is a dictionary of::
["rank", "percent", "document", "docid"]
"""
raise NotImplementedError("Incomplete indexing implementation: " \
+ "'get_matches' for the 'Enquire' class is missing")
def get_matches_count(self):
"""Return the estimated number of matches.
Use :meth:`translate.search.indexing.CommonIndexer.search`
to retrieve the exact number of matches
:return: The estimated number of matches
:rtype: int
"""
(returned, estimate_count, matches) = self.get_matches(0, 1)
return estimate_count
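if __name__ == '__main__':
    # Tiny self-contained sketch (not part of the original module): it only
    # exercises the analyzer bit flags defined above and touches no real
    # index or backend.
    default = CommonDatabase.ANALYZER_DEFAULT
    print "tokenize enabled:", bool(default & CommonDatabase.ANALYZER_TOKENIZE)
    print "partial matching:", bool(default & CommonDatabase.ANALYZER_PARTIAL)
    print "exact only:", default == CommonDatabase.ANALYZER_EXACT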
| bsd-3-clause |
dmeulen/home-assistant | tests/conftest.py | 8 | 1542 | """Setup some common test helper things."""
import functools
import logging
import pytest
import requests_mock as _requests_mock
from homeassistant import util
from homeassistant.util import location
from .common import async_test_home_assistant
from .test_util.aiohttp import mock_aiohttp_client
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
def test_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
def guard_func(*args, **kwargs):
real = kwargs.pop('_test_real', None)
if not real:
            raise Exception('Forgot to mock or pass "_test_real=True" to %s'
                            % func.__name__)
return func(*args, **kwargs)
return guard_func
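# Illustrative sketch (not part of the original conftest): calling a guarded
# function without the keyword raises, while _test_real=True reaches the
# real implementation.
#
#     @test_real
#     def fetch_something(url):
#         ...
#
#     fetch_something('http://example.com')                  # raises Exception
#     fetch_something('http://example.com', _test_real=True) # runs for real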
# Guard a few functions that would make network connections
location.detect_location_info = test_real(location.detect_location_info)
location.elevation = test_real(location.elevation)
util.get_local_ip = lambda: '127.0.0.1'
@pytest.fixture
def hass(loop):
"""Fixture to provide a test instance of HASS."""
hass = loop.run_until_complete(async_test_home_assistant(loop))
yield hass
loop.run_until_complete(hass.async_stop())
@pytest.fixture
def requests_mock():
"""Fixture to provide a requests mocker."""
with _requests_mock.mock() as m:
yield m
@pytest.fixture
def aioclient_mock():
"""Fixture to mock aioclient calls."""
with mock_aiohttp_client() as mock_session:
yield mock_session
| mit |
thezawad/flexx | flexx/ui/_formlayout.py | 20 | 8438 | """
Example:
.. UIExample:: 200
from flexx import ui
class Example(ui.Widget):
def init(self):
with ui.FormLayout():
ui.Label(text='Pet name:')
self.b1 = ui.LineEdit()
ui.Label(text='Pet Age:')
self.b2 = ui.LineEdit()
ui.Label(text="Pet's Favorite color:")
self.b3 = ui.LineEdit()
ui.Widget(flex=1)
"""
from .. import react
from . import Widget, Layout
class BaseTableLayout(Layout):
""" Abstract base class for layouts that use an HTML table.
Layouts that use this approach are rather bad in performance when
resizing. This is not so much a problem when it is a leaf layout,
but we don't recommend embedding such layouts in each-other.
"""
CSS = """
/* Clear any styling on this table (rendered_html is an IPython thing) */
.flx-basetablelayout, .flx-basetablelayout td, .flx-basetablelayout tr,
.rendered_html .flx-basetablelayout {
border: 0px;
padding: initial;
margin: initial;
background: initial;
}
/* Behave well inside hbox/vbox,
we assume no layouts to be nested inside a table layout */
.flx-hbox > .flx-basetablelayout {
width: auto;
}
.flx-vbox > .flx-basetablelayout {
height: auto;
}
/* In flexed cells, occupy the full space */
td.vflex > .flx-widget {
height: 100%;
}
td.hflex > .flx-widget {
width: 100%;
}
"""
class JS:
def _apply_table_layout(self):
table = self.node
AUTOFLEX = 729 # magic number unlikely to occur in practice
# Get table dimensions
nrows = len(table.children)
ncols = 0
for i in range(len(table.children)):
row = table.children[i]
ncols = max(ncols, len(row.children))
if ncols == 0 and nrows == 0:
return
# Collect flexes
vflexes = []
hflexes = []
for i in range(nrows):
row = table.children[i]
for j in range(ncols):
col = row.children[j]
if (col is undefined) or (len(col.children) == 0):
continue
vflexes[i] = max(vflexes[i] or 0, col.children[0].vflex or 0)
hflexes[j] = max(hflexes[j] or 0, col.children[0].hflex or 0)
# What is the cumulative "flex-value"?
cum_vflex = vflexes.reduce(lambda pv, cv: pv + cv, 0)
cum_hflex = hflexes.reduce(lambda pv, cv: pv + cv, 0)
# If no flexes are given; assign each equal
if (cum_vflex == 0):
for i in range(len(vflexes)):
vflexes[i] = AUTOFLEX
cum_vflex = len(vflexes) * AUTOFLEX
if (cum_hflex == 0):
for i in range(len(hflexes)):
hflexes[i] = AUTOFLEX
cum_hflex = len(hflexes) * AUTOFLEX
# Assign css class and height/weight to cells
for i in range(nrows):
row = table.children[i]
row.vflex = vflexes[i] or 0 # Store for use during resizing
for j in range(ncols):
col = row.children[j];
if (col is undefined) or (col.children.length is 0):
continue
self._apply_cell_layout(row, col, vflexes[i], hflexes[j], cum_vflex, cum_hflex)
@react.connect('actual_size')
def _adapt_to_size_change(self, size):
""" This function adapts the height (in percent) of the flexible rows
of a layout. This is needed because the percent-height applies to the
total height of the table. This function is called whenever the
table resizes, and adjusts the percent-height, taking the available
            remaining table height into account. This is not necessary for the
            width, since percent-width in columns *does* apply to available width.
"""
table = self.node # or event.target
#print('heigh changed', event.heightChanged, event.owner.__id)
if not self.actual_size.last_value or (self.actual_size.value[1] !=
self.actual_size.last_value[1]):
# Set one flex row to max, so that non-flex rows have their
# minimum size. The table can already have been stretched
# a bit, causing the total row-height in % to not be
# sufficient from keeping the non-flex rows from growing.
for i in range(len(table.children)):
row = table.children[i]
if (row.vflex > 0):
row.style.height = '100%'
break
# Get remaining height: subtract height of each non-flex row
remainingHeight = table.clientHeight
cum_vflex = 0
for i in range(len(table.children)):
row = table.children[i]
cum_vflex += row.vflex
if (row.vflex == 0) and (row.children.length > 0):
remainingHeight -= row.children[0].clientHeight
# Apply height % for each flex row
remainingPercentage = 100 * remainingHeight / table.clientHeight
for i in range(len(table.children)):
row = table.children[i]
if row.vflex > 0:
row.style.height = round(row.vflex / cum_vflex * remainingPercentage) + 1 + '%'
def _apply_cell_layout(self, row, col, vflex, hflex, cum_vflex, cum_hflex):
raise NotImplementedError()
class FormLayout(BaseTableLayout):
""" A form layout organizes pairs of widgets vertically.
    Note: the API may change. Maybe the label can be derived from the
    widgets' ``title`` property?
"""
CSS = """
.flx-formlayout > tr > td > .flx-label {
text-align: right;
}
"""
class JS:
def _create_node(self):
this.node = document.createElement('table')
this.node.appendChild(document.createElement('tr'))
def _add_child(self, widget):
# Get row, create if necessary
row = this.node.children[-1]
itemsInRow = row.children.length
if itemsInRow >= 2:
row = document.createElement('tr')
self.node.appendChild(row)
# Create td and add widget to it
td = document.createElement("td")
row.appendChild(td)
td.appendChild(widget.node)
#
self._update_layout()
self._apply_table_layout()
# do not call super!
def _update_layout(self):
""" Set hflex and vflex on node.
"""
i = 0
for widget in self.children():
i += 1
widget.node.hflex = 0 if (i % 2) else 1
widget.node.vflex = widget.flex()
self._apply_table_layout()
def _remove_child(self, widget):
pass
# do not call super!
def _apply_cell_layout(self, row, col, vflex, hflex, cum_vflex, cum_hflex):
AUTOFLEX = 729
className = ''
if (vflex == AUTOFLEX) or (vflex == 0):
row.style.height = 'auto'
className += ''
else:
row.style.height = vflex * 100 / cum_vflex + '%'
className += 'vflex'
className += ' '
if (hflex == 0):
col.style.width = 'auto'
className += ''
else:
col.style.width = '100%'
className += 'hflex'
col.className = className
class GridLayout(BaseTableLayout):
""" Not implemented.
Do we even need it? If we do implement it, we need a way to specify
the vertical flex value.
"""
| bsd-2-clause |
louietsai/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/X509CertChain.py | 238 | 6861 | """Class representing an X.509 certificate chain."""
from utils import cryptomath
class X509CertChain:
"""This class represents a chain of X.509 certificates.
@type x509List: list
@ivar x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
def __init__(self, x509List=None):
"""Create a new X509CertChain.
@type x509List: list
@param x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
if x509List:
self.x509List = x509List
else:
self.x509List = []
def getNumCerts(self):
"""Get the number of certificates in this chain.
@rtype: int
"""
return len(self.x509List)
def getEndEntityPublicKey(self):
"""Get the public key from the end-entity certificate.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].publicKey
def getFingerprint(self):
"""Get the hex-encoded fingerprint of the end-entity certificate.
@rtype: str
@return: A hex-encoded fingerprint.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getFingerprint()
def getCommonName(self):
"""Get the Subject's Common Name from the end-entity certificate.
The cryptlib_py module must be installed in order to use this
function.
@rtype: str or None
@return: The CN component of the certificate's subject DN, if
present.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getCommonName()
def validate(self, x509TrustList):
"""Check the validity of the certificate chain.
This checks that every certificate in the chain validates with
the subsequent one, until some certificate validates with (or
is identical to) one of the passed-in root certificates.
The cryptlib_py module must be installed in order to use this
function.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
certificate chain must extend to one of these certificates to
be considered valid.
"""
import cryptlib_py
c1 = None
c2 = None
lastC = None
rootC = None
try:
rootFingerprints = [c.getFingerprint() for c in x509TrustList]
#Check that every certificate in the chain validates with the
#next one
for cert1, cert2 in zip(self.x509List, self.x509List[1:]):
#If we come upon a root certificate, we're done.
if cert1.getFingerprint() in rootFingerprints:
return True
c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
try:
cryptlib_py.cryptCheckCert(c1, c2)
except:
return False
cryptlib_py.cryptDestroyCert(c1)
c1 = None
cryptlib_py.cryptDestroyCert(c2)
c2 = None
#If the last certificate is one of the root certificates, we're
#done.
if self.x509List[-1].getFingerprint() in rootFingerprints:
return True
#Otherwise, find a root certificate that the last certificate
#chains to, and validate them.
lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(),
cryptlib_py.CRYPT_UNUSED)
for rootCert in x509TrustList:
rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
if self._checkChaining(lastC, rootC):
try:
cryptlib_py.cryptCheckCert(lastC, rootC)
return True
except:
return False
return False
finally:
if not (c1 is None):
cryptlib_py.cryptDestroyCert(c1)
if not (c2 is None):
cryptlib_py.cryptDestroyCert(c2)
if not (lastC is None):
cryptlib_py.cryptDestroyCert(lastC)
if not (rootC is None):
cryptlib_py.cryptDestroyCert(rootC)
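    # Illustrative sketch (not part of the original module): a chain received
    # during a TLS handshake would typically be checked against a list of
    # parsed trusted root certificates, e.g.:
    #
    #     if not cert_chain.validate(trusted_roots):  # trusted_roots: [X509, ...]
    #         raise Exception("untrusted server certificate chain")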
def _checkChaining(self, lastC, rootC):
import cryptlib_py
import array
def compareNames(name):
try:
length = cryptlib_py.cryptGetAttributeString(lastC, name, None)
lastName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(lastC, name, lastName)
lastName = lastName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
lastName = None
try:
length = cryptlib_py.cryptGetAttributeString(rootC, name, None)
rootName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(rootC, name, rootName)
rootName = rootName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
rootName = None
return lastName == rootName
cryptlib_py.cryptSetAttribute(lastC,
cryptlib_py.CRYPT_CERTINFO_ISSUERNAME,
cryptlib_py.CRYPT_UNUSED)
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME):
return False
return True | apache-2.0 |
miniconfig/home-assistant | homeassistant/components/switch/netio.py | 15 | 5672 | """
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_STOP, STATE_ON)
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pynetio==0.1.6']
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_POWER_MWH = 'current_power_mwh'
ATTR_CURRENT_POWER_W = 'current_power_w'
ATTR_START_DATE = 'start_date'
ATTR_TODAY_MWH = 'today_mwh'
ATTR_TOTAL_CONSUMPTION_KWH = 'total_energy_kwh'
CONF_OUTLETS = 'outlets'
DEFAULT_PORT = 1234
DEFAULT_USERNAME = 'admin'
DEPENDENCIES = ['http']
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
URL_API_NETIO_EP = '/api/netio/{host}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
})
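# Illustrative configuration.yaml entry (not part of the original module;
# host, credentials and outlet labels are made up, keys follow PLATFORM_SCHEMA):
#
#   switch:
#     - platform: netio
#       host: 192.168.1.2
#       port: 1234
#       username: admin
#       password: secret
#       outlets:
#         1: "TV"
#         2: "Router"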
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Configure the Netio platform."""
from pynetio import Netio
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
if len(DEVICES) == 0:
hass.http.register_view(NetioApiView)
dev = Netio(host, port, username, password)
DEVICES[host] = Device(dev, [])
# Throttle the update for all NetioSwitches of one Netio
dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
for key in config[CONF_OUTLETS]:
switch = NetioSwitch(
DEVICES[host].netio, key, config[CONF_OUTLETS][key])
DEVICES[host].entities.append(switch)
add_devices(DEVICES[host].entities)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
return True
def dispose(event):
"""Close connections to Netio Devices."""
for _, value in DEVICES.items():
value.netio.stop()
class NetioApiView(HomeAssistantView):
"""WSGI handler class."""
url = URL_API_NETIO_EP
name = 'api:netio'
@callback
def get(self, request, host):
"""Request handler."""
hass = request.app['hass']
data = request.GET
states, consumptions, cumulated_consumptions, start_dates = \
[], [], [], []
for i in range(1, 5):
out = 'output%d' % i
states.append(data.get('%s_state' % out) == STATE_ON)
consumptions.append(float(data.get('%s_consumption' % out, 0)))
cumulated_consumptions.append(
float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
start_dates.append(data.get('%s_consumptionStart' % out, ""))
_LOGGER.debug('%s: %s, %s, %s since %s', host, states,
consumptions, cumulated_consumptions, start_dates)
ndev = DEVICES[host].netio
ndev.consumptions = consumptions
ndev.cumulated_consumptions = cumulated_consumptions
ndev.states = states
ndev.start_dates = start_dates
for dev in DEVICES[host].entities:
hass.async_add_job(dev.async_update_ha_state())
return self.json(True)
class NetioSwitch(SwitchDevice):
"""Provide a netio linked switch."""
def __init__(self, netio, outlet, name):
"""Defined to handle throttle."""
self._name = name
self.outlet = outlet
self.netio = netio
@property
def name(self):
"""Netio device's name."""
return self._name
@property
def available(self):
"""Return True if entity is available."""
return not hasattr(self, 'telnet')
def turn_on(self):
"""Turn switch on."""
self._set(True)
def turn_off(self):
"""Turn switch off."""
self._set(False)
def _set(self, value):
val = list('uuuu')
val[self.outlet - 1] = '1' if value else '0'
self.netio.get('port list %s' % ''.join(val))
self.netio.states[self.outlet - 1] = value
self.schedule_update_ha_state()
@property
def is_on(self):
"""Return switch's status."""
return self.netio.states[self.outlet - 1]
def update(self):
"""Called by Home Assistant."""
self.netio.update()
@property
def state_attributes(self):
"""Return optional state attributes."""
return {
ATTR_CURRENT_POWER_W: self.current_power_w,
ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
ATTR_START_DATE: self.start_date.split('|')[0]
}
@property
def current_power_w(self):
"""Return actual power."""
return self.netio.consumptions[self.outlet - 1]
@property
    def cumulated_consumption_kwh(self):
        """Total energy consumption since start_date."""
return self.netio.cumulated_consumptions[self.outlet - 1]
@property
def start_date(self):
"""Point in time when the energy accumulation started."""
return self.netio.start_dates[self.outlet - 1]
| mit |
TaiSakuma/AlphaTwirl | tests/unit/roottree/test_EventBuilder.py | 1 | 2175 | import unittest
import sys
from alphatwirl.roottree import EventBuilderConfig
##__________________________________________________________________||
hasROOT = False
try:
import ROOT
hasROOT = True
except ImportError:
pass
if hasROOT:
from alphatwirl.roottree.EventBuilder import EventBuilder
##__________________________________________________________________||
class MockTChain(object):
def __init__(self, name):
self.treeName = name
self.paths = [ ]
def Add(self, name):
self.paths.append(name)
##__________________________________________________________________||
class MockROOT(object):
def __init__(self):
self.TChain = MockTChain
##__________________________________________________________________||
class MockEvents(object):
def __init__(self, tree, maxEvents, start = 0):
self.tree = tree
self.maxEvents = maxEvents
self.start = start
##__________________________________________________________________||
@unittest.skipUnless(hasROOT, "has no ROOT")
class TestEventBuilder(unittest.TestCase):
def setUp(self):
self.module = sys.modules['alphatwirl.roottree.EventBuilder']
self.orgROOT = self.module.ROOT
self.module.ROOT = MockROOT()
self.orgEvents = self.module.Events
self.module.Events = MockEvents
def tearDown(self):
self.module.ROOT = self.orgROOT
        self.module.Events = self.orgEvents
def test_build(self):
config = EventBuilderConfig(
inputPaths = ['/heppyresult/dir/TTJets/treeProducerSusyAlphaT/tree.root'],
treeName = 'tree',
maxEvents = 123,
start = 11,
name = 'TTJets'
)
obj = EventBuilder(config)
events = obj()
self.assertEqual(['/heppyresult/dir/TTJets/treeProducerSusyAlphaT/tree.root'], events.tree.paths)
self.assertIsInstance(events, MockEvents)
self.assertEqual('tree', events.tree.treeName)
self.assertEqual(11, events.start)
self.assertEqual(123, events.maxEvents)
##__________________________________________________________________||
| bsd-3-clause |
mpare002/HackTech_2017 | env/Lib/site-packages/werkzeug/contrib/profiler.py | 362 | 5151 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
    bottlenecks in web applications. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stderr).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import time
import os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
"""An object that redirects `write` calls to multiple streams.
Use this to log to both `sys.stdout` and a file::
f = open('profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
profiler = ProfilerMiddleware(app, stream)
"""
def __init__(self, *streams):
if not streams:
raise TypeError('at least one stream must be given')
self.streams = streams
def write(self, data):
for stream in self.streams:
stream.write(data)
class ProfilerMiddleware(object):
"""Simple profiler middleware. Wraps a WSGI application and profiles
a request. This intentionally buffers the response so that timings are
more exact.
By giving the `profile_dir` argument, pstat.Stats files are saved to that
directory, one file per request. Without it, a summary is printed to
`stream` instead.
For the exact meaning of `sort_by` and `restrictions` consult the
:mod:`profile` documentation.
.. versionadded:: 0.9
Added support for `restrictions` and `profile_dir`.
:param app: the WSGI application to profile.
:param stream: the stream for the profiled stats. defaults to stderr.
:param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling restrictions, not used if dumping
to `profile_dir`.
:param profile_dir: directory name to save pstat files
"""
def __init__(self, app, stream=None,
sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
if not available:
raise RuntimeError('the profiler is not available because '
'profile or pstat is not installed.')
self._app = app
self._stream = stream or sys.stdout
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
def __call__(self, environ, start_response):
response_body = []
def catching_start_response(status, headers, exc_info=None):
start_response(status, headers, exc_info)
return response_body.append
def runapp():
appiter = self._app(environ, catching_start_response)
response_body.extend(appiter)
if hasattr(appiter, 'close'):
appiter.close()
p = Profile()
start = time.time()
p.runcall(runapp)
body = b''.join(response_body)
elapsed = time.time() - start
if self._profile_dir is not None:
prof_filename = os.path.join(self._profile_dir,
'%s.%s.%06dms.%d.prof' % (
environ['REQUEST_METHOD'],
environ.get('PATH_INFO').strip(
'/').replace('/', '.') or 'root',
elapsed * 1000.0,
time.time()
))
p.dump_stats(prof_filename)
else:
stats = Stats(p, stream=self._stream)
stats.sort_stats(*self._sort_by)
self._stream.write('-' * 80)
self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
stats.print_stats(*self._restrictions)
self._stream.write('-' * 80 + '\n\n')
return [body]
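# Illustrative sketch (not from the original module): writing one pstats file
# per request instead of printing a summary; the directory name is made up.
#
#     import os
#     if not os.path.isdir('./profiles'):
#         os.makedirs('./profiles')
#     app = ProfilerMiddleware(app, profile_dir='./profiles')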
def make_action(app_factory, hostname='localhost', port=5000,
threaded=False, processes=1, stream=None,
sort_by=('time', 'calls'), restrictions=()):
"""Return a new callback for :mod:`werkzeug.script` that starts a local
server with the profiler enabled.
::
from werkzeug.contrib import profiler
action_profile = profiler.make_action(make_app)
"""
def action(hostname=('h', hostname), port=('p', port),
threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
run_simple(hostname, port, app, False, None, threaded, processes)
return action
| mit |
TheWaunaKeeganOrganization/Yahtzee | src/yahtzee_categories.py | 1 | 1373 | from collections import Counter
def ones(d):
return 1*d.count(1)
def twos(d):
return 2*d.count(2)
def threes(d):
return 3*d.count(3)
def fours(d):
return 4*d.count(4)
def fives(d):
return 5*d.count(5)
def sixes(d):
return 6*d.count(6)
def threeOfAKind(d):
if max(Counter(d).itervalues())>=3:
return sum(d)
return 0
def fourOfAKind(d):
if max(Counter(d).itervalues())>=4:
return sum(d)
return 0
def fullHouse(d):
    # sorted counts must be exactly [2, 3]; indexing the Counter values
    # directly raised IndexError on a five-of-a-kind roll
    counts = sorted(Counter(d).itervalues())
    if counts == [2, 3]:
        return 25
    return 0
def smallStraight(d):
    # any four consecutive values score 30, regardless of the lowest die
    faces = set(d)
    for run in ({1, 2, 3, 4}, {2, 3, 4, 5}, {3, 4, 5, 6}):
        if run <= faces:
            return 30
    return 0
def largeStraight(d):
    # five consecutive values score 40 (standard Yahtzee scoring)
    faces = set(d)
    if faces == {1, 2, 3, 4, 5} or faces == {2, 3, 4, 5, 6}:
        return 40
    return 0
def yahtzee(d):
if d.count(d[0])==5:
return 50
return 0
def chance(d):
return sum(d)
def allCategories(d):
scores={}
scores["ones"]=ones(d)
scores["twos"]=twos(d)
scores["threes"]=threes(d)
scores["fours"]=fours(d)
scores["fives"]=fives(d)
scores["sixes"]=sixes(d)
scores["threeOfAKind"]=threeOfAKind(d)
scores["fourOfAKind"]=fourOfAKind(d)
scores["fullHouse"]=fullHouse(d)
scores["smallStraight"]=smallStraight(d)
scores["largeStraight"]=largeStraight(d)
scores["yahtzee"]=yahtzee(d)
scores["chance"]=chance(d)
return scores
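if __name__ == '__main__':
    # Quick illustrative roll (dice values are arbitrary): three threes and
    # two fives score a full house (25), and chance is simply the sum (19).
    roll = [3, 3, 3, 5, 5]
    print fullHouse(roll)
    print chance(roll)
    print allCategories(roll)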
| gpl-3.0 |
chaos-adept/timelyb | appengine_config.py | 1 | 1421 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sample remote_api appengine_config for copying datastore across apps.
For more information, see
http://code.google.com/appengine/docs/adminconsole/
Note that this appengine_config.py file is the same one that you would
use for appstats; if you are bundling this with your existing app you may
wish to copy the version from
google/appengine/ext/appstats/sample_appengine_config.py instead.
"""
#########################################
# Remote_API Authentication configuration.
#
# See google/appengine/ext/remote_api/handler.py for more information.
# For datastore_admin datastore copy, you should set the source appid
# value. 'HTTP_X_APPENGINE_INBOUND_APPID', ['trusted source appid here']
#
remoteapi_CUSTOM_ENVIRONMENT_AUTHENTICATION = (
'HTTP_X_APPENGINE_INBOUND_APPID', ['timelyb-helloword-server'])
| gpl-3.0 |
glenux/contrib-mitro | browser-ext/third_party/firefox-addon-sdk/python-lib/simplejson/encoder.py | 67 | 13492 | """
Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
pass
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return FLOAT_REPR(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
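# For illustration (not part of the original module):
# encode_basestring('a"b\nc') returns '"a\\"b\\nc"' -- the quote and the
# newline are escaped via ESCAPE_DCT, other characters pass through.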
def py_encode_basestring_ascii(s):
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
try:
encode_basestring_ascii = c_encode_basestring_ascii
except NameError:
encode_basestring_ascii = py_encode_basestring_ascii
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation (to
    raise ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
__all__ = ['JSONEncoder']
| gpl-3.0 |
westial/NdArrayIndexer.py | tests/test_NdArrayIndexer_3axes.py | 1 | 4489 | #!/usr/bin/env python
#
# Testing ndarray with 3 axes
#
import numpy as np
from NdArrayIndexer import NdArrayIndexer
# Structure of the unsorted list, to be converted into the same shape as testing_array
testing_list = [
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]]
]
testing_array = np.array(testing_list)
print testing_array
print "------------------"
arr = NdArrayIndexer(testing_array)
arr.run()
print arr.get() | gpl-3.0 |
gerryhd/diabot-assistant | lib/python2.7/site-packages/jinja2/compiler.py | 117 | 62929 | # -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from functools import update_wrapper
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.optimizer import Optimizer
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, text_type, string_types, \
iteritems, NativeStringIO, imap, izip
from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
code_features = ['division']
# does this python version support generator stops? (PEP 0479)
try:
exec('from __future__ import generator_stop')
code_features.append('generator_stop')
except SyntaxError:
pass
# does this python version support yield from?
try:
exec('def f(): yield from x()')
except SyntaxError:
supports_yield_from = False
else:
supports_yield_from = True
def optimizeconst(f):
def new_func(self, node, frame, **kwargs):
# Only optimize if the frame is not volatile
if self.optimized and not frame.eval_ctx.volatile:
new_node = self.optimizer.visit(node, frame.eval_ctx)
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
return update_wrapper(new_func, f)
def generate(node, environment, name, filename, stream=None,
defer_init=False, optimized=True):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = environment.code_generator_class(environment, name, filename,
stream, defer_init,
optimized)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
def has_safe_repr(value):
    """Does the value have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
return True
if type(value) in (tuple, list, set, frozenset):
for item in value:
if not has_safe_repr(item):
return False
return True
elif type(value) is dict:
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
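# Illustrative usage only (mirrors how macro_body calls this below): for a
# macro body that reads ``caller`` without declaring it,
# find_undeclared(node.body, ('caller', 'kwargs', 'varargs')) would return a
# set containing 'caller'; names that are never accessed are simply absent.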
class MacroRef(object):
def __init__(self, node):
self.node = node
self.accesses_caller = False
self.accesses_kwargs = False
self.accesses_varargs = False
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.symbols = Symbols(parent and parent.symbols or None)
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# the parent of this frame
self.parent = parent
if parent is not None:
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.symbols = self.symbols.copy()
return rv
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
        a standalone thing as it shares the resources with the frame it
        was created from, but it is no longer a rootlevel frame.
This is only used to implement if-statements.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
    def visit_Block(self, node):
        """Stop visiting at blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False, optimized=True):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
self.optimized = optimized
if optimized:
self.optimizer = Optimizer(environment)
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# Tracks toplevel assignments
self._assign_stack = []
# Tracks parameter definition blocks
self._param_def_block = []
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
return
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
        has no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
self.writeline('pass')
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
        arguments may not include python keywords, otherwise a syntax
        error could occur. The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
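        # Illustration (a rough sketch of the emitted text, not verbatim):
        # a call such as ``foo(1, class='x')`` is written as
        # ``, 1, **{'class': 'x'}`` because ``class='x'`` would be a syntax
        # error in the generated Python code.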
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def enter_frame(self, frame):
undefs = []
for target, (action, param) in iteritems(frame.symbols.loads):
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
self.writeline('%s = resolve(%r)' %
(target, param))
elif action == VAR_LOAD_ALIAS:
self.writeline('%s = %s' % (target, param))
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
raise NotImplementedError('unknown load instruction')
if undefs:
self.writeline('%s = missing' % ' = '.join(undefs))
def leave_frame(self, frame, with_python_scope=False):
if not with_python_scope:
undefs = []
for target, _ in iteritems(frame.symbols.loads):
undefs.append(target)
if undefs:
self.writeline('%s = missing' % ' = '.join(undefs))
def func(self, name):
if self.environment.is_async:
return 'async def %s' % name
return 'def %s' % name
def macro_body(self, node, frame):
"""Dump the function def of a macro or call block."""
frame = frame.inner()
frame.symbols.analyze_node(node)
macro_ref = MacroRef(node)
explicit_caller = None
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
if arg.name == 'caller':
explicit_caller = idx
if arg.name in ('kwargs', 'varargs'):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
# In older Jinja2 versions there was a bug that allowed caller
# to retain the special behavior even if it was mentioned in
# the argument list. However thankfully this was only really
# working if it was the last argument. So we are explicitly
# checking this now and error out if it is anywhere else in
# the argument list.
if explicit_caller is not None:
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
self.fail('When defining macros or call blocks the '
'special "caller" argument must be omitted '
'or be given a default.', node.lineno)
else:
args.append(frame.symbols.declare_parameter('caller'))
macro_ref.accesses_caller = True
if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
args.append(frame.symbols.declare_parameter('kwargs'))
macro_ref.accesses_kwargs = True
if 'varargs' in undeclared and not 'varargs' in skip_special_params:
args.append(frame.symbols.declare_parameter('varargs'))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
self.indent()
self.buffer(frame)
self.enter_frame(frame)
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
self.writeline('if %s is missing:' % ref)
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
self.writeline('%s = undefined(%r, name=%r)' % (
ref,
'parameter %r was not provided' % arg.name,
arg.name))
else:
self.writeline('%s = ' % ref)
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
self.pop_parameter_definitions()
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame, force_unescaped=True)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
return frame, macro_ref
def macro_def(self, macro_ref, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
name = getattr(macro_ref.node, 'name', None)
if len(macro_ref.node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
'context.eval_ctx.autoescape)' %
(name, arg_tuple, macro_ref.accesses_kwargs,
macro_ref.accesses_varargs, macro_ref.accesses_caller))
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
def dump_local_context(self, frame):
return '{%s}' % ', '.join(
'%r: %s' % (name, target) for name, target
in iteritems(frame.symbols.dump_stores()))
def write_commons(self):
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
self.writeline('resolve = context.resolve_or_missing')
self.writeline('undefined = environment.undefined')
self.writeline('if 0: yield None')
def push_parameter_definitions(self, frame):
"""Pushes all parameter targets from the given frame into a local
stack that permits tracking of yet to be assigned parameters. In
particular this enables the optimization from `visit_Name` to skip
undefined expressions for parameters in macros as macros can reference
otherwise unbound parameters.
"""
self._param_def_block.append(frame.symbols.dump_param_targets())
def pop_parameter_definitions(self):
"""Pops the current parameter definitions set."""
self._param_def_block.pop()
def mark_parameter_stored(self, target):
"""Marks a parameter in the current parameter definitions as stored.
This will skip the enforced undefined checks.
"""
if self._param_def_block:
self._param_def_block[-1].discard(target)
def parameter_is_undeclared(self, target):
"""Checks if a given target is an undeclared parameter."""
if not self._param_def_block:
return False
return target in self._param_def_block[-1]
def push_assign_tracking(self):
"""Pushes a new layer for assignment tracking."""
self._assign_stack.append(set())
def pop_assign_tracking(self, frame):
"""Pops the topmost level for assignment tracking and updates the
context variables if necessary.
"""
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
public_names = [x for x in vars if x[:1] != '_']
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
self.writeline('context.vars[%r] = %s' % (name, ref))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(vars):
if idx:
self.write(', ')
ref = frame.symbols.ref(name)
self.write('%r: %s' % (name, ref))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names)))
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import %s' % ', '.join(code_features))
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if self.environment.is_async:
self.writeline('from jinja2.asyncsupport import auto_await, '
'auto_aiter, make_async_loop_context')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('%s(context, missing=missing%s):' %
(self.func('root'), envenv), extra=1)
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
if 'self' in find_undeclared(node.body, ('self',)):
ref = frame.symbols.declare_parameter('self')
self.writeline('%s = TemplateReference(context)' % ref)
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
self.writeline('parent_template = None')
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
if supports_yield_from and not self.environment.is_async:
self.writeline('yield from parent_template.'
'root_render_func(context)')
else:
self.writeline('%sfor event in parent_template.'
'root_render_func(context):' %
(self.environment.is_async and 'async ' or ''))
self.indent()
self.writeline('yield event')
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
self.writeline('%s(context, missing=missing%s):' %
(self.func('block_' + name), envenv),
block, 1)
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
ref = block_frame.symbols.declare_parameter('self')
self.writeline('%s = TemplateReference(context)' % ref)
if 'super' in undeclared:
ref = block_frame.symbols.declare_parameter('super')
self.writeline('%s = context.super(%r, '
'block_%s)' % (ref, name, name))
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.enter_frame(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 0
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and (
'context.derived(%s)' % self.dump_local_context(frame)) or 'context'
if supports_yield_from and not self.environment.is_async and \
frame.buffer is None:
self.writeline('yield from context.blocks[%r][0](%s)' % (
node.name, context), node)
else:
loop = self.environment.is_async and 'async for' or 'for'
self.writeline('%s event in context.blocks[%r][0](%s):' % (
loop, node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent()
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
        # time too, but I prefer not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
skip_event_yield = False
if node.with_context:
loop = self.environment.is_async and 'async for' or 'for'
self.writeline('%s event in template.root_render_func('
'template.new_context(context.get_all(), True, '
'%s)):' % (loop, self.dump_local_context(frame)))
elif self.environment.is_async:
self.writeline('for event in (await '
'template._get_default_module_async())'
'._body_stream:')
else:
if supports_yield_from:
self.writeline('yield from template._get_default_module()'
'._body_stream')
skip_event_yield = True
else:
self.writeline('for event in template._get_default_module()'
'._body_stream:')
if not skip_event_yield:
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
self.writeline('%s = ' % frame.symbols.ref(node.target), node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
if self.environment.is_async:
self.write('await ')
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module%s(context.get_all(), True, %s)'
% (self.environment.is_async and '_async' or '',
self.dump_local_context(frame)))
elif self.environment.is_async:
self.write('_get_default_module_async()')
else:
self.write('_get_default_module()')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = %senvironment.get_template('
% (self.environment.is_async and 'await ' or ''))
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module%s(context.get_all(), True, %s)'
% (self.environment.is_async and '_async' or '',
self.dump_local_context(frame)))
elif self.environment.is_async:
self.write('_get_default_module_async()')
else:
self.write('_get_default_module()')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('%s = getattr(included_template, '
'%r, missing)' % (frame.symbols.ref(alias), name))
self.writeline('if %s is missing:' % frame.symbols.ref(alias))
self.indent()
self.writeline('%s = undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(frame.symbols.ref(alias),
'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = %s' %
(name, frame.symbols.ref(name)))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
loop_frame = frame.inner()
test_frame = frame.inner()
else_frame = frame.inner()
# try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode or if the special loop
        # variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
loop_ref = None
if extended_loop:
loop_ref = loop_frame.symbols.declare_parameter('loop')
loop_frame.symbols.analyze_node(node, for_branch='body')
if node.else_:
else_frame.symbols.analyze_node(node, for_branch='else')
if node.test:
loop_filter_func = self.temporary_identifier()
test_frame.symbols.analyze_node(node, for_branch='test')
self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
self.indent()
self.enter_frame(test_frame)
self.writeline(self.environment.is_async and 'async for ' or 'for ')
self.visit(node.target, loop_frame)
self.write(' in ')
self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
self.write(':')
self.indent()
self.writeline('if ', node.test)
self.visit(node.test, test_frame)
self.write(':')
self.indent()
self.writeline('yield ')
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
self.writeline('%s(reciter, loop_render_func, depth=0):' %
self.func('loop'), node)
self.indent()
self.buffer(loop_frame)
# Use the same buffer for the else frame
else_frame.buffer = loop_frame.buffer
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline('%s = missing' % loop_ref)
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
self.visit(node.target, loop_frame)
if extended_loop:
if self.environment.is_async:
self.write(', %s in await make_async_loop_context(' % loop_ref)
else:
self.write(', %s in LoopContext(' % loop_ref)
else:
self.write(' in ')
if node.test:
self.write('%s(' % loop_filter_func)
if node.recursive:
self.write('reciter')
else:
if self.environment.is_async and not extended_loop:
self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
self.write(')')
if node.test:
self.write(')')
if node.recursive:
self.write(', loop_render_func, depth):')
else:
self.write(extended_loop and '):' or ':')
self.indent()
self.enter_frame(loop_frame)
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
self.leave_frame(loop_frame, with_python_scope=node.recursive
and not node.else_)
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
self.leave_frame(else_frame)
self.outdent()
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
if self.environment.is_async:
self.write('await ')
self.write('loop(')
if self.environment.is_async:
self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async:
self.write(')')
self.write(', loop)')
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
ref = frame.symbols.ref(node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('%s = ' % frame.symbols.ref(node.name))
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node, frame):
call_frame, macro_ref = self.macro_body(node, frame)
self.writeline('caller = ')
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.symbols.analyze_node(node)
self.enter_frame(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.leave_frame(filter_frame)
def visit_With(self, node, frame):
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
self.newline()
self.visit(target, with_frame)
self.write(' = ')
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
allow_constant_finalize = True
if self.environment.finalize:
func = self.environment.finalize
if getattr(func, 'contextfunction', False) or \
getattr(func, 'evalcontextfunction', False):
allow_constant_finalize = False
elif getattr(func, 'environmentfunction', False):
finalize = lambda x: text_type(
self.environment.finalize(self.environment, x))
else:
finalize = lambda x: text_type(self.environment.finalize(x))
else:
finalize = text_type
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
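        # Illustration (hypothetical template): output such as
        # ``Hello {{ 1 + 1 }}!`` can collapse here into the single constant
        # chunk 'Hello 2!', while non-constant expressions stay in ``body``
        # as nodes that are visited at runtime.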
body = []
for child in node.nodes:
try:
if not allow_constant_finalize:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
            # the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ',')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(escape if context.eval_ctx.autoescape'
' else to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
"contextfunction", False):
self.write('context, ')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(',')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(escape if context.eval_ctx.autoescape else'
' to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
'contextfunction', False):
self.write('context, ')
elif getattr(self.environment.finalize,
'evalcontextfunction', False):
self.write('context.eval_ctx, ')
elif getattr(self.environment.finalize,
'environmentfunction', False):
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
def visit_Assign(self, node, frame):
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
self.write(' = ')
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
def visit_AssignBlock(self, node, frame):
self.push_assign_tracking()
block_frame = frame.inner()
# This is a special case. Since a set block always captures we
# will disable output checks. This way one can use set blocks
# toplevel even in extended templates.
block_frame.require_output_check = False
block_frame.symbols.analyze_node(node)
self.enter_frame(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
self.write(' = (Markup if context.eval_ctx.autoescape '
'else identity)(concat(%s))' % block_frame.buffer)
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
# instruction indicates a parameter which are always defined.
if node.ctx == 'load':
load = frame.symbols.find_load(ref)
if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
not self.parameter_is_undeclared(ref)):
self.write('(undefined(name=%r) if %s is missing else %s)' %
(node.name, ref, ref))
return
self.write(ref)
def visit_Const(self, node, frame):
val = node.as_const(frame.eval_ctx)
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
@optimizeconst
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
@optimizeconst
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
@optimizeconst
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
@optimizeconst
def visit_Filter(self, node, frame):
if self.environment.is_async:
self.write('await auto_await(')
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
if self.environment.is_async:
self.write(')')
@optimizeconst
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
@optimizeconst
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
@optimizeconst
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.is_async:
self.write('await auto_await(')
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
if self.environment.is_async:
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
| gpl-3.0 |
OTWillems/GEO1005 | SpatialDecision/external/networkx/generators/tests/test_directed.py | 77 | 1313 | #!/usr/bin/env python
"""Generators - Directed Graphs
----------------------------
"""
from nose.tools import *
from networkx import *
from networkx.generators.directed import *
class TestGeneratorsDirected():
def test_smoke_test_random_graphs(self):
G=gn_graph(100)
G=gnr_graph(100,0.5)
G=gnc_graph(100)
G=scale_free_graph(100)
def test_create_using_keyword_arguments(self):
assert_raises(networkx.exception.NetworkXError,
gn_graph, 100, create_using=Graph())
assert_raises(networkx.exception.NetworkXError,
gnr_graph, 100, 0.5, create_using=Graph())
assert_raises(networkx.exception.NetworkXError,
gnc_graph, 100, create_using=Graph())
assert_raises(networkx.exception.NetworkXError,
scale_free_graph, 100, create_using=Graph())
G=gn_graph(100,seed=1)
MG=gn_graph(100,create_using=MultiDiGraph(),seed=1)
assert_equal(G.edges(), MG.edges())
G=gnr_graph(100,0.5,seed=1)
MG=gnr_graph(100,0.5,create_using=MultiDiGraph(),seed=1)
assert_equal(G.edges(), MG.edges())
G=gnc_graph(100,seed=1)
MG=gnc_graph(100,create_using=MultiDiGraph(),seed=1)
assert_equal(G.edges(), MG.edges())
| gpl-2.0 |
joequant/zipline | zipline/finance/blotter.py | 29 | 14087 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import uuid
from copy import copy
from logbook import Logger
from collections import defaultdict
from six import text_type, iteritems
from six.moves import filter
import zipline.errors
import zipline.protocol as zp
from zipline.finance.slippage import (
VolumeShareSlippage,
transact_partial,
check_order_triggers
)
from zipline.finance.commission import PerShare
from zipline.utils.protocol_utils import Enum
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = Logger('Blotter')
ORDER_STATUS = Enum(
'OPEN',
'FILLED',
'CANCELLED',
'REJECTED',
'HELD',
)
class Blotter(object):
def __init__(self):
self.transact = transact_partial(VolumeShareSlippage(), PerShare())
# these orders are aggregated by sid
self.open_orders = defaultdict(list)
# keep a dict of orders by their own id
self.orders = {}
# holding orders that have come in since the last
# event.
self.new_orders = []
self.current_dt = None
self.max_shares = int(1e+11)
def __repr__(self):
return """
{class_name}(
transact_partial={transact_partial},
open_orders={open_orders},
orders={orders},
new_orders={new_orders},
current_dt={current_dt})
""".strip().format(class_name=self.__class__.__name__,
transact_partial=self.transact.args,
open_orders=self.open_orders,
orders=self.orders,
new_orders=self.new_orders,
current_dt=self.current_dt)
def set_date(self, dt):
self.current_dt = dt
def order(self, sid, amount, style, order_id=None):
# something could be done with amount to further divide
# between buy by share count OR buy shares up to a dollar amount
# numeric == share count AND "$dollar.cents" == cost amount
"""
amount > 0 :: Buy/Cover
amount < 0 :: Sell/Short
Market order: order(sid, amount)
Limit order: order(sid, amount, style=LimitOrder(limit_price))
Stop order: order(sid, amount, style=StopOrder(stop_price))
StopLimit order: order(sid, amount, style=StopLimitOrder(limit_price,
stop_price))
"""
if amount == 0:
# Don't bother placing orders for 0 shares.
return
elif amount > self.max_shares:
# Arbitrary limit of 100 billion (US) shares will never be
# exceeded except by a buggy algorithm.
raise OverflowError("Can't order more than %d shares" %
self.max_shares)
is_buy = (amount > 0)
order = Order(
dt=self.current_dt,
sid=sid,
amount=amount,
stop=style.get_stop_price(is_buy),
limit=style.get_limit_price(is_buy),
id=order_id
)
self.open_orders[order.sid].append(order)
self.orders[order.id] = order
self.new_orders.append(order)
return order.id
def cancel(self, order_id):
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
order_list = self.open_orders[cur_order.sid]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.cancel()
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def reject(self, order_id, reason=''):
"""
Mark the given order as 'rejected', which is functionally similar to
cancelled. The distinction is that rejections are involuntary (and
usually include a message from a broker indicating why the order was
rejected) while cancels are typically user-driven.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
order_list = self.open_orders[cur_order.sid]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.reject(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def hold(self, order_id, reason=''):
"""
Mark the order with order_id as 'held'. Held is functionally similar
to 'open'. When a fill (full or partial) arrives, the status
will automatically change back to open/filled as necessary.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.hold(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def process_split(self, split_event):
if split_event.sid not in self.open_orders:
return
orders_to_modify = self.open_orders[split_event.sid]
for order in orders_to_modify:
order.handle_split(split_event)
def process_benchmark(self, benchmark_event):
return
yield
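        # (the unreachable yield above turns this method into a generator that
        # yields nothing, presumably to match the interface of process_trade)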
def process_trade(self, trade_event):
if trade_event.sid not in self.open_orders:
return
if trade_event.volume < 1:
            # there are zero-volume trade_events because some stocks trade
            # less frequently than once per minute.
return
orders = self.open_orders[trade_event.sid]
orders.sort(key=lambda o: o.dt)
# Only use orders for the current day or before
current_orders = filter(
lambda o: o.dt <= trade_event.dt,
orders)
processed_orders = []
for txn, order in self.process_transactions(trade_event,
current_orders):
processed_orders.append(order)
yield txn, order
# remove closed orders. we should only have to check
# processed orders
def not_open(order):
return not order.open
closed_orders = filter(not_open, processed_orders)
for order in closed_orders:
orders.remove(order)
if len(orders) == 0:
del self.open_orders[trade_event.sid]
def process_transactions(self, trade_event, current_orders):
for order, txn in self.transact(trade_event, current_orders):
if txn.type == zp.DATASOURCE_TYPE.COMMISSION:
order.commission = (order.commission or 0.0) + txn.cost
else:
if txn.amount == 0:
raise zipline.errors.TransactionWithNoAmount(txn=txn)
if math.copysign(1, txn.amount) != order.direction:
raise zipline.errors.TransactionWithWrongDirection(
txn=txn, order=order)
if abs(txn.amount) > abs(self.orders[txn.order_id].amount):
raise zipline.errors.TransactionVolumeExceedsOrder(
txn=txn, order=order)
order.filled += txn.amount
if txn.commission is not None:
order.commission = ((order.commission or 0.0) +
txn.commission)
# mark the date of the order to match the transaction
# that is filling it.
order.dt = txn.dt
yield txn, order
def __getstate__(self):
state_to_save = ['new_orders', 'orders', '_status']
state_dict = {k: self.__dict__[k] for k in state_to_save
if k in self.__dict__}
# Have to handle defaultdicts specially
state_dict['open_orders'] = dict(self.open_orders)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
self.__init__()
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Blotter saved is state too old.")
open_orders = defaultdict(list)
open_orders.update(state.pop('open_orders'))
self.open_orders = open_orders
self.__dict__.update(state)
class Order(object):
def __init__(self, dt, sid, amount, stop=None, limit=None, filled=0,
commission=None, id=None):
"""
@dt - datetime.datetime that the order was placed
@sid - stock sid of the order
@amount - the number of shares to buy/sell
a positive sign indicates a buy
a negative sign indicates a sell
@filled - how many shares of the order have been filled so far
"""
# get a string representation of the uuid.
self.id = id or self.make_id()
self.dt = dt
self.reason = None
self.created = dt
self.sid = sid
self.amount = amount
self.filled = filled
self.commission = commission
self._status = ORDER_STATUS.OPEN
self.stop = stop
self.limit = limit
self.stop_reached = False
self.limit_reached = False
self.direction = math.copysign(1, self.amount)
self.type = zp.DATASOURCE_TYPE.ORDER
def make_id(self):
return uuid.uuid4().hex
def to_dict(self):
py = copy(self.__dict__)
for field in ['type', 'direction', '_status']:
del py[field]
py['status'] = self.status
return py
def to_api_obj(self):
pydict = self.to_dict()
obj = zp.Order(initial_values=pydict)
return obj
def check_triggers(self, event):
"""
Update internal state based on price triggers and the
trade event's price.
"""
stop_reached, limit_reached, sl_stop_reached = \
check_order_triggers(self, event)
if (stop_reached, limit_reached) \
!= (self.stop_reached, self.limit_reached):
self.dt = event.dt
self.stop_reached = stop_reached
self.limit_reached = limit_reached
if sl_stop_reached:
# Change the STOP LIMIT order into a LIMIT order
self.stop = None
def handle_split(self, split_event):
ratio = split_event.ratio
# update the amount, limit_price, and stop_price
# by the split's ratio
# info here: http://finra.complinet.com/en/display/display_plain.html?
# rbid=2403&element_id=8950&record_id=12208&print=1
# new_share_amount = old_share_amount / ratio
# new_price = old_price * ratio
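        # Worked example (illustrative numbers): a 2-for-1 split is reported
        # with ratio 0.5, so an open order for 100 shares with a $31.00 limit
        # becomes an order for 200 shares with a $15.50 limit.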
self.amount = int(self.amount / ratio)
if self.limit is not None:
self.limit = round(self.limit * ratio, 2)
if self.stop is not None:
self.stop = round(self.stop * ratio, 2)
@property
def status(self):
if not self.open_amount:
return ORDER_STATUS.FILLED
elif self._status == ORDER_STATUS.HELD and self.filled:
return ORDER_STATUS.OPEN
else:
return self._status
@status.setter
def status(self, status):
self._status = status
def cancel(self):
self.status = ORDER_STATUS.CANCELLED
def reject(self, reason=''):
self.status = ORDER_STATUS.REJECTED
self.reason = reason
def hold(self, reason=''):
self.status = ORDER_STATUS.HELD
self.reason = reason
@property
def open(self):
return self.status in [ORDER_STATUS.OPEN, ORDER_STATUS.HELD]
@property
def triggered(self):
"""
For a market order, True.
For a stop order, True IFF stop_reached.
For a limit order, True IFF limit_reached.
"""
if self.stop is not None and not self.stop_reached:
return False
if self.limit is not None and not self.limit_reached:
return False
return True
@property
def open_amount(self):
return self.amount - self.filled
def __repr__(self):
"""
String representation for this object.
"""
return "Order(%s)" % self.to_dict().__repr__()
def __unicode__(self):
"""
Unicode representation for this object.
"""
return text_type(repr(self))
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['_status'] = self._status
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Order saved state is too old.")
self.__dict__.update(state)
| apache-2.0 |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/app/demoutils.py | 34 | 1309 | # Utilities for the demos
import sys, win32api, win32con, win32ui
NotScriptMsg = """\
This demo program is not designed to be run as a Script, but is
probably used by some other test program. Please try another demo.
"""
NeedGUIMsg = """\
This demo program can only be run from inside of Pythonwin
You must start Pythonwin, and select 'Run' from the toolbar or File menu
"""
NeedAppMsg = """\
This demo program is a 'Pythonwin Application'.
It is more demo code than an example of Pythonwin's capabilities.
To run it, you must execute the command:
pythonwin.exe /app "%s"
Would you like to execute it now?
"""
def NotAScript():
import win32ui
win32ui.MessageBox(NotScriptMsg, "Demos")
def NeedGoodGUI():
from pywin.framework.app import HaveGoodGUI
rc = HaveGoodGUI()
if not rc:
win32ui.MessageBox(NeedGUIMsg, "Demos")
return rc
def NeedApp():
import win32ui
rc = win32ui.MessageBox(NeedAppMsg % sys.argv[0], "Demos", win32con.MB_YESNO)
if rc==win32con.IDYES:
try:
parent = win32ui.GetMainFrame().GetSafeHwnd()
win32api.ShellExecute(parent, None, 'pythonwin.exe', '/app "%s"' % sys.argv[0], None, 1)
except win32api.error, details:
win32ui.MessageBox("Error executing command - %s" % (details), "Demos")
if __name__=='__main__':
import demoutils
demoutils.NotAScript()
| apache-2.0 |
idegtiarov/gnocchi-rep | gnocchi/ceilometer/resources/ceph_account.py | 1 | 1072 | #
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gnocchi.ceilometer.resources import base
class CephAccount(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['radosgw.api.request',
'radosgw.objects.size',
'radosgw.objects',
'radosgw.objects.containers',
'radosgw.containers.objects',
'radosgw.containers.objects.size',
]
| apache-2.0 |
hassanabidpk/django | django/template/library.py | 348 | 12752 | import functools
import warnings
from importlib import import_module
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.html import conditional_escape
from django.utils.inspect import getargspec
from django.utils.itercompat import is_iterable
from .base import Node, Template, token_kwargs
from .exceptions import TemplateSyntaxError
class InvalidTemplateLibrary(Exception):
pass
class Library(object):
"""
A class for registering template tags and filters. Compiled filter and
template tag functions are stored in the filters and tags attributes.
The filter, simple_tag, and inclusion_tag methods provide a convenient
way to register callables as tags.
"""
def __init__(self):
self.filters = {}
self.tags = {}
def tag(self, name=None, compile_function=None):
if name is None and compile_function is None:
# @register.tag()
return self.tag_function
elif name is not None and compile_function is None:
if callable(name):
# @register.tag
return self.tag_function(name)
else:
# @register.tag('somename') or @register.tag(name='somename')
def dec(func):
return self.tag(name, func)
return dec
elif name is not None and compile_function is not None:
# register.tag('somename', somefunc)
self.tags[name] = compile_function
return compile_function
else:
raise ValueError(
"Unsupported arguments to Library.tag: (%r, %r)" %
(name, compile_function),
)
def tag_function(self, func):
self.tags[getattr(func, "_decorated_function", func).__name__] = func
return func
def filter(self, name=None, filter_func=None, **flags):
"""
Register a callable as a template filter. Example:
@register.filter
def lower(value):
return value.lower()
"""
if name is None and filter_func is None:
# @register.filter()
def dec(func):
return self.filter_function(func, **flags)
return dec
elif name is not None and filter_func is None:
if callable(name):
# @register.filter
return self.filter_function(name, **flags)
else:
# @register.filter('somename') or @register.filter(name='somename')
def dec(func):
return self.filter(name, func, **flags)
return dec
elif name is not None and filter_func is not None:
# register.filter('somename', somefunc)
self.filters[name] = filter_func
for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
if attr in flags:
value = flags[attr]
# set the flag on the filter for FilterExpression.resolve
setattr(filter_func, attr, value)
# set the flag on the innermost decorated function
# for decorators that need it, e.g. stringfilter
if hasattr(filter_func, "_decorated_function"):
setattr(filter_func._decorated_function, attr, value)
filter_func._filter_name = name
return filter_func
else:
raise ValueError(
"Unsupported arguments to Library.filter: (%r, %r)" %
(name, filter_func),
)
def filter_function(self, func, **flags):
name = getattr(func, "_decorated_function", func).__name__
return self.filter(name, func, **flags)
def simple_tag(self, func=None, takes_context=None, name=None):
"""
Register a callable as a compiled template tag. Example:
@register.simple_tag
def hello(*args, **kwargs):
return 'world'
"""
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
target_var = None
if len(bits) >= 2 and bits[-2] == 'as':
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(parser, bits, params,
varargs, varkw, defaults, takes_context, function_name)
return SimpleNode(func, takes_context, args, kwargs, target_var)
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_tag(...)
return dec
elif callable(func):
# @register.simple_tag
return dec(func)
else:
raise ValueError("Invalid arguments provided to simple_tag")
def assignment_tag(self, func=None, takes_context=None, name=None):
warnings.warn(
"assignment_tag() is deprecated. Use simple_tag() instead",
RemovedInDjango20Warning,
stacklevel=2,
)
return self.simple_tag(func, takes_context, name)
def inclusion_tag(self, filename, func=None, takes_context=None, name=None):
"""
Register a callable as an inclusion tag:
@register.inclusion_tag('results.html')
def show_results(poll):
choices = poll.choice_set.all()
return {'choices': choices}
"""
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
args, kwargs = parse_bits(
parser, bits, params, varargs, varkw, defaults,
takes_context, function_name,
)
return InclusionNode(
func, takes_context, args, kwargs, filename,
)
self.tag(function_name, compile_func)
return func
return dec
class TagHelperNode(Node):
"""
Base class for tag helper nodes such as SimpleNode and InclusionNode.
Manages the positional and keyword arguments to be passed to the decorated
function.
"""
def __init__(self, func, takes_context, args, kwargs):
self.func = func
self.takes_context = takes_context
self.args = args
self.kwargs = kwargs
def get_resolved_arguments(self, context):
resolved_args = [var.resolve(context) for var in self.args]
if self.takes_context:
resolved_args = [context] + resolved_args
resolved_kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
return resolved_args, resolved_kwargs
class SimpleNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, target_var):
super(SimpleNode, self).__init__(func, takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
output = self.func(*resolved_args, **resolved_kwargs)
if self.target_var is not None:
context[self.target_var] = output
return ''
if context.autoescape:
output = conditional_escape(output)
return output
class InclusionNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, filename):
super(InclusionNode, self).__init__(func, takes_context, args, kwargs)
self.filename = filename
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
_dict = self.func(*resolved_args, **resolved_kwargs)
t = context.render_context.get(self)
if t is None:
if isinstance(self.filename, Template):
t = self.filename
elif isinstance(getattr(self.filename, 'template', None), Template):
t = self.filename.template
elif not isinstance(self.filename, six.string_types) and is_iterable(self.filename):
t = context.template.engine.select_template(self.filename)
else:
t = context.template.engine.get_template(self.filename)
context.render_context[self] = t
new_context = context.new(_dict)
# Copy across the CSRF token, if present, because inclusion tags are
# often used for forms, and we need instructions for using CSRF
# protection to be as simple as possible.
csrf_token = context.get('csrf_token')
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return t.render(new_context)
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
"""
Parse bits for template tag helpers simple_tag and inclusion_tag, in
particular by detecting syntax errors and by extracting positional and
keyword arguments.
"""
if takes_context:
if params[0] == 'context':
params = params[1:]
else:
raise TemplateSyntaxError(
"'%s' is decorated with takes_context=True so it must "
"have a first argument of 'context'" % name)
args = []
kwargs = {}
unhandled_params = list(params)
for bit in bits:
# First we try to extract a potential kwarg from the bit
kwarg = token_kwargs([bit], parser)
if kwarg:
# The kwarg was successfully extracted
param, value = kwarg.popitem()
if param not in params and varkw is None:
# An unexpected keyword argument was supplied
raise TemplateSyntaxError(
"'%s' received unexpected keyword argument '%s'" %
(name, param))
elif param in kwargs:
# The keyword argument has already been supplied once
raise TemplateSyntaxError(
"'%s' received multiple values for keyword argument '%s'" %
(name, param))
else:
# All good, record the keyword argument
kwargs[str(param)] = value
if param in unhandled_params:
# If using the keyword syntax for a positional arg, then
# consume it.
unhandled_params.remove(param)
else:
if kwargs:
raise TemplateSyntaxError(
"'%s' received some positional argument(s) after some "
"keyword argument(s)" % name)
else:
# Record the positional argument
args.append(parser.compile_filter(bit))
try:
# Consume from the list of expected positional arguments
unhandled_params.pop(0)
except IndexError:
if varargs is None:
raise TemplateSyntaxError(
"'%s' received too many positional arguments" %
name)
if defaults is not None:
# Consider the last n params handled, where n is the
# number of defaults.
unhandled_params = unhandled_params[:-len(defaults)]
if unhandled_params:
# Some positional arguments were not supplied
raise TemplateSyntaxError(
"'%s' did not receive value(s) for the argument(s): %s" %
(name, ", ".join("'%s'" % p for p in unhandled_params)))
return args, kwargs
def import_library(name):
"""
Load a Library object from a template tag module.
"""
try:
module = import_module(name)
except ImportError as e:
raise InvalidTemplateLibrary(
"Invalid template library specified. ImportError raised when "
"trying to load '%s': %s" % (name, e)
)
try:
return module.register
except AttributeError:
raise InvalidTemplateLibrary(
"Module %s does not have a variable named 'register'" % name,
)
| bsd-3-clause |
40223110/2015cda_0512 | static/Brython3.1.0-20150301-090019/Lib/_string.py | 625 | 1112 | """string helper module"""
import re
class __loader__(object):
pass
def formatter_field_name_split(fieldname):
"""split the argument as a field name"""
_list=[]
for _name in fieldname:
_parts = _name.split('.')
for _item in _parts:
is_attr=False #fix me
if re.match('\d+', _item):
_list.append((int(_item), is_attr))
else:
_list.append((_item, is_attr))
return _list[0][0], iter(_list[1:])
def formatter_parser(*args,**kw):
"""parse the argument as a format string"""
assert len(args)==1
assert isinstance(args[0], str)
_result=[]
for _match in re.finditer("([^{]*)?(\{[^}]*\})?", args[0]):
_pre, _fmt = _match.groups()
if _fmt is None:
_result.append((_pre, None, None, None))
elif _fmt == '{}':
_result.append((_pre, '', '', None))
else:
_m=re.match("\{([^!]*)!?(.*)?\}", _fmt)
_name=_m.groups(0)
_flags=_m.groups(1)
_result.append((_pre, _name, _flags, None))
return _result
| gpl-3.0 |
aexeagmbh/django-allauth | allauth/socialaccount/providers/paypal/views.py | 60 | 1606 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import PaypalProvider
class PaypalOAuth2Adapter(OAuth2Adapter):
provider_id = PaypalProvider.id
supports_state = False
@property
def authorize_url(self):
path = 'webapps/auth/protocol/openidconnect/v1/authorize'
return 'https://www.{0}/{1}'.format(self._get_endpoint(), path)
@property
def access_token_url(self):
path = "v1/identity/openidconnect/tokenservice"
return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)
@property
def profile_url(self):
path = 'v1/identity/openidconnect/userinfo'
return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)
def _get_endpoint(self):
settings = self.get_provider().get_settings()
if settings.get('MODE') == 'live':
return 'paypal.com'
else:
return 'sandbox.paypal.com'
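    # Example configuration (an assumption about the usual allauth settings
    # layout, not taken from this file):
    #   SOCIALACCOUNT_PROVIDERS = {'paypal': {'MODE': 'live'}}
    # Any other MODE value selects the sandbox endpoints above.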
def complete_login(self, request, app, token, **kwargs):
response = requests.post(self.profile_url,
params={'schema':'openid',
'access_token':token})
extra_data = response.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(PaypalOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PaypalOAuth2Adapter)
| mit |
hiroakis/ansible | v1/ansible/module_utils/cloudstack.py | 118 | 13221 | # -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
class AnsibleCloudStack:
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
self.cs = CloudStack(**read_config())
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
                continue
# Skip None values
if value is None:
                continue
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
                # Only need to detect a single change, not every item
if value != current_dict[key]:
return True
return False
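    # For example, has_changed({'displaytext': 'new'}, {'displaytext': 'old'})
    # is True, while a want_dict whose values all match the current resource
    # (or are None) leaves the result False.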
def _get_by_key(self, key=None, my_dict={}):
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.zone)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
existing_tags = self.cs.listTags(resourceid=resource['id'])
if existing_tags:
return existing_tags['tag']
return []
def _delete_tags(self, resource, resource_type, tags):
existing_tags = resource['tags']
tags_to_delete = []
for existing_tag in existing_tags:
if existing_tag['key'] in tags:
                if existing_tag['value'] != tags[existing_tag['key']]:
tags_to_delete.append(existing_tag)
else:
tags_to_delete.append(existing_tag)
if tags_to_delete:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_delete
self.cs.deleteTags(**args)
def _create_tags(self, resource, resource_type, tags):
tags_to_create = []
for i, tag_entry in enumerate(tags):
tag = {
'key': tag_entry['key'],
'value': tag_entry['value'],
}
tags_to_create.append(tag)
if tags_to_create:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_create
self.cs.createTags(**args)
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._delete_tags(resource, resource_type, tags)
self._create_tags(resource, resource_type, tags)
resource['tags'] = self.get_tags(resource)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.cs.listCapabilities()
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
# TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
return self.poll_job(job=job, key=key)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
| gpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/python/ops/nn_batchnorm_test.py | 5 | 30554 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
@test_util.with_c_api
class BatchNormalizationTest(test.TestCase):
def _npBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) / np.sqrt(v + epsilon)
y = y * gamma if scale_after_normalization else y
return y + beta if shift_after_normalization else y
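  # Both reference paths (_npBatchNorm above, _opsBatchNorm below) compute
  # y = gamma * (x - m) / sqrt(v + epsilon) + beta, applying gamma and beta
  # only when the corresponding *_after_normalization flags are set.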
def _opsBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) * math_ops.rsqrt(v + epsilon)
if scale_after_normalization:
y = gamma * y
return y + beta if shift_after_normalization else y
def _tfBatchNormV1(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Original implementation."""
test_util.set_producer_version(ops.get_default_graph(), 8)
    # pylint: disable=protected-access
    return gen_nn_ops._batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
# pylint: enable=protected-access
def _tfBatchNormV1BW(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Re-implementation of the original kernel for backward compatibility."""
return nn_impl.batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
def _tfBatchNormV2(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
"""New implementation."""
return nn_impl.batch_normalization(x, m, v, beta if
shift_after_normalization else None,
gamma if scale_after_normalization else
None, epsilon)
def testBatchNorm(self):
x_shape = [3, 5, 4, 2]
param_shape = [2]
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn2 = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
bn1bw = self._tfBatchNormV1BW(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
bn1 = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
np_bn = self._npBatchNorm(x_val, m_val, v_val, beta_val, gamma_val,
epsilon, scale_after_normalization,
shift_after_normalization)
tf_bn_v2, tf_bn_v1bw, tf_bn_v1, ops_bn = sess.run(
[bn2, bn1bw, bn1, on])
self.assertAllClose(np_bn, ops_bn, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v2, atol=0.00001)
self.assertAllClose(tf_bn_v2, ops_bn, atol=0.00001)
# shift_after_normalization=False is not supported in v1.
if shift_after_normalization:
self.assertAllClose(np_bn, tf_bn_v1bw, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v1, atol=0.00001)
self.assertAllClose(tf_bn_v1, ops_bn, atol=0.00001)
self.assertAllClose(tf_bn_v1bw, ops_bn, atol=0.00001)
def _testBatchNormGradient(self,
param_index,
tag,
scale_after_normalization,
shift_after_normalization,
version,
err_tolerance=1e-11):
x_shape = [3, 5, 4, 5]
param_shape = [5]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
m_val = np.random.random_sample(param_shape).astype(np.float64)
v_val = np.random.random_sample(param_shape).astype(np.float64)
beta_val = np.random.random_sample(param_shape).astype(np.float64)
gamma_val = np.random.random_sample(param_shape).astype(np.float64)
with self.test_session():
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
if version == 1:
output = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
elif version == 2:
output = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
else:
print("Invalid version", version)
raise ValueError()
all_params = [x, m, v, beta, gamma]
all_shapes = [x_shape, param_shape, param_shape, param_shape, param_shape]
err = gradient_checker.compute_gradient_error(all_params[param_index],
all_shapes[param_index],
output, x_shape)
print("Batch normalization v%d %s gradient %s scale and %s shift err = " %
(version, tag, "with" if scale_after_normalization else "without",
"with" if shift_after_normalization else "without"), err)
self.assertLess(err, err_tolerance)
def _testBatchNormGradientInAllNeedConfigs(self,
param_index,
tag,
err_tolerance=1e-11):
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
# shift_after_normalization=False is not supported in version 1.
for v in ([1, 2] if shift_after_normalization else [2]):
self._testBatchNormGradient(param_index, tag,
scale_after_normalization,
shift_after_normalization, v,
err_tolerance)
def testBatchNormInputGradient(self):
self._testBatchNormGradientInAllNeedConfigs(0, "x")
def testBatchNormMeanGradient(self):
self._testBatchNormGradientInAllNeedConfigs(1, "mean")
def testBatchNormVarianceGradient(self):
self._testBatchNormGradientInAllNeedConfigs(
2, "variance", err_tolerance=1e-03)
def testBatchNormBetaGradient(self):
# Since beta does not exist when scale_after_normalization=False, we only
# test for scale_after_normalization=True.
for scale_after_normalization in [True, False]:
for v in [1, 2]:
self._testBatchNormGradient(3, "beta", scale_after_normalization, True,
v)
def testBatchNormGammaGradient(self):
# If scale_after_normalization is False, backprop for gamma in v1
# will be 0. In version 2 of the API, if scale_after_normalization is False,
# gamma is not used at all, and the gradient is None, which displeases the
# gradient checker.
for scale_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", scale_after_normalization, True,
1)
for shift_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", True, shift_after_normalization,
2)
def testBatchNormGradImpl(self):
x_shape = [7, 5, 4, 6]
param_shape = [6]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
backprop_val = np.random.random_sample(x_shape).astype(np.float32)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
backprop = constant_op.constant(backprop_val, name="backprop")
epsilon = 0.001
for scale_after_normalization in [True, False]:
# _batch_norm_with_global_normalization_grad is deprecated in v9
test_util.set_producer_version(ops.get_default_graph(), 8)
grad = gen_nn_ops._batch_norm_with_global_normalization_grad(
x, m, v, gamma, backprop, epsilon, scale_after_normalization)
dx, dm, dv, db, dg = grad
self.assertEqual(grad.dx, dx)
self.assertEqual(grad.dm, dm)
self.assertEqual(grad.dv, dv)
self.assertEqual(grad.db, db)
self.assertEqual(grad.dg, dg)
on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
scale_after_normalization, True)
odx, odm, odv, odb, odg = gradients_impl.gradients(
[on], [x, m, v, beta, gamma], [backprop])
if scale_after_normalization:
all_grads = sess.run([dx, dm, dv, db, dg, odx, odm, odv, odb, odg])
to_check = ["dx", "dm", "dv", "db", "dg"]
else:
all_grads = sess.run([dx, dm, dv, db, odx, odm, odv, odb])
to_check = ["dx", "dm", "dv", "db"]
for i, _ in enumerate(to_check):
self.assertAllClose(
all_grads[i + len(to_check)], all_grads[i], atol=0.000001)
def testBatchNormKeepDims(self):
"""Test for tf.nn.moments(..., keep_dims=True / False).
Make sure that parameters with shape (1, 1, 1, depth) yield the same
result as parameters with shape (depth)
"""
x_shape = (3, 5, 4, 2)
param_shape = (2)
keep_dims_param_shape = (1, 1, 1, 2)
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
keep_dims_m = array_ops.reshape(
m, keep_dims_param_shape, name="keep_dims_m")
keep_dims_v = array_ops.reshape(
v, keep_dims_param_shape, name="keep_dims_v")
keep_dims_beta = array_ops.reshape(
beta, keep_dims_param_shape, name="keep_dims_beta")
keep_dims_gamma = array_ops.reshape(
gamma, keep_dims_param_shape, name="keep_dims_gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
keep_dims_bn = self._tfBatchNormV2(x, keep_dims_m, keep_dims_v,
keep_dims_beta, keep_dims_gamma,
epsilon,
scale_after_normalization,
shift_after_normalization)
tf_batch_norm, keep_dims_tf_batch_norm = sess.run(
[bn, keep_dims_bn])
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertEquals(x_shape, keep_dims_tf_batch_norm.shape)
self.assertAllClose(
tf_batch_norm, keep_dims_tf_batch_norm, atol=0.000001)
def _testBatchNormArbitraryShapes(self, x_shape, param_shape, atol=0.0001):
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
np_batch_norm = self._npBatchNorm(x_val, m_val, v_val, beta_val,
gamma_val, epsilon,
scale_after_normalization,
shift_after_normalization)
[tf_batch_norm] = sess.run([bn])
self.assertEquals(x_shape, np_batch_norm.shape)
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertAllClose(np_batch_norm, tf_batch_norm, atol=atol)
def testBatchNormArbitraryShapes(self):
"""Test for a variety of shapes and moments.
Batch normalization is expected to work regardless of the position and
dimensionality of the 'depth' axis/axes.
"""
self._testBatchNormArbitraryShapes((3, 3), (1, 3))
self._testBatchNormArbitraryShapes((3, 3), (3, 1))
self._testBatchNormArbitraryShapes((3, 2, 4, 5), (1, 2, 1, 1))
self._testBatchNormArbitraryShapes(
(2, 3, 2, 4, 5), (1, 1, 1, 4, 5), atol=0.005)
@test_util.with_c_api
class SufficientStatisticsTest(test.TestCase):
def _npSuffStats(self, x, axes, shift, keep_dims):
axis = tuple(axes)
if shift is not None:
m_ss = np.sum(x - shift, axis=axis, keepdims=keep_dims)
v_ss = np.sum((x - shift) * (x - shift), axis=axis, keepdims=keep_dims)
else:
m_ss = np.sum(x, axis=axis, keepdims=keep_dims)
v_ss = np.sum(x * x, axis=axis, keepdims=keep_dims)
count = 1.0
for d in xrange(x.ndim):
if d in set(axes):
count *= x.shape[d]
if not keep_dims:
shift = np.squeeze(shift, axis=axis)
return count, m_ss, v_ss, shift
def _opSuffStats(self, x, axes, shift, keep_dims):
return nn_impl.sufficient_statistics(x, axes, shift, keep_dims)
def _testSuffStats(self, x_shape, axes, shift, keep_dims, has_shape):
x_val = np.random.random_sample(x_shape).astype(np.float32)
np_c, np_m, np_v, np_s = self._npSuffStats(x_val, axes, shift, keep_dims)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
if has_shape:
x = constant_op.constant(x_val, name="x")
x.set_shape(x_shape)
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s])
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v])
else:
x = array_ops.placeholder(
dtype=dtypes.float32, shape=[None] * len(x_shape), name="x")
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s],
feed_dict={x: x_val})
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v],
feed_dict={x: x_val})
self.assertAllClose(np_c, tf_c, atol=0.000001)
self.assertAllClose(np_m, tf_m, atol=0.000001)
self.assertAllClose(np_v, tf_v, atol=0.000001)
if shift:
self.assertAllClose(np_s, tf_s, atol=0.000001)
def testSuffStats(self):
for has_shape in [True, False]:
for keep_dims in [True, False]:
for shift in [None, 1.0]:
self._testSuffStats([2, 3], [1], shift, keep_dims, has_shape)
self._testSuffStats([2, 3], [0], shift, keep_dims, has_shape)
self._testSuffStats([1, 2, 3], [0, 2], shift, keep_dims, has_shape)
@test_util.with_c_api
class NormalizeMomentsTest(test.TestCase):
def _npNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
mean = mean_ss / counts
variance = variance_ss / counts - mean * mean
if shift is not None:
mean += shift
return mean, variance
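  # i.e. mean = mean_ss / counts and variance = E[x^2] - E[x]^2 computed on
  # the shifted values, with the optional shift added back to the mean.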
def _opNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
return nn_impl.normalize_moments(counts, mean_ss, variance_ss, shift)
def _testNormalizeMoments(self, shape, shift):
counts = np.ones([1]).astype(np.float32)
mean_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss *= variance_ss
if shift:
shift_v = np.random.random_sample(shape).astype(np.float32)
else:
shift_v = None
npm, npv = self._npNormalizeMoments(counts, mean_ss, variance_ss, shift_v)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
tf_counts = constant_op.constant(counts, name="counts")
tf_mean_ss = constant_op.constant(mean_ss, name="mean_ss")
tf_variance_ss = constant_op.constant(variance_ss, name="variance_ss")
if shift:
tf_shift_v = constant_op.constant(shift_v, name="shift")
else:
tf_shift_v = None
opm, opv = self._opNormalizeMoments(tf_counts, tf_mean_ss,
tf_variance_ss, tf_shift_v)
tfm, tfv = sess.run([opm, opv])
self.assertAllClose(npm, tfm, atol=0.000001)
self.assertAllClose(npv, tfv, atol=0.000001)
def testNormalizeMoments(self):
for shift in [None, 4.0]:
self._testNormalizeMoments([3], shift)
self._testNormalizeMoments([2, 3], shift)
@test_util.with_c_api
class MomentsTest(test.TestCase):
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
# Method to compute moments of `x` wrt `axes`.
#
# This is exposed so WeightedMomentsTest can inherit the tests and
# assertions from MomentsTest; the extra_out_grads argument allows
# its inherited gradient tests to assert gradients against the
# weights as well as the input values.
return nn_impl.moments(x, axes, keep_dims=keep_dims)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = array_ops.placeholder(dtype, shape=[None] * len(shape))
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(x_numpy, axis=ax,
keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(
expected_mean, mean.eval(feed_dict={x: x_numpy}))
self.assertAllCloseAccordingToType(
expected_variance, var.eval(feed_dict={x: x_numpy}))
def RunMomentTest(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = math_ops.cast(constant_op.constant(x_numpy), dtype=dtype)
# Compute the expected values at high precision since the method
# is prone to catastrophic cancellation:
x_numpy = x_numpy.astype(np.float128)
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(x_numpy, axis=ax,
keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(expected_mean, mean.eval())
self.assertAllCloseAccordingToType(expected_variance, var.eval())
def testBasic(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)
def testGlobalNormalization(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
def testAxes(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
def _testGlobalGradient(self, from_y="mean"):
with self.test_session():
x_shape = [3, 5, 4, 2]
x_val = np.random.random_sample(x_shape).astype(np.float64)
x = constant_op.constant(x_val)
x.set_shape(x_shape)
axes = [0, 1, 2]
y_shape = [2] # Depth of x
inputs_to_compute_gradients_for = [x]
out_mean, out_var = self._unweighted_moments(
x, axes, extra_out_grads=inputs_to_compute_gradients_for)
if from_y == "mean":
y = out_mean
elif from_y == "var":
y = out_var
for (i, v) in enumerate(inputs_to_compute_gradients_for):
err = gradient_checker.compute_gradient_error(v,
v.get_shape().as_list(),
y, y_shape)
print("Moments %s gradient err vs input %d = %g" % (from_y, i, err))
self.assertLess(err, 1e-11)
def testMeanGlobalGradient(self):
self._testGlobalGradient(from_y="mean")
def testVarGlobalGradient(self):
self._testGlobalGradient(from_y="var")
@test_util.with_c_api
class WeightedMomentsTest(MomentsTest):
"""Tests for nn.weighted_moments.
Note that this test inherits from MomentsTest, inheriting all its
test methods!
It modifies MomentsTest in two ways:
a) By overriding _unweighted_moments, all the codepaths in
MomentsTest are executed, but with calls to tf.nn.moments()
replaced by calls to tf.nn.weighted_moments() with a constant
weight of 1.
  b) By overriding RunMomentTest and RunMomentTestWithDynamicShape,
     this test adds multiple additional calls to
     RunWeightedMomentTest() to exercise correctness with
     non-constant weights and varying broadcasting situations. (It
     also still calls the parent MomentsTest.RunMomentTest and
     RunMomentTestWithDynamicShape implementations.)
"""
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
weights = constant_op.constant(1, dtype=x.dtype)
if extra_out_grads is not None:
# We want to assert gradients WRT weights as well as X!
extra_out_grads.append(weights)
return nn_impl.weighted_moments(x, axes, weights, keep_dims=keep_dims)
def RunMomentTest(self, shape, axes, keep_dims, dtype, dynshapes=False):
if not dynshapes:
super(WeightedMomentsTest, self).RunMomentTest(shape, axes, keep_dims,
dtype)
else:
super(WeightedMomentsTest, self).RunMomentTestWithDynamicShape(shape,
axes,
keep_dims,
dtype)
# 1:1 weights and inputs
self.RunWeightedMomentTest(shape, shape, axes, keep_dims, dtype)
# Various broadcasting combinations
for idx in range(len(shape)):
# try broadcasting weights in all positions
weight_shape = [1] * len(shape)
weight_shape[idx] = shape[idx]
self.RunWeightedMomentTest(shape, weight_shape, axes, keep_dims, dtype)
# Also try broadcasting with a suffix of length n
weight_shape = shape[-(idx + 1):]
self.RunWeightedMomentTest(
shape, weight_shape, axes, keep_dims, dtype, dynshapes=dynshapes)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
self.RunMomentTest(shape, axes, keep_dims, dtype, dynshapes=True)
def RunWeightedMomentTest(self,
shape,
weights_shape,
axes,
keep_dims,
dtype,
dynshapes=False):
with self.test_session() as s:
x_numpy = np.random.normal(size=shape).astype(np.float32)
weights_numpy = np.absolute( # weights must be positive
np.random.normal(
size=weights_shape, loc=1.0).astype(np.float32))
# Expand the numpy version to higher precision
x_numpy = x_numpy.astype(np.float128)
weights_numpy = weights_numpy.astype(np.float128)
x_shape = [None] * len(shape) if dynshapes else shape
weights_shape = ([None] * len(weights_shape) if dynshapes else
weights_shape)
x = array_ops.placeholder(dtype, shape=x_shape)
weights = array_ops.placeholder(dtype, shape=weights_shape)
mean, var = nn_impl.weighted_moments(
x, axes, weights, keep_dims=keep_dims)
ax = tuple(axes)
def _np_weighted_sum(v):
return np.sum(weights_numpy * v, axis=ax, keepdims=keep_dims)
weight_sum = _np_weighted_sum(np.ones_like(x_numpy))
expected_mean = _np_weighted_sum(x_numpy) / weight_sum
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = (_np_weighted_sum(np.multiply(x_numpy, x_numpy)) /
weight_sum)
expected_variance = expected_x_squared - expected_mean_squared
mean_v, var_v = s.run([mean, var],
feed_dict={x: x_numpy,
weights: weights_numpy})
self.assertAllCloseAccordingToType(expected_mean, mean_v)
self.assertAllCloseAccordingToType(expected_variance, var_v)
if __name__ == "__main__":
test.main()
| apache-2.0 |
nimbis/django-cms | cms/management/commands/subcommands/tree.py | 4 | 5434 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from collections import OrderedDict
from cms.models import Page, CMSPlugin
from .base import SubcommandsCommand
def get_descendant_ids(root_id):
"""
    Returns a generator of primary keys which represent
    descendants of the given page ID (root_id).
"""
# Note this is done because get_descendants() can't be trusted
# as the tree can be corrupt.
children = Page.objects.filter(parent=root_id).values_list('pk', flat=True)
for child_id in children.iterator():
yield child_id
for descendant_id in get_descendant_ids(child_id):
yield descendant_id
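# For example (a hypothetical tree, assuming children come back in pk order):
# if page 1 has children 2 and 3, and page 3 has child 4, then
# list(get_descendant_ids(1)) == [2, 3, 4] -- each child is yielded before
# its own descendants are walked.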
class FixTreeCommand(SubcommandsCommand):
help_string = 'Repairing Materialized Path Tree for Pages'
command_name = 'fix-tree'
def handle(self, *args, **options):
"""
Repairs the tree
"""
self.stdout.write('fixing page tree')
Page.fix_tree()
root_draft_pages = Page.objects.filter(
publisher_is_draft=True,
parent__isnull=True,
)
last = None
try:
first = root_draft_pages.order_by('path')[0]
except IndexError:
first = None
for page in root_draft_pages.order_by('site__pk', 'path'):
if last:
last = last.reload()
page = page.reload()
page.move(target=last, pos='right')
elif first and first.pk != page.pk:
page.move(target=first, pos='left')
last = page.reload()
root_public_pages = Page.objects.filter(
publisher_is_draft=False,
parent__isnull=True,
).order_by('publisher_public__path')
# Filter out any root public pages whose draft page
# has a parent.
# This avoids a tree corruption where the public root page
# is added as a child of the draft page's draft parent
# instead of the draft page's public parent
root_public_pages = root_public_pages.filter(
publisher_public__parent__isnull=True
)
for page in root_public_pages:
page = page.reload()
public = page.publisher_public
page.move(target=public, pos='right')
for root in root_draft_pages.order_by('site__pk', 'path'):
self._update_descendants_tree(root)
self.stdout.write('fixing plugin tree')
CMSPlugin.fix_tree()
self.stdout.write('all done')
def _update_descendants_tree(self, root):
descendants_ids = get_descendant_ids(root.pk)
public_root_sibling = root.publisher_public
draft_descendants = (
Page
.objects
.filter(pk__in=descendants_ids)
.select_related('parent', 'publisher_public')
.order_by('depth', 'path')
)
descendants_by_parent = OrderedDict()
for descendant in draft_descendants.iterator():
parent = descendant.parent_id
descendants_by_parent.setdefault(parent, []).append(descendant)
for tree in descendants_by_parent.values():
last_draft = None
last_public = None
draft_parent = tree[0].parent
public_parent = draft_parent.publisher_public
for draft_page in tree:
draft_page.refresh_from_db()
if last_draft:
                    # This is not the first time through the loop, so this
                    # is not the first draft child. Set this page as a
                    # sibling of the last processed draft page.
draft_page.move(target=last_draft.reload(), pos='right')
else:
# This is the first time through the loop so this is the first
# draft child for this parent.
draft_page.move(target=draft_parent.reload(), pos='first-child')
last_draft = draft_page
if not draft_page.publisher_public_id:
continue
public_page = draft_page.publisher_public
if last_public:
public_target = last_public
public_position = 'right'
last_public = public_page
elif public_parent:
# always insert the first public child node found
# as the first child of the public parent
public_target = public_parent
public_position = 'first-child'
last_public = public_page
else:
# No public parent has been found
# Insert the node as a sibling to the last root sibling
                    # It's very unlikely but possible for the root to not have
# a public page. When this happens, use the root draft page
# as sibling.
public_target = public_root_sibling or root
public_position = 'right'
# This page now becomes the last root sibling
public_root_sibling = public_page
public_page.refresh_from_db()
public_page.move(
target=public_target.reload(),
pos=public_position,
)
| bsd-3-clause |
raboof/supybot | plugins/Alias/config.py | 15 | 2179 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Alias', True)
Alias = conf.registerPlugin('Alias')
conf.registerGroup(Alias, 'aliases')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
alexforencich/python-ivi | ivi/tektronix/tektronixDPO4032.py | 2 | 1646 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixDPO4000 import *
class tektronixDPO4032(tektronixDPO4000):
"Tektronix DPO4032 IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DPO4032')
super(tektronixDPO4032, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 350e6
self._init_channels()
| mit |
petrutlucian94/nova | nova/objects/__init__.py | 9 | 2746 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(comstud): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Instance,
# etc, yet you do not see these attributes in here. Never fear, there is
# a little bit of magic. When objects are registered, an attribute is set
# on this module automatically, pointing to the newest/latest version of
# the object.
def register_all():
# NOTE(danms): You must make sure your object gets imported in this
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('nova.objects.agent')
__import__('nova.objects.aggregate')
__import__('nova.objects.bandwidth_usage')
__import__('nova.objects.block_device')
__import__('nova.objects.cell_mapping')
__import__('nova.objects.compute_node')
__import__('nova.objects.dns_domain')
__import__('nova.objects.ec2')
__import__('nova.objects.external_event')
__import__('nova.objects.fixed_ip')
__import__('nova.objects.flavor')
__import__('nova.objects.floating_ip')
__import__('nova.objects.hv_spec')
__import__('nova.objects.instance')
__import__('nova.objects.instance_action')
__import__('nova.objects.instance_fault')
__import__('nova.objects.instance_group')
__import__('nova.objects.instance_info_cache')
__import__('nova.objects.instance_mapping')
__import__('nova.objects.instance_numa_topology')
__import__('nova.objects.instance_pci_requests')
__import__('nova.objects.keypair')
__import__('nova.objects.migration')
__import__('nova.objects.network')
__import__('nova.objects.network_request')
__import__('nova.objects.numa')
__import__('nova.objects.pci_device')
__import__('nova.objects.pci_device_pool')
__import__('nova.objects.tag')
__import__('nova.objects.quotas')
__import__('nova.objects.security_group')
__import__('nova.objects.security_group_rule')
__import__('nova.objects.service')
__import__('nova.objects.vcpu_model')
__import__('nova.objects.virt_cpu_topology')
__import__('nova.objects.virtual_interface')
| apache-2.0 |
40023154/0628 | static/Brython3.1.1-20150328-091302/Lib/warnings.py | 752 | 13825 | """Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
try:
file.write(formatwarning(message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
s += " %s\n" % line
return s
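# For instance, formatwarning("x is deprecated", DeprecationWarning,
# "example.py", 3) produces a string shaped like (filename and message are
# illustrative):
#
#     "example.py:3: DeprecationWarning: x is deprecated\n  <source line>\n"
#
# where the indented second line only appears if linecache can find the
# source line for that filename and line number.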
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
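# A short usage sketch (the message pattern and module name are hypothetical):
#
#     filterwarnings("ignore", message=".*deprecated.*",
#                    category=DeprecationWarning, module="legacy_package")
#
# This prepends a filter that silences matching DeprecationWarnings raised
# from modules whose name matches "legacy_package".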
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
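# A sketch of how an option string is interpreted (the option value is
# hypothetical; real values arrive via "-W" flags in sys.warnoptions):
#
#     _setoption("error::DeprecationWarning")
#     # ...behaves like:
#     filterwarnings("error", message="", category=DeprecationWarning,
#                    module="", lineno=0)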
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# Get context information
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals)
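# Typical call (the message text and stacklevel are illustrative):
#
#     warn("frobnicate() is deprecated; use spam() instead",
#          DeprecationWarning, stacklevel=2)
#
# stacklevel=2 attributes the warning to the caller of the deprecated
# function rather than to the function that issues it.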
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
if not callable(showwarning):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
# Print message and context
showwarning(message, category, filename, lineno)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
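# A sketch of typical use with record=True (the warning triggered here is
# purely illustrative):
#
#     with catch_warnings(record=True) as captured:
#         simplefilter("always")
#         warn("something odd happened", UserWarning)
#     assert issubclass(captured[-1].category, UserWarning)
#
# On exit, the original filter list and showwarning hook are restored.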
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, it matches anything.
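# For example, the default "ignore ImportWarning everywhere" entry installed
# at the bottom of this module via simplefilter() looks like:
#
#     ("ignore", None, ImportWarning, None, 0)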
_warnings_defaults = False
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
# resource usage warnings are enabled by default in pydebug mode
if hasattr(sys, 'gettotalrefcount'):
resource_action = "always"
else:
resource_action = "ignore"
simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
| gpl-3.0 |
coronary/RandomEpisode | depends/Lib/site-packages/setuptools/__init__.py | 130 | 5019 | """Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
from six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages',
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
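# For example, a setup.py might use it as follows (the project layout and
# names are hypothetical):
#
#     setup(
#         name='example',
#         packages=find_packages(where='.', exclude=['tests', 'tests.*']),
#     )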
setup = distutils.core.setup
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
monkey.patch_all()
| mit |
tangfeixiong/nova | nova/tests/unit/objects/test_objects.py | 2 | 59782 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import contextlib
import copy
import datetime
import hashlib
import inspect
import os
import pprint
import mock
from oslo_log import log
from oslo_utils import timeutils
import six
from testtools import matchers
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import rpc
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.IntegerField()}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.IntegerField(default=1),
'bar': fields.StringField(),
'missing': fields.StringField(),
'readonly': fields.IntegerField(read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
'mutable_default': fields.ListOfStringsField(default=[]),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
self._context = context
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self):
return 'polo'
@base.remotable
def _update_test(self):
self.bar = 'updated'
@base.remotable
def save(self):
self.obj_reset_changes()
@base.remotable
def refresh(self):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
VERSION = '1.5'
@classmethod
def obj_name(cls):
return 'MyObj'
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.StringField()}
class TestMetaclass(test.NoDBTestCase):
def test_obj_tracking(self):
@six.add_metaclass(base.NovaObjectMetaclass)
class NewBaseClass(object):
VERSION = '1.0'
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Fake1TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake1'
class Fake1TestObj2(Fake1TestObj1):
pass
class Fake1TestObj3(Fake1TestObj1):
VERSION = '1.1'
class Fake2TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake2'
class Fake1TestObj4(Fake1TestObj3):
VERSION = '1.2'
class Fake2TestObj2(Fake2TestObj1):
VERSION = '1.1'
class Fake1TestObj5(Fake1TestObj1):
VERSION = '1.1'
# Newest versions first in the list. Duplicate versions take the
# newest object.
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
'fake2': [Fake2TestObj2, Fake2TestObj1]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Fake1TestObj1._obj_classes)
self.assertEqual(expected, Fake1TestObj2._obj_classes)
self.assertEqual(expected, Fake1TestObj3._obj_classes)
self.assertEqual(expected, Fake1TestObj4._obj_classes)
self.assertEqual(expected, Fake1TestObj5._obj_classes)
self.assertEqual(expected, Fake2TestObj1._obj_classes)
self.assertEqual(expected, Fake2TestObj2._obj_classes)
def test_field_checking(self):
def create_class(field):
class TestField(base.NovaObject):
VERSION = '1.5'
fields = {'foo': field()}
return TestField
create_class(fields.IPV4AndV6AddressField)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, fields.IPV4AndV6Address)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, int)
class TestObjToPrimitive(test.NoDBTestCase):
def test_obj_to_primitive_list(self):
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.NoDBTestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('MyObj'),
}
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param:test: The TestCase doing the comparison
:param:obj: The NovaObject to examine
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
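# For example (a sketch; the object, dict and key names are hypothetical):
#
#     compare_obj(test_case, instance_obj, db_row,
#                 subs={'metadata': 'system_metadata'},
#                 allow_missing=['deleted_at'])
#
# checks every field of instance_obj against db_row, reading the object's
# 'metadata' field from db_row['system_metadata'] and skipping 'deleted_at'
# when the object never loaded that attribute.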
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_object_calls = list()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def str_comparator(self, expected, obj_val):
"""Compare an object field to a string in the db by performing
a simple coercion on the object field value.
"""
self.assertEqual(expected, str(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
try:
f = super(_BaseTestCase, self).assertNotIsInstance
except AttributeError:
self.assertThat(obj,
matchers.Not(matchers.IsInstance(cls)),
message=msg or '')
else:
f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
self.useFixture(nova_fixtures.IndirectionAPIFixture(None))
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
def _testable_conductor(self):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.remote_object_calls = list()
orig_object_class_action = \
self.conductor_service.manager.object_class_action
orig_object_action = \
self.conductor_service.manager.object_action
def fake_object_class_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objname'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_class_action(*args, **kwargs)
return (base.NovaObject.obj_from_primitive(result, context=args[0])
if isinstance(result, base.NovaObject) else result)
self.stubs.Set(self.conductor_service.manager, 'object_class_action',
fake_object_class_action)
def fake_object_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objinst'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_action',
fake_object_action)
# Things are remoted by default in this session
self.useFixture(nova_fixtures.IndirectionAPIFixture(
conductor_rpcapi.ConductorAPI()))
# To make sure local and remote contexts match
self.stubs.Set(rpc.RequestContextSerializer,
'serialize_context',
lambda s, c: c)
self.stubs.Set(rpc.RequestContextSerializer,
'deserialize_context',
lambda s, c: c)
def setUp(self):
super(_RemoteTest, self).setUp()
self._testable_conductor()
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.NovaObject):
fields = {'foobar': fields.IntegerField()}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_obj_class_from_name(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
self.assertEqual('1.5', obj.VERSION)
def test_obj_class_from_name_latest_compatible(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
self.assertEqual('1.6', obj.VERSION)
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.NovaObject.obj_class_from_name('MyObj', '1.25')
except exception.IncompatibleObjectVersion as error:
pass
self.assertIsNotNone(error)
self.assertEqual('1.6', error.kwargs['supported'])
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj._update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
def test_changed_with_sub_object(self):
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
actual = obj.obj_to_primitive()
self.assertJsonEqual(actual, expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_obj_reset_changes_recursive(self):
obj = MyObj(rel_object=MyOwnedObject(baz=123),
rel_objects=[MyOwnedObject(baz=456)])
self.assertEqual(set(['rel_object', 'rel_objects']),
obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True, fields=['foo'])
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True)
self.assertEqual(set([]), obj.rel_object.obj_what_changed())
self.assertEqual(set([]), obj.obj_what_changed())
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object',
'rel_objects', 'mutable_default'] +
base_fields)
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
obj = MyObj(context=self.context)
def fake(*args, **kwargs):
self.assertTrue(obj._context.is_admin)
with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
mock_fn.side_effect = fake
with obj.obj_as_admin():
obj.save()
self.assertTrue(mock_fn.called)
self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
def testme():
obj = MyObj()
with obj.obj_as_admin():
pass
self.assertRaises(exception.OrphanedObjectError, testme)
def test_obj_alternate_context(self):
obj = MyObj(context=self.context)
with obj.obj_alternate_context(mock.sentinel.alt_ctx):
self.assertEqual(mock.sentinel.alt_ctx,
obj._context)
self.assertEqual(self.context, obj._context)
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.NovaObject):
fields = {'foo': fields.IntegerField()}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(exception.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_mutable_default(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.mutable_default = None
obj.mutable_default.append('s1')
self.assertEqual(obj.mutable_default, ['s1'])
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.mutable_default = None
obj1.mutable_default.append('s2')
self.assertEqual(obj1.mutable_default, ['s2'])
def test_obj_mutable_default_set_default(self):
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.obj_set_defaults('mutable_default')
self.assertEqual(obj1.mutable_default, [])
obj1.mutable_default.append('s1')
self.assertEqual(obj1.mutable_default, ['s1'])
obj2 = MyObj(context=self.context, foo=123, bar='abc')
obj2.obj_set_defaults('mutable_default')
self.assertEqual(obj2.mutable_default, [])
obj2.mutable_default.append('s2')
self.assertEqual(obj2.mutable_default, ['s2'])
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,'
'mutable_default=<?>,readonly=<?>,rel_object=<?>,'
'rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
subobj.VERSION = '1.2'
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
orig_primitive = obj.obj_to_primitive()['nova_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', primitive)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_complains_about_missing_rules(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {}
self.assertRaises(exception.ObjectActionError,
obj.obj_make_compatible, {}, '1.0')
def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self):
class MyList(base.ObjectListBase, base.NovaObject):
VERSION = '1.2'
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList(objects=[])
class MyOwner(base.NovaObject):
VERSION = '1.2'
fields = {'mylist': fields.ObjectField('MyList')}
obj_relationships = {
'mylist': [('1.1', '1.1')],
}
myowner = MyOwner(mylist=mylist)
primitive = myowner.obj_to_primitive('1.1')
self.assertIn('mylist', primitive['nova_object.data'])
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
def test_delattr(self):
obj = MyObj(bar='foo')
del obj.bar
# Should appear unset now
self.assertFalse(obj.obj_attr_is_set('bar'))
# Make sure post-delete, references trigger lazy loads
self.assertEqual('loaded!', getattr(obj, 'bar'))
def test_delattr_unset(self):
obj = MyObj()
self.assertRaises(AttributeError, delattr, obj, 'bar')
class TestObject(_LocalTest, _TestObject):
def test_set_defaults(self):
obj = MyObj()
obj.obj_set_defaults('foo')
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertEqual(1, obj.foo)
def test_set_defaults_no_default(self):
obj = MyObj()
self.assertRaises(exception.ObjectActionError,
obj.obj_set_defaults, 'bar')
def test_set_all_defaults(self):
obj = MyObj()
obj.obj_set_defaults()
self.assertEqual(set(['deleted', 'foo', 'mutable_default']),
obj.obj_what_changed())
self.assertEqual(1, obj.foo)
def test_set_defaults_not_overwrite(self):
# NOTE(danms): deleted defaults to False, so verify that it does
# not get reset by obj_set_defaults()
obj = MyObj(deleted=True)
obj.obj_set_defaults()
self.assertEqual(1, obj.foo)
self.assertTrue(obj.deleted)
class TestRemoteObject(_RemoteTest, _TestObject):
def test_major_version_mismatch(self):
MyObj2.VERSION = '2.0'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_greater(self):
MyObj2.VERSION = '1.7'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_less(self):
MyObj2.VERSION = '1.2'
obj = MyObj2.query(self.context)
self.assertEqual(obj.bar, 'bar')
def test_compat(self):
MyObj2.VERSION = '1.1'
obj = MyObj2.query(self.context)
self.assertEqual('oldbar', obj.bar)
def test_revision_ignored(self):
MyObj2.VERSION = '1.1.456'
obj = MyObj2.query(self.context)
self.assertEqual('bar', obj.bar)
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_serialize_set_to_list(self):
ser = base.NovaObjectSerializer()
self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))
def _test_deserialize_entity_newer(self, obj_version, backported_to,
my_version='1.6'):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport.return_value = 'backported'
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(ser._conductor.object_backport.called)
else:
self.assertEqual('backported', result)
ser._conductor.object_backport.assert_called_with(self.context,
primitive,
backported_to)
def test_deserialize_entity_newer_version_backports(self):
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')
def test_deserialize_dot_z_with_extra_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
ser = base.NovaObjectSerializer()
obj = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): The serializer is where the logic lives that
# avoids backports for cases where only a .z difference in
# the received object version is detected. As a result, we
# end up with a version of what we expected, effectively the
# .0 of the object.
self.assertEqual('1.6', obj.VERSION)
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
# dict case
thing = {'key': obj}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in six.itervalues(primitive):
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in six.itervalues(thing2):
self.assertIsInstance(item, MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
def setUp(self):
super(TestArgsSerializer, self).setUp()
self.now = timeutils.utcnow()
self.str_now = timeutils.strtime(at=self.now)
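    # Descriptive note added for clarity (not in the original test): the
    # base.serialize_args decorator on the helper below is expected to turn
    # datetime arguments into their primitive string form (self.str_now here)
    # while leaving other positional and keyword arguments untouched.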
@base.serialize_args
def _test_serialize_args(self, *args, **kwargs):
expected_args = ('untouched', self.str_now, self.str_now)
for index, val in enumerate(args):
self.assertEqual(expected_args[index], val)
expected_kwargs = {'a': 'untouched', 'b': self.str_now,
'c': self.str_now}
for key, val in six.iteritems(kwargs):
self.assertEqual(expected_kwargs[key], val)
def test_serialize_args(self):
self._test_serialize_args('untouched', self.now, self.now,
a='untouched', b=self.now, c=self.now)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
'AgentList': '1.0-4f12bf96ca77315e7e023d588fb071f1',
'Aggregate': '1.1-1ab35c4516f71de0bef7087026ab10d1',
'AggregateList': '1.2-79689d69db4de545a82fe09f30468c53',
'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
'BandwidthUsageList': '1.2-77b4d43e641459f464a6aa4d53debd8f',
'BlockDeviceMapping': '1.9-72d92c263f03a5cbc1761b0ea4c66c22',
'BlockDeviceMappingList': '1.10-972d431e07463ae1f68e752521937b01',
'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd',
'ComputeNode': '1.11-71784d2e6f2814ab467d4e0f69286843',
'ComputeNodeList': '1.11-8d269636229e8a39fef1c3514f77d0c0',
'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a',
'DNSDomainList': '1.0-f876961b1a6afe400b49cf940671db86',
'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9',
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'EC2SnapshotMapping': '1.0-47e7ddabe1af966dce0cfd0ed6cd7cd1',
'EC2VolumeMapping': '1.0-5b713751d6f97bad620f3378a521020d',
'FixedIP': '1.10-b5818a33996228fc146f096d1403742c',
'FixedIPList': '1.10-d0db9597559409a4a01b3577500dfe5e',
'Flavor': '1.1-b6bb7a730a79d720344accefafacf7ee',
'FlavorList': '1.1-d96e87307f94062ce538f77b5e221e13',
'FloatingIP': '1.6-52a67d52d85eb8b3f324a5b7935a335b',
'FloatingIPList': '1.7-bdd31ccd6ff9bb0d290108397b3cd44c',
'HVSpec': '1.0-3999ff70698fc472c2d4d60359949f6b',
'ImageMeta': '1.1-642d1b2eb3e880a367f37d72dd76162d',
'ImageMetaProps': '1.1-8fe09b7872538f291649e77375f8ac4c',
'Instance': '1.20-260d385315d4868b6397c61a13109841',
'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
'InstanceActionEventList': '1.0-c37db4e58b637a857c90fb02284d8f7c',
'InstanceActionList': '1.0-89266105d853ff9b8f83351776fab788',
'InstanceExternalEvent': '1.0-33cc4a1bbd0655f68c0ee791b95da7e6',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.1-ac4076924f7eb5374a92e4f9db7aa053',
'InstanceGroup': '1.9-a413a4ec0ff391e3ef0faa4e3e2a96d0',
'InstanceGroupList': '1.6-1e383df73d9bd224714df83d9a9983bb',
'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
'InstanceList': '1.17-64f6949d58e4ecd3219142f1567a61d9',
'InstanceMapping': '1.0-47ef26034dfcbea78427565d9177fe50',
'InstanceMappingList': '1.0-b7b108f6a56bd100c20a3ebd5f3801a1',
'InstanceNUMACell': '1.2-535ef30e0de2d6a0d26a71bd58ecafc4',
'InstanceNUMATopology': '1.1-d944a7d6c21e1c773ffdf09c6d025954',
'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf',
'InstancePCIRequests': '1.1-fc8d179960869c9af038205a80af2541',
'KeyPair': '1.3-bfaa2a8b148cdf11e0c72435d9dd097a',
'KeyPairList': '1.2-60f984184dc5a8eba6e34e20cbabef04',
'Migration': '1.2-331b1f37d0b20b932614181b9832c860',
'MigrationList': '1.2-5e79c0693d7ebe4e9ac03b5db11ab243',
'MyObj': '1.6-ee7b607402fbfb3390a92ab7199e0d88',
'MyOwnedObject': '1.0-fec853730bd02d54cc32771dd67f08a0',
'NUMACell': '1.2-74fc993ac5c83005e76e34e8487f1c05',
'NUMAPagesTopology': '1.0-c71d86317283266dc8364c149155e48e',
'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
'NUMATopologyLimits': '1.0-9463e0edd40f64765ae518a539b9dfd2',
'Network': '1.2-a977ab383aa462a479b2fae8211a5dde',
'NetworkList': '1.2-b2ae592657f06f6edce4c616821abcf8',
'NetworkRequest': '1.1-7a3e4ca2ce1e7b62d8400488f2f2b756',
'NetworkRequestList': '1.1-ea2a8e1c1ecf3608af2956e657adeb4c',
'PciDevice': '1.3-4d43db45e3978fca4280f696633c7c20',
'PciDeviceList': '1.1-2b8b6d0cf622c58543c5dec50c7e877c',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-ea2a8e1c1ecf3608af2956e657adeb4c',
'Quotas': '1.2-1fe4cd50593aaf5d36a6dc5ab3f98fb3',
'QuotasNoOp': '1.2-e041ddeb7dc8188ca71706f78aad41c1',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
'SecurityGroup': '1.1-0e1b9ba42fe85c13c1437f8b74bdb976',
'SecurityGroupList': '1.0-a3bb51998e7d2a95b3e613111e853817',
'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29',
'SecurityGroupRuleList': '1.1-521f1aeb7b0cc00d026175509289d020',
'Service': '1.13-bc6c9671a91439e08224c2652da5fc4c',
'ServiceList': '1.11-d1728430a30700c143e542b7c75f65b0',
'Tag': '1.0-616bf44af4a22e853c17b37a758ec73e',
'TagList': '1.0-e16d65894484b7530b720792ffbbbd02',
'TestSubclassedObject': '1.6-716fc8b481c9374f7e222de03ba0a621',
'VirtCPUFeature': '1.0-3310718d8c72309259a6e39bdefe83ee',
'VirtCPUModel': '1.0-6a5cc9f322729fc70ddc6733bacd57d3',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.0-19921e38cba320f355d56ecbf8f29587',
'VirtualInterfaceList': '1.0-16a5c18df5574a9405e1a8b350ed8b27',
}
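# Comment added for clarity (not in the original): object_relationships maps
# each parent object to the version of every child object it carried when the
# parent was last bumped; test_relationships below rebuilds this tree from the
# registered classes and fails if the stored and computed trees diverge.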
object_relationships = {
'BlockDeviceMapping': {'Instance': '1.20'},
'ComputeNode': {'HVSpec': '1.0', 'PciDevicePoolList': '1.1'},
'FixedIP': {'Instance': '1.20', 'Network': '1.2',
'VirtualInterface': '1.0',
'FloatingIPList': '1.7'},
'FloatingIP': {'FixedIP': '1.10'},
'ImageMeta': {'ImageMetaProps': '1.1'},
'Instance': {'InstanceFault': '1.2',
'InstanceInfoCache': '1.5',
'InstanceNUMATopology': '1.1',
'PciDeviceList': '1.1',
'TagList': '1.0',
'SecurityGroupList': '1.0',
'Flavor': '1.1',
'InstancePCIRequests': '1.1',
'VirtCPUModel': '1.0',
'EC2Ids': '1.0',
},
'InstanceNUMACell': {'VirtCPUTopology': '1.0'},
'InstanceNUMATopology': {'InstanceNUMACell': '1.2'},
'InstancePCIRequests': {'InstancePCIRequest': '1.1'},
'MyObj': {'MyOwnedObject': '1.0'},
'NUMACell': {'NUMAPagesTopology': '1.0'},
'NUMATopology': {'NUMACell': '1.2'},
'SecurityGroupRule': {'SecurityGroup': '1.1'},
'Service': {'ComputeNode': '1.11'},
'TestSubclassedObject': {'MyOwnedObject': '1.0'},
'VirtCPUModel': {'VirtCPUFeature': '1.0', 'VirtCPUTopology': '1.0'},
}
class TestObjectVersions(test.NoDBTestCase):
def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
"""Follow a chain of remotable things down to the original function."""
if isinstance(thing, classmethod):
return self._find_remotable_method(cls, thing.__get__(None, cls))
elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
return self._find_remotable_method(cls, thing.original_fn,
parent_was_remotable=True)
elif parent_was_remotable:
# We must be the first non-remotable thing underneath a stack of
# remotable things (i.e. the actual implementation method)
return thing
else:
# This means the top-level thing never hit a remotable layer
return None
def _get_fingerprint(self, obj_name):
obj_class = base.NovaObject._obj_classes[obj_name][0]
fields = obj_class.fields.items()
fields.sort()
methods = []
for name in dir(obj_class):
thing = getattr(obj_class, name)
if inspect.ismethod(thing) or isinstance(thing, classmethod):
method = self._find_remotable_method(obj_class, thing)
if method:
methods.append((name, inspect.getargspec(method)))
methods.sort()
# NOTE(danms): Things that need a version bump are any fields
# and their types, or the signatures of any remotable methods.
# Of course, these are just the mechanical changes we can detect,
# but many other things may require a version bump (method behavior
# and return value changes, for example).
if hasattr(obj_class, 'child_versions'):
relevant_data = (fields, methods,
OrderedDict(
sorted(obj_class.child_versions.items())))
else:
relevant_data = (fields, methods)
fingerprint = '%s-%s' % (obj_class.VERSION,
hashlib.md5(str(relevant_data)).hexdigest())
return fingerprint
def test_versions(self):
fingerprints = {}
for obj_name in base.NovaObject._obj_classes:
fingerprints[obj_name] = self._get_fingerprint(obj_name)
if os.getenv('GENERATE_HASHES'):
file('object_hashes.txt', 'w').write(
pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
stored = set(object_data.items())
computed = set(fingerprints.items())
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, hash in changed:
expected[name] = object_data.get(name)
actual[name] = fingerprints.get(name)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes here.')
def test_registry_matches_metaclass(self):
reference = set(object_data.keys())
actual = set(base.NovaObjectRegistry.classes)
test_objects = set(['MyObj', 'MyOwnedObject', 'TestSubclassedObject'])
# NOTE(danms): In the new registry, we don't implicitly track test
# objects, so make sure that the difference between the metaclass and
# the opt-in registry is the set of test objects.
self.assertEqual(test_objects, reference.symmetric_difference(actual))
def _get_object_field_name(self, field):
if isinstance(field._type, fields.Object):
return field._type._obj_name
if isinstance(field, fields.ListOfObjectsField):
return field._type._element_type._type._obj_name
return None
def _build_tree(self, tree, obj_class):
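        # Comment added for clarity: recursively walk obj_class's fields and
        # record, under this object's name, the current VERSION of every
        # child object (list-of-objects fields resolve to their element
        # class).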
obj_name = obj_class.obj_name()
if obj_name in tree:
return
for name, field in obj_class.fields.items():
# Notes(yjiang5): ObjectListBase should be covered by
# child_versions test
if (issubclass(obj_class, base.ObjectListBase) and
name == 'objects'):
continue
sub_obj_name = self._get_object_field_name(field)
if sub_obj_name:
sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
self._build_tree(tree, sub_obj_class)
tree.setdefault(obj_name, {})
tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
def test_relationships(self):
tree = {}
for obj_name in base.NovaObject._obj_classes.keys():
self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
stored = set([(x, str(y)) for x, y in object_relationships.items()])
computed = set([(x, str(y)) for x, y in tree.items()])
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, deps in changed:
expected[name] = object_relationships.get(name)
actual[name] = tree.get(name)
self.assertEqual(expected, actual,
'Some objects have changed dependencies. '
'Please make sure to bump the versions of '
'parent objects and provide a rule in their '
'obj_make_compatible() routines to backlevel '
'the child object.')
def test_obj_make_compatible(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
obj_class().obj_to_primitive(target_version=test_version)
def _get_obj_to_test(self, obj_class):
obj = obj_class()
for fname, ftype in obj.fields.items():
if isinstance(ftype, fields.ObjectField):
fobjname = ftype.AUTO_TYPE._obj_name
fobjcls = base.NovaObject._obj_classes[fobjname][0]
setattr(obj, fname, self._get_obj_to_test(fobjcls))
elif isinstance(ftype, fields.ListOfObjectsField):
# FIXME(danms): This will result in no tests for this
# field type...
setattr(obj, fname, [])
return obj
def _find_version_mapping(self, my_ver, versions):
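        # Comment added for clarity: given this object's target version,
        # return the child version from the closest (my_version,
        # child_version) pair whose my_version does not exceed the target,
        # or None if the child object did not exist at that point.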
closest = None
my_ver = utils.convert_version_to_tuple(my_ver)
for _my, _child in versions:
_my = utils.convert_version_to_tuple(_my)
_child = utils.convert_version_to_tuple(_child)
if _my == my_ver:
return '%s.%s' % _child
elif _my < my_ver:
closest = _child
if closest:
return '%s.%s' % closest
else:
return None
def _validate_object_fields(self, obj_class, primitive):
for fname, ftype in obj_class.fields.items():
if isinstance(ftype, fields.ObjectField):
exp_vers = obj_class.obj_relationships[fname]
exp_ver = self._find_version_mapping(
primitive['nova_object.version'], exp_vers)
if exp_ver is None:
self.assertNotIn(fname, primitive['nova_object.data'])
else:
child_p = primitive['nova_object.data'][fname]
self.assertEqual(exp_ver,
child_p['nova_object.version'])
def test_obj_make_compatible_with_data(self):
        # Iterate all object classes and verify that obj_to_primitive() can
        # target every older minor version while real data is set on the
        # fields. Beyond making sure the method doesn't blow up, this also
        # checks that child objects in the resulting primitive are
        # backlevelled to the versions declared in obj_relationships.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
if 'tests.unit' in obj_class.__module__:
# NOTE(danms): Skip test objects. When we move to
# oslo.versionedobjects, we won't have to do this
continue
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
test_object = self._get_obj_to_test(obj_class)
obj_p = test_object.obj_to_primitive(
target_version=test_version)
self._validate_object_fields(obj_class, obj_p)
def test_obj_relationships_in_order(self):
        # Iterate all object classes and verify that each field's
        # obj_relationships entries are listed in order of strictly
        # increasing parent version, with child versions that never
        # decrease. This catches relationship tables appended out of order.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
for field, versions in obj_class.obj_relationships.items():
last_my_version = (0, 0)
last_child_version = (0, 0)
for my_version, child_version in versions:
_my_version = utils.convert_version_to_tuple(my_version)
_ch_version = utils.convert_version_to_tuple(child_version)
self.assertTrue((last_my_version < _my_version
and last_child_version <= _ch_version),
'Object %s relationship '
'%s->%s for field %s is out of order' % (
obj_name, my_version, child_version,
field))
last_my_version = _my_version
last_child_version = _ch_version
class TestObjEqualPrims(test.NoDBTestCase):
def test_object_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='goodbye')
obj2.obj_reset_changes()
obj2.bar = 'goodbye'
        # obj2 will be marked with field 'bar' updated
        self.assertTrue(base.obj_equal_prims(obj1, obj2),
                        "Objects that differ only because one has a field "
                        "marked as updated should be equal")
def test_object_not_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertFalse(base.obj_equal_prims(obj1, obj2),
"Objects that differ in any field "
"should not be equal")
def test_object_ignore_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertTrue(base.obj_equal_prims(obj1, obj2, ['bar']),
"Objects that only differ in an ignored field "
"should be equal")
| apache-2.0 |
RachitKansal/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
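    # Comment added for clarity: draw one ellipse per mixture component,
    # oriented along the leading eigenvector of the first two covariance
    # dimensions and coloured to match the per-class scatter plotted below.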
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
jboeuf/grpc | src/python/grpcio_testing/grpc_testing/_common.py | 27 | 4481 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common interfaces and implementation."""
import abc
import collections
import six
def _fuss(tuplified_metadata):
return tuplified_metadata + ((
'grpc.metadata_added_by_runtime',
'gRPC is allowed to add metadata in transmission and does so.',
),)
FUSSED_EMPTY_METADATA = _fuss(())
def fuss_with_metadata(metadata):
if metadata is None:
return FUSSED_EMPTY_METADATA
else:
return _fuss(tuple(metadata))
def rpc_names(service_descriptors):
rpc_names_to_descriptors = {}
for service_descriptor in service_descriptors:
for method_descriptor in service_descriptor.methods_by_name.values():
rpc_name = '/{}/{}'.format(service_descriptor.full_name,
method_descriptor.name)
rpc_names_to_descriptors[rpc_name] = method_descriptor
return rpc_names_to_descriptors
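# Illustrative note (not part of the original module): for a proto package
# "foo" defining service "Bar" with a method "Baz", rpc_names() would key
# Baz's method descriptor under '/foo.Bar/Baz'.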
class ChannelRpcRead(
collections.namedtuple('ChannelRpcRead', (
'response',
'trailing_metadata',
'code',
'details',
))):
pass
class ChannelRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def initial_metadata(self):
raise NotImplementedError()
@abc.abstractmethod
def add_request(self, request):
raise NotImplementedError()
@abc.abstractmethod
def close_requests(self):
raise NotImplementedError()
@abc.abstractmethod
def take_response(self):
raise NotImplementedError()
@abc.abstractmethod
def cancel(self, code, details):
raise NotImplementedError()
@abc.abstractmethod
def termination(self):
raise NotImplementedError()
@abc.abstractmethod
def is_active(self):
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
raise NotImplementedError()
@abc.abstractmethod
def add_callback(self, callback):
raise NotImplementedError()
class ChannelHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_rpc(self, method_full_rpc_name, invocation_metadata, requests,
requests_closed, timeout):
raise NotImplementedError()
class ServerRpcRead(
collections.namedtuple('ServerRpcRead', (
'request',
'requests_closed',
'terminated',
))):
pass
REQUESTS_CLOSED = ServerRpcRead(None, True, False)
TERMINATED = ServerRpcRead(None, False, True)
class ServerRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def send_initial_metadata(self, initial_metadata):
raise NotImplementedError()
@abc.abstractmethod
def take_request(self):
raise NotImplementedError()
@abc.abstractmethod
def add_response(self, response):
raise NotImplementedError()
@abc.abstractmethod
def send_termination(self, trailing_metadata, code, details):
raise NotImplementedError()
@abc.abstractmethod
def add_termination_callback(self, callback):
raise NotImplementedError()
class Serverish(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_unary_unary(self, method_descriptor, handler,
invocation_metadata, request, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_unary_stream(self, method_descriptor, handler,
invocation_metadata, request, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_unary(self, method_descriptor, handler,
invocation_metadata, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_stream(self, method_descriptor, handler,
invocation_metadata, deadline):
raise NotImplementedError()
| apache-2.0 |
amith01994/intellij-community | python/lib/Lib/site-packages/django/contrib/contenttypes/tests.py | 87 | 2781 | from django import db
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.contenttypes.views import shortcut
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class ContentTypesTests(TestCase):
def setUp(self):
# First, let's make sure we're dealing with a blank slate (and that
# DEBUG is on so that queries get logged)
self.old_DEBUG = settings.DEBUG
self.old_Site_meta_installed = Site._meta.installed
settings.DEBUG = True
ContentType.objects.clear_cache()
db.reset_queries()
def tearDown(self):
settings.DEBUG = self.old_DEBUG
Site._meta.installed = self.old_Site_meta_installed
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model or
by ID -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
# A second hit, though, won't hit the DB, nor will a lookup by ID
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
len(db.connection.queries)
self.assertEqual(2, len(db.connection.queries))
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username="john")
Site._meta.installed = True
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://example.com/users/john/", response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/", response._headers.get("location")[1])
| apache-2.0 |
nelson-liu/scikit-learn | sklearn/cluster/affinity_propagation_.py | 30 | 10689 | """Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
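    Examples
    --------
    A minimal sketch (illustrative only; the exemplars found depend on the
    similarities and on the assumed preference value):
    >>> import numpy as np
    >>> from sklearn.metrics import euclidean_distances
    >>> X = np.array([[1., 2.], [1., 4.], [1., 0.],
    ... [4., 2.], [4., 4.], [4., 0.]])
    >>> S = -euclidean_distances(X, squared=True)
    >>> centers, labels = affinity_propagation(S, preference=-50)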
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, ie of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
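    Examples
    --------
    Illustrative sketch only (toy data; the clustering found depends on the
    assumed preference value):
    >>> import numpy as np
    >>> X = np.array([[1., 2.], [1., 4.], [1., 0.],
    ... [4., 2.], [4., 4.], [4., 0.]])
    >>> ap = AffinityPropagation(preference=-50).fit(X)
    >>> ap.labels_  # doctest: +SKIP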
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X : array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
ztemt/NX505J_5.1_kernel | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'msmkrypton*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
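# Illustrative note (hypothetical target name, added for clarity):
# scan_configs() maps a target name to its defconfig path, e.g.
# {'msm8974': 'arch/arm/configs/msm8974_defconfig'}; build() below expands
# the name back into 'arch/arm/configs/%s_defconfig'.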
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
steynovich/ansible-modules-extras | cloud/vmware/vmware_vsan_cluster.py | 64 | 3944 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Russell Teague <rteague2 () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vsan_cluster
short_description: Configure VSAN clustering on an ESXi host
description:
- This module can be used to configure VSAN clustering on an ESXi host
version_added: 2.0
author: "Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_uuid:
description:
- Desired cluster UUID
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure VMware VSAN Cluster
hosts: deploy_node
gather_facts: False
tags:
- vsan
tasks:
- name: Configure VSAN on first host
vmware_vsan_cluster:
hostname: "{{ groups['esxi'][0] }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
register: vsan_cluster
- name: Configure VSAN on remaining hosts
vmware_vsan_cluster:
hostname: "{{ item }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
with_items: groups['esxi'][1:]
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def create_vsan_cluster(host_system, new_cluster_uuid):
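    # Comment added for clarity: enable VSAN on this host with automatic
    # storage claiming, joining the supplied cluster UUID when one is given,
    # and return (changed, task result, the host's resulting cluster UUID).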
host_config_manager = host_system.configManager
vsan_system = host_config_manager.vsanSystem
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.enabled = True
if new_cluster_uuid is not None:
vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
vsan_config.clusterInfo.uuid = new_cluster_uuid
vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
vsan_config.storageInfo.autoClaimStorage = True
task = vsan_system.UpdateVsan_Task(vsan_config)
changed, result = wait_for_task(task)
host_status = vsan_system.QueryHostStatus()
cluster_uuid = host_status.uuid
return changed, result, cluster_uuid
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
new_cluster_uuid = module.params['cluster_uuid']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
PictureYo-self/Picture-Yo-self | code/colorpicker10.py | 6 | 2691 | #// screen manager imported from http://kivy.org/docs/api-kivy.uix.screenmanager.html
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from random import random
from kivy.uix.widget import Widget
from kivy.graphics import Color, Rectangle
from kivy.uix.button import Button
from kivy.graphics import Color, Ellipse, Line
from kivy.uix.image import Image
import sys
from kivy.clock import Clock
f = open('/home/pi/Picture-Yo-self/code/pictures/picName.txt','r')
picname = f.read()
f.close()
print picname
f = open('/home/pi/Picture-Yo-self/code/pictures/email.txt','r')
email = f.read()
f.close()
email = '/home/pi/Picture-Yo-self/code/pictures/' + email + '.png'
f = open('/home/pi/Picture-Yo-self/code/college.txt','r')
col = f.readline().strip()
f.close()
college = '/home/pi/Picture-Yo-self/code/pictures/' + col
print col
#college = '/home/pi/Picture-Yo-self/code/pictures/Jones.jpg'#' + col + '.jpg'
print college
#reload(sys)
class MyPaintWidget(Widget):
def on_touch_down(self, touch):
color = (random(), 1, 1)
with self.canvas:
Color(*color, mode='hsv')
touch.ud['line'] = Line(points=(touch.x, touch.y), width=3)
def on_touch_move(self, touch):
touch.ud['line'].points += [touch.x, touch.y]
class MainApp(App):
im=Image(source=picname, size_hint=(1,50))
crest=Image(source=college, size_hint=(25,25))#, pos=(1,1))
def build(self):
root = BoxLayout(orientation='vertical')
parent = BoxLayout(orientation='horizontal')
painter = MyPaintWidget()
crestwid = BoxLayout(orientation='horizontal')
# create clear button
clearbtn = Button(text='Clear', size_hint=(1,5))
parent.add_widget(clearbtn)
def clear_canvas(obj):
painter.canvas.clear()
clearbtn.bind(on_release=clear_canvas)
# create retake photo button
retakebtn = Button(text='Retake Photo', size_hint=(1,5))
parent.add_widget(retakebtn)
def retake_pic(obj):
execfile("momocapture.py")
self.im.reload()
painter.canvas.clear()
retakebtn.bind(on_release=retake_pic)
# create save button
savebtn = Button(text='Save and send to email', size_hint=(1,5))
parent.add_widget(savebtn)
def save_pic(obj):
parent.remove_widget(savebtn)
parent.remove_widget(clearbtn)
parent.remove_widget(retakebtn)
root.export_to_png(email)
exit()
savebtn.bind(on_release=save_pic)
crestwid.add_widget(self.crest)
parent.add_widget(crestwid)
root.add_widget(self.im)
root.add_widget(painter)
root.add_widget(parent)
#root.add_widget(crestwid)
return root
class RootWidget(BoxLayout):
pass
if __name__ == '__main__':
MainApp().run()
| gpl-2.0 |
widdowquinn/THAPBI-pycits | pycits/cd_hit.py | 1 | 3265 | #!/usr/bin/env python
#
# cd_hit * (clusterassembled reads with database)
# cd_hit_est used as this is the nt clustering tool
# http://weizhongli-lab.org/lab-wiki/doku.php?id=cd-hit-user-guide
# follow this link to get the download.
# https://github.com/weizhongli/cdhit
# cd_hit-0.9.10-bin-64.tar.gz
#
# (c) The James Hutton Institute 2016
# Author: Leighton Pritchard and Peter Thorpe
import os
import subprocess
from collections import namedtuple
from .tools import is_exe, NotExecutableError
# factory class for cd_hit class returned values
Results = namedtuple("Results", "command fastaout clusters " +
"stdout stderr")
class Cd_hit_Error(Exception):
"""Exception raised when cd_hit fails"""
def __init__(self, message):
self.message = message
class Cd_hit(object):
"""Class for working with cd_hit"""
def __init__(self, exe_path):
"""Instantiate with location of executable"""
if not is_exe(exe_path):
msg = "{0} is not an executable".format(exe_path)
raise NotExecutableError(msg)
self._exe_path = exe_path
def run(self, fasta_in, threads, threshold, outdir, prefix, dry_run=False):
"""Run cd_hit to cluster passed fasta files
- fasta_in - fasta file to be clustered
- threshold - threshold to cluster at
- threads - number of threads for cd_hit to use
- outdir - output directory for clustering output
- prefix - file prefix for cd_hit output
- dry_run - if True, returns cmd-line but does not run
Returns a tuple of output filenames, and the STOUT returned by the
cd_hit run.
"""
self.__build_cmd(fasta_in, threads, threshold, outdir, prefix)
if dry_run:
return(self._cmd)
pipe = subprocess.run(self._cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
results = Results(self._cmd, *self._outfnames, pipe.stdout,
pipe.stderr)
return results
def __build_cmd(self, fasta_in, threads, threshold, outdir, prefix):
"""Build a command-line for cd_hit_est.
cd_hit takes a path to an output directory PLUS the prefix of the
files to write, such that
-o a/b/cdefg
writes files
a/b/cdefg
a/b/cdefg.clstr
and so on.
        -d is added to the command so that the output clusters write out
        names up to 500 letters long. The default truncates these at 20
        (too short).
        -M is added to allow unlimited memory - not a problem for
        small jobs. If jobs are big, we will have to alter this.
"""
        # outfiles are named WhatEver.out + .bak.clstr and + .clstr
self._outfnames = [os.path.join(outdir, prefix) + suffix for suffix in
('.fasta', '.clstr')]
cmd = ["cd-hit-est",
"-i", fasta_in,
"-o", os.path.join(outdir, prefix),
"-T {0}".format(threads),
"-M", "0",
"-c", str(threshold),
"-d", "500"]
self._cmd = ' '.join(cmd)
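    # Illustrative only (not part of the original module): for hypothetical
    # arguments fasta_in="reads.fasta", threads=4, threshold=0.99,
    # outdir="out" and prefix="clusters", __build_cmd assembles roughly:
    #   cd-hit-est -i reads.fasta -o out/clusters -T 4 -M 0 -c 0.99 -d 500
    # and records out/clusters.fasta and out/clusters.clstr as the expected
    # output filenames.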
| mit |
mbrukman/flocker | admin/functional/test_aws.py | 14 | 1570 | import shutil
import os
from unittest import skipIf
import boto
from effect import Effect, sync_perform, ComposedDispatcher
from twisted.python.filepath import FilePath
from twisted.trial.unittest import SynchronousTestCase
from ..aws import boto_dispatcher, UploadToS3
from flocker.provision._effect import dispatcher as base_dispatcher
from flocker.testtools import random_name
# Bucket to use for testing
bucket_name = 'clusterhq-archive-testing'
try:
boto.connect_s3().head_bucket(bucket_name)
_can_connect = True
except:
_can_connect = False
if_aws = skipIf(not _can_connect, "Requires boto AWS credentials")
class AWSTest(SynchronousTestCase):
@if_aws
def test_upload_content_type(self):
"""
A content type can be set for an uploaded file.
"""
filename = random_name(self)
tmpdir = FilePath(self.mktemp())
tmpdir.makedirs()
tmpfile = tmpdir.child(filename)
tmpfile.setContent('foo')
s3 = boto.connect_s3()
bucket = s3.get_bucket(bucket_name)
self.addCleanup(bucket.delete_key, filename)
sync_perform(
dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
effect=Effect(UploadToS3(
source_path=tmpdir,
target_bucket=bucket_name,
target_key=filename,
file=tmpfile,
content_type='application/json',
))
)
key = bucket.get_key(filename)
self.assertEqual('application/json', key.content_type)
| apache-2.0 |
hughperkins/kgsgo-dataset-preprocessor | dataset_partitioner.py | 1 | 7011 | #!/usr/bin/python
#
# Copyright Hugh Perkins 2015 hughperkins at gmail
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# goal of this is to partition the data into two sets:
# - training data
# - testing data
#
# These sets will be assigned according to the following principles:
# - both sets should be relatively stable, not change as new archive data is available
# - test set should be not too big, but large enough that accuracy can be measured to around 0.1%
# - training set will contain the rest of the data
# - the same matches should not be present in both training and test set (not even different moves from
# the same match)
# - should probably be documented which are in which perhaps? eg stored as a python file in the
# repository (or as a yaml file?)
from __future__ import print_function, unicode_literals, division, absolute_import
from os import path, sys
sys.path.append( path.dirname(path.abspath(__file__)) + '/thirdparty/future/src' )
from builtins import ( bytes, dict, int, list, object, range, str, ascii, chr,
hex, input, next, oct, open, pow, round, super, filter, map, zip )
import sys, os, time, random
import index_processor
numTestGames = 100
testGames = []
trainGames = []
def draw_samples( dataDirectory, numSamples ):
# draws filename, and game index number, from the available games
# without replacement (so we should check for dupes :-( )
# first we should create a single list, containing pairs of ( filename, gameindex )
# then we will draw samples from this
# we should restrict the available games to something static, eg everything up to dec 2014, inclusive
availableGames = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
availableGames.append( ( filename, i ) )
print( 'total num games: ' + str( len( availableGames ) ) )
# need to seed random first
random.seed(0)
samplesSet = set()
while len( samplesSet ) < numSamples:
sample = random.choice( availableGames )
if sample not in samplesSet:
samplesSet.add( sample )
print( 'Drawn ' + str( numSamples ) + ' samples:' )
# copy to list
samples = list( samplesSet )
return samples
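# Illustrative only (hypothetical filename, not taken from the code): each
# sample is a (filename, gameindex) pair such as ('KGS-2014_06-19-1000-.tar.gz', 42),
# where the year is parsed from the text between the first '-' and the following '_'.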
def draw_training_games( dataDirectory ):
# gets list of all non-test games, that are no later than dec 2014
global testGames
train_games = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
sample = ( filename, i )
if sample not in testGames:
train_games.append( sample )
print( 'total num training games: ' + str( len( train_games ) ) )
def draw_test_samples( dataDirectory ):
global numTestGames, testGames
if len( testGames ) > 0:
return testGames
try:
testSampleFile = open( 'test_samples.py', 'r' )
samplesContents = testSampleFile.read()
testSampleFile.close()
for line in samplesContents.split('\n'):
#print( line )
if line != "":
( filename, index ) = eval( line )
testGames.append( ( filename, index ) )
except Exception as e:
print( e )
testGames = draw_samples( dataDirectory, numTestGames )
testSampleFile = open( '~test_samples.py', 'w' )
for sample in testGames:
testSampleFile.write( str( sample ) + "\n" )
testSampleFile.close()
os.rename( '~test_samples.py', 'test_samples.py' )
# for sample in testGames:
# print( 'testgame: ' + str( sample ) )
return testGames
# draw training games, not overlapping with any of the test games
def draw_training_samples( dataDirectory, numSamples ):
test_samples = draw_test_samples( dataDirectory )
availableGames = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
availableGames.append( ( filename, i ) )
print( 'total num games: ' + str( len( availableGames ) ) )
# need to seed random first
random.seed(0)
# I suppose the first 100 samples will be the testing ones :-P
# anyway, just skip those....
samplesSet = set()
while len( samplesSet ) < numSamples:
sample = random.choice( availableGames )
if sample not in test_samples:
samplesSet.add( sample )
print( 'Drawn ' + str( numSamples ) + ' samples:' )
# copy to list
samples = list( samplesSet )
return samples
def draw_all_training( dataDirectory ):
test_samples = draw_test_samples( dataDirectory )
availableGames = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
availableGames.append( ( filename, i ) )
print( 'total num games: ' + str( len( availableGames ) ) )
# need to seed random first
random.seed(0)
# I suppose the first 100 samples will be the testing ones :-P
# anyway, just skip those....
samplesSet = set()
for sample in availableGames:
if sample not in test_samples:
samplesSet.add( sample )
print( 'Drawn all samples, ie ' + str( len( samplesSet ) ) + ' samples:' )
# copy to list
samples = list( samplesSet )
return samples
def draw_training_10k( dataDirectory ):
return draw_training_samples( dataDirectory, 10000 )
def go(dataDirectory):
testsamples = draw_test_samples( dataDirectory )
for sample in testsamples:
print( 'testgame: ' + str( sample ) )
# all other games are training games...
draw_training_games( dataDirectory )
if __name__ == '__main__':
sTargetDirectory = 'data'
if len(sys.argv) == 2:
sTargetDirectory = sys.argv[1]
go(sTargetDirectory)
| mpl-2.0 |
mattlevesque/py-shrt-lkr | py_shrt_lkr/tests_ORG.py | 2 | 1767 | import unittest
import transaction
from pyramid import testing
from .core.models import DBSession
# class TestMyViewSuccessCondition(unittest.TestCase):
# def setUp(self):
# self.config = testing.setUp()
# from sqlalchemy import create_engine
# engine = create_engine('sqlite://')
# from .models import (
# Base,
# MyModel,
# )
# DBSession.configure(bind=engine)
# Base.metadata.create_all(engine)
# with transaction.manager:
# model = MyModel(name='one', value=55)
# DBSession.add(model)
#
# def tearDown(self):
# DBSession.remove()
# testing.tearDown()
#
# def test_passing_view(self):
# from .views import my_view
# request = testing.DummyRequest()
# info = my_view(request)
# self.assertEqual(info['one'].name, 'one')
# self.assertEqual(info['project'], 'py-shrt-lkr')
#
#
# class TestMyViewFailureCondition(unittest.TestCase):
# def setUp(self):
# self.config = testing.setUp()
# from sqlalchemy import create_engine
# engine = create_engine('sqlite://')
# DBSession.configure(bind=engine)
#
# def tearDown(self):
# DBSession.remove()
# testing.tearDown()
#
# def test_failing_view(self):
# from .views import my_view
# request = testing.DummyRequest()
# info = my_view(request)
# self.assertEqual(info.status_int, 500)
##
##
# Services tests
##
class TestServiceUserSuccessCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
DBSession.configure(bind=engine)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_failing_view(self):
from .core.services import UserService
print("TEST")
self.assertEqual(UserService.get_by_id(77), -10, "Yup that works")
| gpl-2.0 |
BT-rmartin/odoo | addons/account/wizard/account_move_line_select.py | 385 | 2800 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_move_line_select(osv.osv_memory):
"""
Account move line select
"""
_name = "account.move.line.select"
_description = "Account move line select"
def open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
account_obj = self.pool.get('account.account')
fiscalyear_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
if 'fiscalyear' not in context:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
fiscalyear_ids = [context['fiscalyear']]
fiscalyears = fiscalyear_obj.browse(cr, uid, fiscalyear_ids, context=context)
period_ids = []
if fiscalyears:
for fiscalyear in fiscalyears:
for period in fiscalyear.period_ids:
period_ids.append(period.id)
domain = str(('period_id', 'in', period_ids))
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_tree1')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id])[0]
result['context'] = {
'fiscalyear': False,
'account_id': context['active_id'],
'active_id': context['active_id'],
}
if context['active_id']:
acc_data = account_obj.browse(cr, uid, context['active_id']).child_consol_ids
if acc_data:
result['context'].update({'consolidate_children': True})
result['domain']=result['domain'][0:-1]+','+domain+result['domain'][-1]
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
asposewords/Aspose_Words_Cloud | Examples/Python/Examples/GetSpecificRunOfParagraphFromWordDocument.py | 2 | 1537 | import asposewordscloud
from asposewordscloud.WordsApi import WordsApi
from asposewordscloud.WordsApi import ApiException
from asposewordscloud.models import RunResponse
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Words API SDK
api_client = asposewordscloud.ApiClient.ApiClient(apiKey, appSid, True)
wordsApi = WordsApi(api_client)
#set input file name
filename = "SampleWordDocument.docx"
index = 1
runIndex = 0
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Words Cloud SDK API to get a specific run of a paragraph from a word document
response = wordsApi.GetDocumentParagraphRun(name=filename, index=index, runIndex=runIndex)
if response.Status == "OK":
docParagraphRun = response.Run
#display document paragraph run info
if docParagraphRun is not None:
print "NoteId : " + docParagraphRun.NodeId
print "Text : " + docParagraphRun.Text
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
| mit |
mhaessig/servo | tests/wpt/css-tests/tools/pywebsocket/src/setup.py | 434 | 2863 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup, Extension
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
# Build and use a C++ extension for faster masking. SWIG is required.
_USE_FAST_MASKING = False
if sys.version < '2.3':
print >> sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
if _USE_FAST_MASKING:
setup(ext_modules=[
Extension(
'mod_pywebsocket/_fast_masking',
['mod_pywebsocket/fast_masking.i'],
swig_opts=['-c++'])])
setup(author='Yuzo Fujishima',
author_email='[email protected]',
description='WebSocket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'the WebSocket Protocol (RFC 6455). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
# See the source of distutils.version, distutils.versionpredicate and
# distutils.dist to understand how to name version numbers.
version='0.7.9',
)
# vi:sts=4 sw=4 et
| mpl-2.0 |
lfz/Guided-Denoise | Attackset/fgsm_inresv2_random/attack_iter.py | 1 | 10010 | """Implementation of sample attack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.misc import imread
from scipy.misc import imsave
from nets import inception_v3, inception_v4, inception_resnet_v2, resnet_v2
from functools import partial
from multiprocessing import Pool
import tensorflow as tf
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_adv_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_ens3_adv_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_ens4_adv_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_inception_v4', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_inception_resnet_v2', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_ens_adv_inception_resnet_v2', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_resnet', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_dir', '', 'Output directory with images.')
tf.flags.DEFINE_float(
'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')
tf.flags.DEFINE_integer(
'num_iter', 10, 'Number of iterations.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 10, 'How many images process at one time.')
tf.flags.DEFINE_integer(
'use_existing', 0, 'whether reuse existing result')
tf.flags.DEFINE_integer(
'random_eps', 0, 'whether use random pertubation')
tf.flags.DEFINE_float(
'momentum', 1.0, 'Momentum.')
tf.flags.DEFINE_string(
'gpu','0','')
FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
"""Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
    filenames: list of file names (without path) for each image.
      Length of this list could be less than batch_size; in this case only
      the first few images of the result are elements of the minibatch.
images: array with all images from this batch
"""
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = imread(f, mode='RGB').astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images
def save_images(arg):
image,filename,output_dir = arg
imsave(os.path.join(output_dir, filename), (image + 1.0) * 0.5, format='png')
def graph(x, y, i, x_max, x_min, grad, eps_inside):
num_iter = FLAGS.num_iter
alpha = eps_inside / num_iter
momentum = FLAGS.momentum
num_classes = 1001
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
logits_res_v2, end_points_res_v2 = inception_resnet_v2.inception_resnet_v2(
x, num_classes=num_classes, is_training=False)
pred = tf.argmax( end_points_res_v2['Predictions'], 1)
first_round = tf.cast(tf.equal(i, 0), tf.int64)
y = first_round * pred + (1 - first_round) * y
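  # the two lines above use the network's own prediction as the label y on
  # the first iteration (i == 0) and keep that label fixed afterwards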
one_hot = tf.one_hot(y, num_classes)
logits = logits_res_v2
auxlogits = end_points_res_v2['AuxLogits']
cross_entropy = tf.losses.softmax_cross_entropy(one_hot,
logits,
label_smoothing=0.0,
weights=1.0)
cross_entropy += tf.losses.softmax_cross_entropy(one_hot,
auxlogits,
label_smoothing=0.0,
weights=0.4)
noise = tf.gradients(cross_entropy, x)[0]
x = x + alpha * tf.sign(noise)
x = tf.clip_by_value(x, x_min, x_max)
i = tf.add(i, 1)
return x, y, i, x_max, x_min, noise, eps_inside
def stop(x, y, i, x_max, x_min, grad, eps_inside):
num_iter = FLAGS.num_iter
return tf.less(i, num_iter)
def main(_):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# eps is a difference between pixels so it should be in [0, 2] interval.
# Renormalizing epsilon from [0, 255] to [0, 2].
print(FLAGS.output_dir)
#eps = 2.0 * FLAGS.max_epsilon / 255.0
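  # e.g. with the default max_epsilon of 16, eps = 2.0 * 16 / 255 ~= 0.125 on
  # the [-1, 1] pixel scale used here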
gpus = np.array(FLAGS.gpu.split(',')).astype('int')
n_gpus = len(gpus)
bs_single = FLAGS.batch_size
FLAGS.batch_size *= n_gpus
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
batch_shape_single = [bs_single, FLAGS.image_height, FLAGS.image_width, 3]
tf.logging.set_verbosity(tf.logging.INFO)
pool = Pool()
with tf.Graph().as_default(), tf.device('/cpu:0'):
flists = set([f for f in os.listdir(FLAGS.input_dir) if 'png' in f])
if FLAGS.use_existing == 1:
flists_existing = set([f for f in os.listdir(FLAGS.output_dir) if 'png' in f ])
newfiles = list(flists.difference(flists_existing))
newfiles = [os.path.join(FLAGS.input_dir,f) for f in newfiles]
else:
newfiles = [os.path.join(FLAGS.input_dir,f) for f in flists]
print('creating %s new files'%(len(newfiles)))
if len(newfiles) == 0:
return
filename_queue = tf.train.string_input_producer(newfiles, shuffle = False, num_epochs = FLAGS.batch_size)
image_reader = tf.WholeFileReader()
filename, image_file = image_reader.read(filename_queue)
image = tf.image.decode_png(image_file)
image.set_shape((299, 299, 3))
eps = tf.placeholder(dtype='float32', shape = [FLAGS.batch_size, None, None, None])
# Generate batch
num_preprocess_threads = 20
min_queue_examples = 256
images,filenames = tf.train.batch(
[image,filename],
batch_size=FLAGS.batch_size,
num_threads=num_preprocess_threads,
capacity= 3 * FLAGS.batch_size,
allow_smaller_final_batch = False)
images = tf.cast(images,tf.float32)/255.0*2.-1.
images_splits = tf.split(axis=0, num_or_size_splits=n_gpus, value=images)
eps_splits = tf.split(axis=0, num_or_size_splits=n_gpus, value=eps)
# Prepare graph
#x_input = tf.placeholder(tf.float32, shape=batch_shape)
x_advlist = []
for i_gpu in range(n_gpus):
start = i_gpu*bs_single
print('gpu'+str(i_gpu))
with tf.device('/gpu:'+str(i_gpu)):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i_gpu > 0 else None):
# with tf.name_scope('%s_%d' % ('tower', i_gpu)):
x_in_single = images_splits[i_gpu]
eps_single = eps_splits[i_gpu]
x_max = tf.clip_by_value(x_in_single + eps_single, -1.0, 1.0)
x_min = tf.clip_by_value(x_in_single - eps_single, -1.0, 1.0)
bs_this = x_in_single.shape[0]
y = tf.constant(np.zeros([bs_single]), tf.int64)
i = tf.constant(0)
grad = tf.zeros_like(x_in_single)
x_adv, _, _, _, _, _, _ = tf.while_loop(stop, graph, [x_in_single, y, i, x_max, x_min, grad, eps_single])
x_advlist.append(x_adv)
x_adv = tf.concat(x_advlist,0)
# Run computation
s6 = tf.train.Saver(slim.get_model_variables(scope='InceptionResnetV2'))
init = (tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init)
s6.restore(sess, FLAGS.checkpoint_path_inception_resnet_v2)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
n_iter = -(-(len(newfiles))//FLAGS.batch_size)
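      # ceiling division: number of batches needed to cover all new files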
stack_img = []
stack_names = []
for i in range(n_iter):
if FLAGS.random_eps:
eps_value = np.random.randint(1,FLAGS.max_epsilon, [FLAGS.batch_size,1,1,1])
else:
eps_value = np.ones([FLAGS.batch_size,1,1,1]) * FLAGS.max_epsilon
eps_value = eps_value.astype('float32') *2 /255
names,adv_images,orig_images = sess.run([filenames,x_adv,images], feed_dict={eps:eps_value})
names = [os.path.basename(name) for name in names]
stack_img.append(adv_images)
stack_names.append(names)
# save_images2(adv_images, names, FLAGS.output_dir, pool)
# save_images(adv_images, names, FLAGS.output_dir)
if ((i+1)%100 ==0) or i == n_iter-1:
print("%d / %d"%(i+1,n_iter))
stack_img = np.concatenate(stack_img)
stack_names = np.concatenate(stack_names)
#partial_save = partial(save_one,images=stack_img,filenames=stack_names,output_dir=FLAGS.output_dir)
paras = ((im,name,FLAGS.output_dir) for (im,name) in zip(stack_img,stack_names))
pool.map_async(save_images,paras)
stack_img = []
stack_names = []
# save_images(adv_images, filenames, FLAGS.output_dir)
# Finish off the filename queue coordinator.
coord.request_stop()
coord.join(threads)
pool.close()
pool.join()
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
Captnoord/openpli-enigma2 | lib/python/Screens/ChoiceBox.py | 10 | 4653 | from Screens.Screen import Screen
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.ChoiceList import ChoiceEntryComponent, ChoiceList
from Components.Sources.StaticText import StaticText
import enigma
class ChoiceBox(Screen):
def __init__(self, session, title = "", list = [], keys = None, selection = 0, skin_name = []):
Screen.__init__(self, session)
if isinstance(skin_name, str):
skin_name = [skin_name]
self.skinName = skin_name + ["ChoiceBox"]
self["text"] = Label(title)
self.list = []
self.summarylist = []
if keys is None:
self.__keys = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "red", "green", "yellow", "blue" ] + (len(list) - 10) * [""]
else:
self.__keys = keys + (len(list) - len(keys)) * [""]
self.keymap = {}
pos = 0
for x in list:
strpos = str(self.__keys[pos])
self.list.append(ChoiceEntryComponent(key = strpos, text = x))
if self.__keys[pos] != "":
self.keymap[self.__keys[pos]] = list[pos]
self.summarylist.append((self.__keys[pos],x[0]))
pos += 1
self["list"] = ChoiceList(list = self.list, selection = selection)
self["summary_list"] = StaticText()
self["summary_selection"] = StaticText()
self.updateSummary(selection)
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "ColorActions", "DirectionActions"],
{
"ok": self.go,
"back": self.cancel,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
"red": self.keyRed,
"green": self.keyGreen,
"yellow": self.keyYellow,
"blue": self.keyBlue,
"up": self.up,
"down": self.down
}, -1)
def autoResize(self):
orgwidth = self.instance.size().width()
orgpos = self.instance.position()
textsize = self["text"].getSize()
count = len(self.list)
if count > 10:
count = 10
offset = 25 * count
wsizex = textsize[0] + 60
wsizey = textsize[1] + offset
if (520 > wsizex):
wsizex = 520
wsize = (wsizex, wsizey)
# resize
self.instance.resize(enigma.eSize(*wsize))
# resize label
self["text"].instance.resize(enigma.eSize(*textsize))
# move list
listsize = (wsizex, 25 * count)
self["list"].instance.move(enigma.ePoint(0, textsize[1]))
self["list"].instance.resize(enigma.eSize(*listsize))
# center window
newwidth = wsize[0]
self.instance.move(enigma.ePoint((720-wsizex)/2, (576-wsizey)/(count > 7 and 2 or 3)))
def keyLeft(self):
pass
def keyRight(self):
pass
def up(self):
if len(self["list"].list) > 0:
while 1:
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self.updateSummary(self["list"].l.getCurrentSelectionIndex())
if self["list"].l.getCurrentSelection()[0][0] != "--" or self["list"].l.getCurrentSelectionIndex() == 0:
break
def down(self):
if len(self["list"].list) > 0:
while 1:
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self.updateSummary(self["list"].l.getCurrentSelectionIndex())
if self["list"].l.getCurrentSelection()[0][0] != "--" or self["list"].l.getCurrentSelectionIndex() == len(self["list"].list) - 1:
break
# runs a number shortcut
def keyNumberGlobal(self, number):
self.goKey(str(number))
# runs the current selected entry
def go(self):
cursel = self["list"].l.getCurrentSelection()
if cursel:
self.goEntry(cursel[0])
else:
self.cancel()
# runs a specific entry
def goEntry(self, entry):
if len(entry) > 2 and isinstance(entry[1], str) and entry[1] == "CALLFUNC":
# CALLFUNC wants to have the current selection as argument
arg = self["list"].l.getCurrentSelection()[0]
entry[2](arg)
else:
self.close(entry)
# lookups a key in the keymap, then runs it
def goKey(self, key):
if self.keymap.has_key(key):
entry = self.keymap[key]
self.goEntry(entry)
# runs a color shortcut
def keyRed(self):
self.goKey("red")
def keyGreen(self):
self.goKey("green")
def keyYellow(self):
self.goKey("yellow")
def keyBlue(self):
self.goKey("blue")
def updateSummary(self, curpos=0):
pos = 0
summarytext = ""
for entry in self.summarylist:
if pos > curpos-2 and pos < curpos+5:
if pos == curpos:
summarytext += ">"
self["summary_selection"].setText(entry[1])
else:
summarytext += entry[0]
summarytext += ' ' + entry[1] + '\n'
pos += 1
self["summary_list"].setText(summarytext)
def cancel(self):
self.close(None)
| gpl-2.0 |
hpcuantwerpen/easybuild-easyblocks | easybuild/easyblocks/b/blacs.py | 3 | 8124 | ##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing BLACS, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import glob
import re
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
# also used by ScaLAPACK
def det_interface(log, path):
"""Determine interface through 'xintface' heuristic tool"""
(out, _) = run_cmd(os.path.join(path, "xintface"), log_all=True, simple=False)
intregexp = re.compile(r".*INTFACE\s*=\s*-D(\S+)\s*")
res = intregexp.search(out)
if res:
return res.group(1)
else:
raise EasyBuildError("Failed to determine interface, output for xintface: %s", out)
class EB_BLACS(ConfigureMake):
"""
Support for building/installing BLACS
- configure: symlink BMAKES/Bmake.MPI-LINUX to Bmake.inc
- make install: copy files
"""
def configure_step(self):
"""Configure BLACS build by copying Bmake.inc file."""
src = os.path.join(self.cfg['start_dir'], 'BMAKES', 'Bmake.MPI-LINUX')
dest = os.path.join(self.cfg['start_dir'], 'Bmake.inc')
if not os.path.isfile(src):
raise EasyBuildError("Can't find source file %s", src)
if os.path.exists(dest):
raise EasyBuildError("Destination file %s exists", dest)
try:
shutil.copy(src, dest)
except OSError as err:
raise EasyBuildError("Copying %s to %s failed: %s", src, dest, err)
def build_step(self):
"""Build BLACS using build_step, after figuring out the make options based on the heuristic tools available."""
opts = {
'mpicc': "%s %s" % (os.getenv('MPICC'), os.getenv('CFLAGS')),
'mpif77': "%s %s" % (os.getenv('MPIF77'), os.getenv('FFLAGS')),
'f77': os.getenv('F77'),
'cc': os.getenv('CC'),
'builddir': os.getcwd(),
'mpidir': os.path.dirname(os.getenv('MPI_LIB_DIR')),
}
# determine interface and transcomm settings
comm = ''
interface = 'UNKNOWN'
try:
cwd = os.getcwd()
os.chdir('INSTALL')
# need to build
cmd = "make"
cmd += " CC='%(mpicc)s' F77='%(mpif77)s' MPIdir=%(mpidir)s" \
" MPILIB='' BTOPdir=%(builddir)s INTERFACE=NONE" % opts
# determine interface using xintface
run_cmd("%s xintface" % cmd, log_all=True, simple=True)
interface = det_interface(self.log, "./EXE")
# try and determine transcomm using xtc_CsameF77 and xtc_UseMpich
if not comm:
run_cmd("%s xtc_CsameF77" % cmd, log_all=True, simple=True)
(out, _) = run_cmd(self.toolchain.mpi_cmd_for("./EXE/xtc_CsameF77", 2), log_all=True, simple=False)
# get rid of first two lines, that inform about how to use this tool
out = '\n'.join(out.split('\n')[2:])
notregexp = re.compile("_NOT_")
if not notregexp.search(out):
# if it doesn't say '_NOT_', set it
comm = "TRANSCOMM='-DCSameF77'"
else:
(_, ec) = run_cmd("%s xtc_UseMpich" % cmd, log_all=False, log_ok=False, simple=False)
if ec == 0:
(out, _) = run_cmd(self.toolchain.mpi_cmd_for("./EXE/xtc_UseMpich", 2),
log_all=True, simple=False)
if not notregexp.search(out):
commregexp = re.compile(r'Set TRANSCOMM\s*=\s*(.*)$')
res = commregexp.search(out)
if res:
# found how to set TRANSCOMM, so set it
comm = "TRANSCOMM='%s'" % res.group(1)
else:
# no match, set empty TRANSCOMM
comm = "TRANSCOMM=''"
else:
# if it fails to compile, set empty TRANSCOMM
comm = "TRANSCOMM=''"
os.chdir(cwd)
except OSError as err:
raise EasyBuildError("Failed to determine interface and transcomm settings: %s", err)
opts.update({
'comm': comm,
'int': interface,
})
add_makeopts = ' MPICC="%(mpicc)s" MPIF77="%(mpif77)s" %(comm)s ' % opts
add_makeopts += ' INTERFACE=%(int)s MPIdir=%(mpidir)s BTOPdir=%(builddir)s mpi ' % opts
self.cfg.update('buildopts', add_makeopts)
super(EB_BLACS, self).build_step()
def install_step(self):
"""Install by copying files to install dir."""
# include files and libraries
for (srcdir, destdir, ext) in [
(os.path.join("SRC", "MPI"), "include", ".h"), # include files
("LIB", "lib", ".a"), # libraries
]:
src = os.path.join(self.cfg['start_dir'], srcdir)
dest = os.path.join(self.installdir, destdir)
try:
os.makedirs(dest)
os.chdir(src)
for lib in glob.glob('*%s' % ext):
# copy file
shutil.copy2(os.path.join(src, lib), dest)
self.log.debug("Copied %s to %s" % (lib, dest))
if destdir == 'lib':
# create symlink with more standard name for libraries
symlink_name = "lib%s.a" % lib.split('_')[0]
os.symlink(os.path.join(dest, lib), os.path.join(dest, symlink_name))
self.log.debug("Symlinked %s/%s to %s" % (dest, lib, symlink_name))
except OSError as err:
raise EasyBuildError("Copying %s/*.%s to installation dir %s failed: %s", src, ext, dest, err)
# utilities
src = os.path.join(self.cfg['start_dir'], 'INSTALL', 'EXE', 'xintface')
dest = os.path.join(self.installdir, 'bin')
try:
os.makedirs(dest)
shutil.copy2(src, dest)
self.log.debug("Copied %s to %s" % (src, dest))
except OSError as err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", src, dest, err)
def sanity_check_step(self):
"""Custom sanity check for BLACS."""
custom_paths = {
'files': [fil for filptrn in ["blacs", "blacsCinit", "blacsF77init"]
for fil in ["lib/lib%s.a" % filptrn,
"lib/%s_MPI-LINUX-0.a" % filptrn]] +
["bin/xintface"],
'dirs': []
}
super(EB_BLACS, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
cjayb/mne-python | tutorials/source-modeling/plot_eeg_no_mri.py | 4 | 2723 | # -*- coding: utf-8 -*-
"""
.. _tut-eeg-fsaverage-source-modeling:
EEG forward operator with a template MRI
========================================
This tutorial explains how to compute the forward operator from EEG data
using the standard template MRI subject ``fsaverage``.
.. caution:: Source reconstruction without an individual T1 MRI from the
subject will be less accurate. Do not over interpret
activity locations which can be off by multiple centimeters.
.. contents:: This tutorial covers:
:local:
:depth: 2
"""
# Authors: Alexandre Gramfort <[email protected]>
# Joan Massich <[email protected]>
#
# License: BSD Style.
import os.path as op
import mne
from mne.datasets import eegbci
from mne.datasets import fetch_fsaverage
# Download fsaverage files
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
# The files live in:
subject = 'fsaverage'
trans = 'fsaverage' # MNE has a built-in fsaverage transformation
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
##############################################################################
# Load the data
# -------------
#
# We use here EEG data from the BCI dataset.
#
# .. note:: See :ref:`plot_montage` to view all the standard EEG montages
# available in MNE-Python.
raw_fname, = eegbci.load_data(subject=1, runs=[6])
raw = mne.io.read_raw_edf(raw_fname, preload=True)
# Clean channel names to be able to use a standard 1005 montage
new_names = dict(
(ch_name,
ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp'))
for ch_name in raw.ch_names)
raw.rename_channels(new_names)
# Read and set the EEG electrode locations
montage = mne.channels.make_standard_montage('standard_1005')
raw.set_montage(montage)
raw.set_eeg_reference(projection=True) # needed for inverse modeling
# Check that the locations of EEG electrodes is correct with respect to MRI
mne.viz.plot_alignment(
raw.info, src=src, eeg=['original', 'projected'], trans=trans,
show_axes=True, mri_fiducials=True, dig='fiducials')
##############################################################################
# Setup source space and compute forward
# --------------------------------------
fwd = mne.make_forward_solution(raw.info, trans=trans, src=src,
bem=bem, eeg=True, mindist=5.0, n_jobs=1)
print(fwd)
# for illustration purposes use fwd to compute the sensitivity map
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[5, 50, 100]))
| bsd-3-clause |
artas360/pythran | pythran/tests/cases/periodic_dist.py | 5 | 1259 | #pythran export dist(float [], float[], float[], int, bool, bool, bool)
#runas import numpy as np ; N = 20 ; x = np.arange(0., N, 0.1) ; L = 4 ; periodic = True ; dist(x, x, x, L,periodic, periodic, periodic)
#bench import numpy as np ; N = 300 ; x = np.arange(0., N, 0.1) ; L = 4 ; periodic = True ; dist(x, x, x, L,periodic, periodic, periodic)
import numpy as np
def dist(x, y, z, L, periodicX, periodicY, periodicZ):
" ""Computes distances between all particles and places the result in a matrix such that the ij th matrix entry corresponds to the distance between particle i and j"" "
N = len(x)
xtemp = np.tile(x,(N,1))
dx = xtemp - xtemp.T
ytemp = np.tile(y,(N,1))
dy = ytemp - ytemp.T
ztemp = np.tile(z,(N,1))
dz = ztemp - ztemp.T
# Particles 'feel' each other across the periodic boundaries
if periodicX:
dx[dx>L/2]=dx[dx > L/2]-L
dx[dx<-L/2]=dx[dx < -L/2]+L
if periodicY:
dy[dy>L/2]=dy[dy>L/2]-L
dy[dy<-L/2]=dy[dy<-L/2]+L
if periodicZ:
dz[dz>L/2]=dz[dz>L/2]-L
dz[dz<-L/2]=dz[dz<-L/2]+L
# Total Distances
d = np.sqrt(dx**2+dy**2+dz**2)
# Mark zero entries with negative 1 to avoid divergences
d[d==0] = -1
return d, dx, dy, dz
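# Illustrative only (values chosen for this sketch): with L = 4 and
# periodicX = True, particles at x = 0.1 and x = 3.9 give a raw separation of
# -3.8, which is wrapped to -3.8 + L = 0.2, so they "feel" each other across
# the periodic boundary.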
| bsd-3-clause |
emedvedev/st2 | st2actions/tests/unit/test_async_runner.py | 8 | 1802 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except:
import json
from st2actions.runners import AsyncActionRunner
from st2common.constants.action import (LIVEACTION_STATUS_RUNNING)
RAISE_PROPERTY = 'raise'
def get_runner():
return AsyncTestRunner()
class AsyncTestRunner(AsyncActionRunner):
def __init__(self):
super(AsyncTestRunner, self).__init__(runner_id='1')
self.pre_run_called = False
self.run_called = False
self.post_run_called = False
def pre_run(self):
self.pre_run_called = True
def run(self, action_params):
self.run_called = True
result = {}
if self.runner_parameters.get(RAISE_PROPERTY, False):
raise Exception('Raise required.')
else:
result = {
'ran': True,
'action_params': action_params
}
return (LIVEACTION_STATUS_RUNNING, json.dumps(result), {'id': 'foo'})
def post_run(self, status, result):
self.post_run_called = True
| apache-2.0 |
SectorLabs/django-postgres-extra | psqlextra/partitioning/manager.py | 1 | 4538 | from typing import List, Optional, Tuple
from django.db import connections
from psqlextra.models import PostgresPartitionedModel
from .config import PostgresPartitioningConfig
from .constants import AUTO_PARTITIONED_COMMENT
from .error import PostgresPartitioningError
from .partition import PostgresPartition
from .plan import PostgresModelPartitioningPlan, PostgresPartitioningPlan
PartitionList = List[Tuple[PostgresPartitionedModel, List[PostgresPartition]]]
class PostgresPartitioningManager:
"""Helps managing partitions by automatically creating new partitions and
deleting old ones according to the configuration."""
def __init__(self, configs: List[PostgresPartitioningConfig]) -> None:
self.configs = configs
self._validate_configs(self.configs)
def plan(
self,
skip_create: bool = False,
skip_delete: bool = False,
using: Optional[str] = None,
) -> PostgresPartitioningPlan:
"""Plans which partitions should be deleted/created.
Arguments:
skip_create:
If set to True, no partitions will be marked
for creation, regardless of the configuration.
skip_delete:
If set to True, no partitions will be marked
for deletion, regardless of the configuration.
using:
Name of the database connection to use.
Returns:
A plan describing what partitions would be created
and deleted if the plan is applied.
"""
model_plans = []
for config in self.configs:
model_plan = self._plan_for_config(
config,
skip_create=skip_create,
skip_delete=skip_delete,
using=using,
)
if not model_plan:
continue
model_plans.append(model_plan)
return PostgresPartitioningPlan(model_plans)
def find_config_for_model(
self, model: PostgresPartitionedModel
) -> Optional[PostgresPartitioningConfig]:
"""Finds the partitioning config for the specified model."""
return next(
(config for config in self.configs if config.model == model), None
)
def _plan_for_config(
self,
config: PostgresPartitioningConfig,
skip_create: bool = False,
skip_delete: bool = False,
using: Optional[str] = None,
) -> Optional[PostgresModelPartitioningPlan]:
"""Creates a partitioning plan for one partitioning config."""
connection = connections[using or "default"]
table = self._get_partitioned_table(connection, config.model)
model_plan = PostgresModelPartitioningPlan(config)
if not skip_create:
for partition in config.strategy.to_create():
if table.partition_by_name(name=partition.name()):
continue
model_plan.creations.append(partition)
if not skip_delete:
for partition in config.strategy.to_delete():
introspected_partition = table.partition_by_name(
name=partition.name()
)
if not introspected_partition:
break
if introspected_partition.comment != AUTO_PARTITIONED_COMMENT:
continue
model_plan.deletions.append(partition)
if len(model_plan.creations) == 0 and len(model_plan.deletions) == 0:
return None
return model_plan
@staticmethod
def _get_partitioned_table(connection, model: PostgresPartitionedModel):
with connection.cursor() as cursor:
table = connection.introspection.get_partitioned_table(
cursor, model._meta.db_table
)
if not table:
raise PostgresPartitioningError(
f"Model {model.__name__}, with table "
f"{model._meta.db_table} does not exists in the "
"database. Did you run `python manage.py migrate`?"
)
return table
@staticmethod
def _validate_configs(configs: List[PostgresPartitioningConfig]):
"""Ensures there is only one config per model."""
models = set([config.model.__name__ for config in configs])
if len(models) != len(configs):
raise PostgresPartitioningError(
"Only one partitioning config per model is allowed"
)
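# Minimal usage sketch (not part of this module; the config construction is
# assumed to happen elsewhere with the psqlextra partitioning APIs):
#
#   manager = PostgresPartitioningManager([config_for_my_model])
#   plan = manager.plan(using="default")
#   # the returned PostgresPartitioningPlan describes which partitions would
#   # be created and deleted if it were applied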
| mit |
nealegibson/Infer | src/GPKernelFunctions.py | 1 | 10701 | """
Useful (non-periodic) GP Kernel Functions
"""
import numpy as np
import scipy.spatial
from scipy.special import gamma,kv
###################################################################################################
#Exponential class
def SqExponentialRad(X,Y,theta,white_noise=False):
"""
Standard squared exponential function (just one length parameter).
  k(x,x') = th0^2 * exp( - Sum_i (x_i-x_i')^2 / (2*th1^2) ) [+ sigma^2 delta_']
  theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
  theta[1] - length scale l
  theta[2] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
#Calculate distance matrix without scaling
D2 = EuclideanDist2(X,Y)
#Calculate covariance matrix
K = theta[0]**2 * np.exp( - D2 / (2*(theta[1]**2)) )
#Add white noise
if white_noise == True: K += (np.identity(X[:,0].size) * (theta[2]**2))
return np.matrix(K)
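# Minimal usage sketch (not part of the original module; the inputs below are
# purely illustrative) showing how a covariance matrix is typically built:
def _example_sq_exponential_rad():
  """Illustrative only: toy covariance matrix from SqExponentialRad."""
  X = np.matrix(np.linspace(0., 4., 5).reshape(5, 1)) # 5 points in 1D
  theta = [1.0, 2.0, 0.1] # [height scale, length scale, white noise sd]
  K = SqExponentialRad(X, X, theta, white_noise=True)
  # K is a 5x5 matrix; each diagonal term is theta[0]**2 + theta[2]**2 = 1.01
  return K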
def SqExponentialARD(X,Y,theta,white_noise=False):
"""
ARD squared exponential function
  (with an inverse length scale n_i for each input dimension in the X vectors).
k(x,x') = th0^2 * exp( -Sum_i n_i * (x_i-x_i')^2 ) [+ sigma^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1:-1] - inverse length scales (1/2l_i^2) for each input vector in X,Y
theta[-1] - white noise standard deviation if white_noise=True
X,Y - input matrices
"""
#Calculate distance matrix with scaling - multiply each coord by sqrt(eta)
#n(x_i-x_j)^2 = (sqrt(n)*x_i-sqrt(n)*x_j)^2
D2 = EuclideanDist2(X,Y,v=np.sqrt(np.abs(np.array(theta[1:-1]))))
#Calculate covariance matrix (leave out the factor of 1/2)
K = theta[0]**2 * np.exp( -D2 )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
def SqExponential(X,Y,theta,white_noise=False):
"""
ARD squared exponential function
  (with a length scale l_i for each input dimension in the X vectors).
  k(x,x') = th0^2 * exp( - Sum_i (x_i-x_i')^2 / (2*l_i^2) ) [+ sigma^2 delta_']
  theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
  theta[1:-1] - length scales l_i for each input vector in X,Y
  theta[-1] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
#Calculate distance matrix with scaling - multiply each coord by sqrt(eta)
#n(x_i-x_j)^2 = (sqrt(n)*x_i-sqrt(n)*x_j)^2
D2 = EuclideanDist2(X,Y,v=1./(np.array(theta[1:-1])*np.sqrt(2.)))
#Calculate covariance matrix (leave out the factor of 1/2)
K = theta[0]**2 * np.exp( -D2 )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
def ExponentialRad(X,Y,theta,white_noise=False):
"""
Standard Exponential function (with single length scale).
  k(x,x') = th0^2 * exp( - |x-x'| / (2*th1^2) ) [+ sigma^2 delta_']
  theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
  theta[1] - length scale parameter (the distance is divided by 2*th1^2)
  theta[2] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
#Calculate distance matrix with scaling
D = EuclideanDist(X,Y,v=None)
#Calculate covariance matrix
K = theta[0]**2 * np.exp( - D / (2*(theta[1]**2)) )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
def ExponentialARD(X,Y,theta,white_noise=False):
"""
  ARD exponential function
  (with an inverse length scale n_i for each input dimension in the X vectors).
  k(x,x') = th0^2 * exp( - sqrt( Sum_i n_i * (x_i-x_i')^2 ) / 2 ) [+ sigma^2 delta_']
  theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
  theta[1:-1] - inverse length scales (n_i) for each input vector in X,Y
  theta[-1] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
#Calculate distance matrix with scaling
D = EuclideanDist(X,Y,v=np.sqrt(np.abs(np.array(theta[1:-1]))))
#Calculate covariance matrix
K = theta[0]**2 * np.exp( - D / 2 )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
####################################################################################################
def SqExponentialSum(X,Y,theta,white_noise=False):
"""
Squared exponential function with independent basis components
(with height scale and inverse length scale for each input in X vectors).
  k(x,x') = Sum_i th_2i^2 * exp( -n_i * (x_i-x_i')^2 ) [+ sigma^2 delta_']
  theta[2i], theta[2i+1] - height scale, inverse length scale pairs
  theta[-1] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
#Calculate distance matrix with scaling - multiply each coord by sqrt(eta)
m,n = X.shape
#ensure inputs are matrices - otherwise EuclideanDist fails for 1D
assert type(X) is np.matrixlib.defmatrix.matrix
assert type(Y) is np.matrixlib.defmatrix.matrix
K = np.zeros((m,m))
#sum over the input vectors
for i in range(n):
D2 = EuclideanDist2( np.mat(X[:,i]),np.mat(Y[:,i]),v=[np.sqrt(np.abs(theta[2*i+1]))])
K += theta[2*i]**2 * np.exp( -D2 )
#Add white noise
if white_noise == True: K += np.identity(m) * (theta[-1]**2)
return np.matrix(K)
####################################################################################################
#Rational quadratic - not tested
def RationalQuadRad(X, Y, theta, white_noise = False):
"""
Rational quadratic kernel (radial) - not fully tested
  k(x,x') = th0^2 * (1 + (x_i-x_i')^2 / (2*th1*th2^2))^(-th1) [+ th3^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1] - alpha
theta[2] - length scale
theta[3] - white noise standard deviation if white_noise=True
"""
# Calculate distance matrix without scaling
D2 = EuclideanDist2(X, Y)
# Calculate covariance matrix
K = theta[0]**2 * (1 + (D2 / (2.*theta[1]*(theta[2]**2.)) ) )**(-theta[1])
# Add white noise
if white_noise == True: K += (np.identity(X[:,0].size) * (theta[3]**2))
return np.matrix(K)
####################################################################################################
#Matern class of covariance functions - not tested
def MaternRad(X,Y,theta,white_noise=False):
"""
Matern covariance kernel - not properly tested!
Radial - ie same length scales in all inputs
"""
#Calculate distance matrix with (global) scaling
D = EuclideanDist(X,Y) / theta[2]
#Calculate covariance matrix from matern function
v = theta[1]
K = 2.**(1.-v) / gamma(v) * (np.sqrt(2.*v)*D)**v * kv(v,np.sqrt(2.*v)*D)
#diagonal terms should be set to one (when D2 = 0, kv diverges but full function = 1)
#this only works for square 'covariance' matrix...
#ie fails for blocks..;
# K[np.where(np.identity(X[:,0].size)==1)] = 1.
#this should work, but again needs tested properly...
K[np.where(D==0.)] = 1.
#now multiply by an overall scale function
K = K * theta[0]**2
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[3]**2)
return np.matrix(K)
#matern kernel for v=3/2 fixed - rougher than sq exponential
def MAT_Kernel32(X,Y,theta,white_noise=False):
"""
Matern covariance kernel for 3/2 shape parameter
theta[0] - overall scale param - ie prior covariance
theta[1] - length scale
theta[2] - white noise
"""
D = EuclideanDist(X,Y) / theta[1]
K = theta[0]**2 * (1 + np.sqrt(3.)*D) * np.exp(-np.sqrt(3.)*D)
if white_noise == True: K += np.identity(X[:,0].size) * (theta[2]**2)
return np.matrix(K)
#matern kernel for v=5/2 fixed - rougher than sq exponential, smoother than above
#3/2 process
def MAT_Kernel52(X,Y,theta,white_noise=False):
"""
Matern covariance kernel for 5/2 shape parameter
theta[0] - overall scale param - ie prior covariance
theta[1] - length scale
theta[2] - white noise
"""
D = EuclideanDist(X,Y) / theta[1]
K = theta[0]**2 * (1 + np.sqrt(5.)*D + 5./3.*(D**2)) * np.exp(-np.sqrt(5.)*D)
if white_noise == True: K += np.identity(X[:,0].size) * (theta[2]**2)
return np.matrix(K)
def MaternARD(X,Y,theta,white_noise=False):
"""
Matern covariance kernel - not fully tested!
different length scales in all inputs
theta[0] - overall scale param - ie prior covariance
theta[1] - shape parameter
theta[2:-1] - length scales
theta[-1] - white noise
"""
#Calculate distance matrix with scaling
D = EuclideanDist(X,Y,v=theta[2:-1])
#Calculate covariance matrix from matern function
v = theta[1]
K = 2**(1.-v) / gamma(v) * (np.sqrt(2*v)*D)**v * kv(v,np.sqrt(2*v)*D)
#diagonal terms should be set to one (when D2 = 0, kv diverges but full function = 1)
#this only works for square 'covariance' matrix...
#ie fails for blocks..;
# K[np.where(np.identity(X[:,0].size)==1)] = 1.
#this should work, but again needs tested properly...
K[np.where(D==0.)] = 1.
#now multiply by an overall scale function
K = K * theta[0]
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
####################################################################################################
#Auxilary functions to compute euclidean distances
def EuclideanDist(X1,X2,v=None):
"""
  Calculate the distance matrix for 2 data matrices
X1 - n x D input matrix
X2 - m x D input matrix
v - weight vector
D - output an n x m matrix of dist = sqrt( Sum_i (1/l_i^2) * (x_i - x'_i)^2 )
"""
#ensure inputs are in matrix form
X1,X2 = np.matrix(X1), np.matrix(X2)
if v is not None: #scale each coord in Xs by the weight vector
V = np.abs(np.matrix( np.diag(v) ))
X1 = X1 * V
X2 = X2 * V
  #calculate euclidean distance (after weighting)
D = scipy.spatial.distance.cdist( X1, X2, 'euclidean')
return D
def EuclideanDist2(X1,X2,v=None):
"""
  Calculate the distance matrix squared for 2 data matrices
X1 - n x D input matrix
X2 - m x D input matrix
v - weight vector
D2 - output an n x m matrix of dist^2 = Sum_i (1/l_i^2) * (x_i - x'_i)^2
"""
#ensure inputs are in matrix form
X1,X2 = np.matrix(X1), np.matrix(X2)
if v is not None: #scale each coord in Xs by the weight vector
V = np.abs(np.matrix( np.diag(v) ))
X1 = X1 * V
X2 = X2 * V
  #calculate squared euclidean distance (after weighting)
D2 = scipy.spatial.distance.cdist( X1, X2, 'sqeuclidean' )
return D2
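# Illustrative only (values chosen for this sketch): with X1 = [[1.]],
# X2 = [[3.]] and v = [2.], both coordinates are scaled by 2 (to 2 and 6),
# so EuclideanDist2 returns [[16.]] = (2 - 6)^2.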
####################################################################################################
| gpl-3.0 |
dsajkl/reqiop | common/lib/xmodule/xmodule/progress.py | 127 | 4896 | '''
Progress class for modules. Represents where a student is in a module.
Useful things to know:
- Use Progress.to_js_status_str() to convert a progress into a simple
status string to pass to js.
- Use Progress.to_js_detail_str() to convert a progress into a more detailed
string to pass to js.
In particular, these functions have a canonical handing of None.
For most subclassing needs, you should only need to reimplement
frac() and __str__().
'''
import numbers
class Progress(object):
'''Represents a progress of a/b (a out of b done)
a and b must be numeric, but not necessarily integer, with
0 <= a <= b and b > 0.
Progress can only represent Progress for modules where that makes sense. Other
modules (e.g. html) should return None from get_progress().
TODO: add tag for module type? Would allow for smarter merging.
'''
def __init__(self, a, b):
'''Construct a Progress object. a and b must be numbers, and must have
0 <= a <= b and b > 0
'''
# Want to do all checking at construction time, so explicitly check types
if not (isinstance(a, numbers.Number) and
isinstance(b, numbers.Number)):
raise TypeError('a and b must be numbers. Passed {0}/{1}'.format(a, b))
if a > b:
a = b
if a < 0:
a = 0
if b <= 0:
raise ValueError('fraction a/b = {0}/{1} must have b > 0'.format(a, b))
self._a = a
self._b = b
def frac(self):
''' Return tuple (a,b) representing progress of a/b'''
return (self._a, self._b)
def percent(self):
''' Returns a percentage progress as a float between 0 and 100.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return 100.0 * a / b
def started(self):
''' Returns True if fractional progress is greater than 0.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
return self.frac()[0] > 0
def inprogress(self):
''' Returns True if fractional progress is strictly between 0 and 1.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a > 0 and a < b
def done(self):
''' Return True if this represents done.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a == b
def ternary_str(self):
''' Return a string version of this progress: either
"none", "in_progress", or "done".
subclassing note: implemented in terms of frac()
'''
(a, b) = self.frac()
if a == 0:
return "none"
if a < b:
return "in_progress"
return "done"
def __eq__(self, other):
''' Two Progress objects are equal if they have identical values.
Implemented in terms of frac()'''
if not isinstance(other, Progress):
return False
(a, b) = self.frac()
(a2, b2) = other.frac()
return a == a2 and b == b2
def __ne__(self, other):
''' The opposite of equal'''
return not self.__eq__(other)
def __str__(self):
''' Return a string representation of this progress.
subclassing note: implemented in terms of frac().
'''
(a, b) = self.frac()
return "{0}/{1}".format(a, b)
@staticmethod
def add_counts(a, b):
'''Add two progress indicators, assuming that each represents items done:
(a / b) + (c / d) = (a + c) / (b + d).
If either is None, returns the other.
'''
if a is None:
return b
if b is None:
return a
# get numerators + denominators
(n, d) = a.frac()
(n2, d2) = b.frac()
return Progress(n + n2, d + d2)
@staticmethod
def to_js_status_str(progress):
'''
Return the "status string" version of the passed Progress
object that should be passed to js. Use this function when
sending Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return progress.ternary_str()
@staticmethod
def to_js_detail_str(progress):
'''
Return the "detail string" version of the passed Progress
object that should be passed to js. Use this function when
passing Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return str(progress)
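# Illustrative usage sketch (not part of the original module): combining the
# progress of two modules and rendering the result for client-side js.
# p1 = Progress(2, 5)
# p2 = Progress(1, 3)
# total = Progress.add_counts(p1, p2) # Progress(3, 8)
# Progress.to_js_status_str(total) # "in_progress"
# Progress.to_js_detail_str(total) # "3/8"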
| agpl-3.0 |
tdr130/pupy | pupy/modules/socks5proxy.py | 6 | 7573 | # -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
#RFC @https://www.ietf.org/rfc/rfc1928.txt
from pupylib.PupyModule import *
import StringIO
import pupylib.utils
import SocketServer
import threading
import socket
import logging
import struct
import traceback
import time
__class_name__="Socks5Proxy"
CODE_SUCCEEDED='\x00'
CODE_GENERAL_SRV_FAILURE='\x01'
CODE_CONN_NOT_ALLOWED='\x02'
CODE_NET_NOT_REACHABLE='\x03'
CODE_HOST_UNREACHABLE='\x04'
CODE_CONN_REFUSED='\x05'
CODE_TTL_EXPIRED='\x06'
CODE_COMMAND_NOT_SUPPORTED='\x07'
CODE_ADDRESS_TYPE_NOT_SUPPORTED='\x08'
CODE_UNASSIGNED='\x09'
class SocketPiper(threading.Thread):
def __init__(self, read_sock, write_sock):
threading.Thread.__init__(self)
self.daemon=True
self.read_sock=read_sock
self.write_sock=write_sock
def run(self):
try:
self.read_sock.setblocking(0)
while True:
data=""
try:
data+=self.read_sock.recv(1000000)
if not data:
break
except Exception as e:
if e[0]==9:#errno connection closed
break
if not data:
time.sleep(0.05)
continue
self.write_sock.sendall(data)
except Exception as e:
logging.debug("error in socket piper: %s"%str(traceback.format_exc()))
finally:
try:
self.write_sock.shutdown(socket.SHUT_RDWR)
self.write_sock.close()
except Exception:
pass
try:
self.read_sock.shutdown(socket.SHUT_RDWR)
self.read_sock.close()
except Exception:
pass
logging.debug("piper finished")
class Socks5RequestHandler(SocketServer.BaseRequestHandler):
def _socks_response(self, code, terminate=False):
ip="".join([chr(int(i)) for i in self.server.server_address[0].split(".")])
port=struct.pack("!H",self.server.server_address[1])
self.request.sendall("\x05"+code+"\x00"+"\x01"+ip+port)
if terminate:
self.request.shutdown(socket.SHUT_RDWR)
self.request.close()
def handle(self):
self.request.settimeout(5)
VER=self.request.recv(1)
NMETHODS=self.request.recv(1)
METHODS=self.request.recv(int(struct.unpack("!B",NMETHODS)[0]))
"""
o X'00' NO AUTHENTICATION REQUIRED
o X'01' GSSAPI
o X'02' USERNAME/PASSWORD
o X'03' to X'7F' IANA ASSIGNED
o X'80' to X'FE' RESERVED FOR PRIVATE METHODS
o X'FF' NO ACCEPTABLE METHODS
"""
#for now only the 'no authentication' method is supported:
self.request.sendall("\x05\x00")
VER=self.request.recv(1)
if VER!="\x05":
logging.debug("receiving unsupported socks version: %s"%VER.encode('hex'))
self._socks_response(CODE_GENERAL_SRV_FAILURE, terminate=True)
return
CMD=self.request.recv(1)
if CMD!="\x01": # we only support CONNECT for now
logging.debug("receiving unsupported socks CMD: %s"%CMD.encode('hex'))
self._socks_response(CODE_COMMAND_NOT_SUPPORTED, terminate=True)
return
RSV=self.request.recv(1)
DST_ADDR=None
DST_PORT=None
ATYP=self.request.recv(1)
if ATYP=="\x01":
DST_ADDR=".".join([str(ord(x)) for x in self.request.recv(4)])
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
elif ATYP=="\x03":
DOMAIN_LEN=int(struct.unpack("!B",self.request.recv(1))[0])
DST_ADDR=self.request.recv(DOMAIN_LEN)
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
else: #TODO: ipv6
logging.debug("atyp not supported: %s"%ATYP.encode('hex'))
self._socks_response(CODE_ADDRESS_TYPE_NOT_SUPPORTED, terminate=True)
return
#now we have all we need, we can open the socket proxied through rpyc :)
logging.debug("connecting to %s:%s through the rpyc client"%(DST_ADDR,DST_PORT))
rsocket_mod=self.server.rpyc_client.conn.modules.socket
rsocket=rsocket_mod.socket(rsocket_mod.AF_INET,rsocket_mod.SOCK_STREAM)
rsocket.settimeout(5)
try:
rsocket.connect((DST_ADDR, DST_PORT))
except Exception as e:
logging.debug("error: %s"%e)
if e[0]==10060:
logging.debug("unreachable !")
self._socks_response(CODE_HOST_UNREACHABLE, terminate=True)
else:
self._socks_response(CODE_NET_NOT_REACHABLE, terminate=True)
return
self._socks_response(CODE_SUCCEEDED)
logging.debug("connection succeeded !")
#self.request.settimeout(30)
#rsocket.settimeout(30)
sp1=SocketPiper(self.request, rsocket)
sp2=SocketPiper(rsocket, self.request)
sp1.start()
sp2.start()
sp1.join()
sp2.join()
logging.debug("conn to %s:%s closed"%(DST_ADDR,DST_PORT))
class Socks5Server(SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True, rpyc_client=None):
self.rpyc_client=rpyc_client
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
class ThreadedSocks5Server(SocketServer.ThreadingMixIn, Socks5Server):
pass
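# Illustrative sketch (not part of the original module): Socks5Proxy below wires
# this up as a pupy job, but the threaded server can also be started directly,
# given any client object exposing a remote conn.modules.socket:
# server = ThreadedSocks5Server(("127.0.0.1", 1080), Socks5RequestHandler, rpyc_client=some_client)
# threading.Thread(target=server.serve_forever).start()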
class Socks5Proxy(PupyModule):
""" start a socks5 proxy going through a client """
max_clients=1
unique_instance=True
daemon=True
server=None
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='socks5proxy', description=self.__doc__)
self.arg_parser.add_argument('-p', '--port', default='1080')
self.arg_parser.add_argument('action', choices=['start','stop'])
def stop_daemon(self):
self.success("shutting down socks server ...")
if self.server:
self.server.shutdown()
del self.server
self.success("socks server shut down")
else:
self.error("server is None")
def run(self, args):
if args.action=="start":
if self.server is None:
self.success("starting server ...")
self.server = ThreadedSocks5Server(("127.0.0.1", int(args.port)), Socks5RequestHandler, rpyc_client=self.client)
t=threading.Thread(target=self.server.serve_forever)
t.daemon=True
t.start()
self.success("socks5 server started on 127.0.0.1:%s"%args.port)
else:
self.error("socks5 server is already started !")
elif args.action=="stop":
if self.server:
self.job.stop()
del self.job
self.success("socks5 server stopped !")
else:
self.error("socks5 server is already stopped")
| bsd-3-clause |
darshanthaker/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/afm.py | 69 | 15057 | """
This is a python interface to Adobe Font Metrics Files. Although a
number of other python implementations exist (and may be more complete
than mine) I decided not to go with them because they were either
1) copyrighted or used a non-BSD compatible license
2) had too many dependencies and I wanted a free standing lib
3) Did more than I needed and it was easier to write my own than
figure out how to just get what I needed from theirs
It is pretty easy to use, and requires only built-in python libs::
>>> from afm import AFM
>>> fh = file('ptmr8a.afm')
>>> afm = AFM(fh)
>>> afm.string_width_height('What the heck?')
(6220.0, 683)
>>> afm.get_fontname()
'Times-Roman'
>>> afm.get_kern_dist('A', 'f')
0
>>> afm.get_kern_dist('A', 'y')
-92.0
>>> afm.get_bbox_char('!')
[130, -9, 238, 676]
>>> afm.get_bbox_font()
[-168, -218, 1000, 898]
AUTHOR:
John D. Hunter <[email protected]>
"""
import sys, os, re
from _mathtext_data import uni2type1
#Convert string to a python type
_to_int = int
_to_float = float
_to_str = str
def _to_list_of_ints(s):
s = s.replace(',', ' ')
return [_to_int(val) for val in s.split()]
def _to_list_of_floats(s):
return [_to_float(val) for val in s.split()]
def _to_bool(s):
if s.lower().strip() in ('false', '0', 'no'): return False
else: return True
def _sanity_check(fh):
"""
Check if the file at least looks like AFM.
If not, raise :exc:`RuntimeError`.
"""
# Remember the file position in case the caller wants to
# do something else with the file.
pos = fh.tell()
try:
line = fh.readline()
finally:
fh.seek(pos, 0)
# AFM spec, Section 4: The StartFontMetrics keyword [followed by a
# version number] must be the first line in the file, and the
# EndFontMetrics keyword must be the last non-empty line in the
# file. We just check the first line.
if not line.startswith('StartFontMetrics'):
raise RuntimeError('Not an AFM file')
def _parse_header(fh):
"""
Reads the font metrics header (up to the char metrics) and returns
a dictionary mapping *key* to *val*. *val* will be converted to the
appropriate python type as necessary; eg:
* 'False'->False
* '0'->0
* '-168 -218 1000 898'-> [-168, -218, 1000, 898]
Dictionary keys are
StartFontMetrics, FontName, FullName, FamilyName, Weight,
ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition,
UnderlineThickness, Version, Notice, EncodingScheme, CapHeight,
XHeight, Ascender, Descender, StartCharMetrics
"""
headerConverters = {
'StartFontMetrics': _to_float,
'FontName': _to_str,
'FullName': _to_str,
'FamilyName': _to_str,
'Weight': _to_str,
'ItalicAngle': _to_float,
'IsFixedPitch': _to_bool,
'FontBBox': _to_list_of_ints,
'UnderlinePosition': _to_int,
'UnderlineThickness': _to_int,
'Version': _to_str,
'Notice': _to_str,
'EncodingScheme': _to_str,
'CapHeight': _to_float, # Is the second version a mistake, or
'Capheight': _to_float, # do some AFM files contain 'Capheight'? -JKS
'XHeight': _to_float,
'Ascender': _to_float,
'Descender': _to_float,
'StdHW': _to_float,
'StdVW': _to_float,
'StartCharMetrics': _to_int,
'CharacterSet': _to_str,
'Characters': _to_int,
}
d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if line.startswith('Comment'): continue
lst = line.split( ' ', 1 )
#print '%-s\t%-d line :: %-s' % ( fh.name, len(lst), line )
key = lst[0]
if len( lst ) == 2:
val = lst[1]
else:
val = ''
#key, val = line.split(' ', 1)
try: d[key] = headerConverters[key](val)
except ValueError:
print >>sys.stderr, 'Value error parsing header in AFM:', key, val
continue
except KeyError:
print >>sys.stderr, 'Found an unknown keyword in AFM header (was %s)' % key
continue
if key=='StartCharMetrics': return d
raise RuntimeError('Bad parse')
def _parse_char_metrics(fh):
"""
Return a character metric dictionary. Keys are the ASCII num of
the character, values are a (*wx*, *name*, *bbox*) tuple, where
*wx* is the character width, *name* is the postscript language
name, and *bbox* is a (*llx*, *lly*, *urx*, *ury*) tuple.
This function is incomplete per the standard, but thus far parses
all the sample afm files tried.
"""
ascii_d = {}
name_d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if line.startswith('EndCharMetrics'): return ascii_d, name_d
vals = line.split(';')[:4]
if len(vals) !=4 : raise RuntimeError('Bad char metrics line: %s' % line)
num = _to_int(vals[0].split()[1])
wx = _to_float(vals[1].split()[1])
name = vals[2].split()[1]
bbox = _to_list_of_ints(vals[3][2:])
# Workaround: If the character name is 'Euro', give it the corresponding
# character code, according to WinAnsiEncoding (see PDF Reference).
if name == 'Euro':
num = 128
if num != -1:
ascii_d[num] = (wx, name, bbox)
name_d[name] = (wx, bbox)
raise RuntimeError('Bad parse')
def _parse_kern_pairs(fh):
"""
Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and
values are the kern pair value. For example, a kern pairs line like
``KPX A y -50``
will be represented as::
d[ ('A', 'y') ] = -50
"""
line = fh.readline()
if not line.startswith('StartKernPairs'):
raise RuntimeError('Bad start of kern pairs data: %s'%line)
d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if len(line)==0: continue
if line.startswith('EndKernPairs'):
fh.readline() # EndKernData
return d
vals = line.split()
if len(vals)!=4 or vals[0]!='KPX':
raise RuntimeError('Bad kern pairs line: %s'%line)
c1, c2, val = vals[1], vals[2], _to_float(vals[3])
d[(c1,c2)] = val
raise RuntimeError('Bad kern pairs parse')
def _parse_composites(fh):
"""
Return a composites dictionary. Keys are the names of the
composites. Values are a num parts list of composite information,
with each element being a (*name*, *dx*, *dy*) tuple. Thus a
composites line reading:
CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ;
will be represented as::
d['Aacute'] = [ ('A', 0, 0), ('acute', 160, 170) ]
"""
d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if len(line)==0: continue
if line.startswith('EndComposites'):
return d
vals = line.split(';')
cc = vals[0].split()
name, numParts = cc[1], _to_int(cc[2])
pccParts = []
for s in vals[1:-1]:
pcc = s.split()
name, dx, dy = pcc[1], _to_float(pcc[2]), _to_float(pcc[3])
pccParts.append( (name, dx, dy) )
d[name] = pccParts
raise RuntimeError('Bad composites parse')
def _parse_optional(fh):
"""
Parse the optional fields for kern pair data and composites
return value is a (*kernDict*, *compositeDict*) which are the
return values from :func:`_parse_kern_pairs`, and
:func:`_parse_composites` if the data exists, or empty dicts
otherwise
"""
optional = {
'StartKernData' : _parse_kern_pairs,
'StartComposites' : _parse_composites,
}
d = {'StartKernData':{}, 'StartComposites':{}}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if len(line)==0: continue
key = line.split()[0]
if key in optional: d[key] = optional[key](fh)
l = ( d['StartKernData'], d['StartComposites'] )
return l
def parse_afm(fh):
"""
Parse the Adobe Font Metrics file in file handle *fh*. Return value
is a (*dhead*, *dcmetrics*, *dkernpairs*, *dcomposite*) tuple where
*dhead* is a :func:`_parse_header` dict, *dcmetrics* is a
:func:`_parse_char_metrics` dict, *dkernpairs* is a
:func:`_parse_kern_pairs` dict (possibly {}), and *dcomposite* is a
:func:`_parse_composites` dict (possibly {})
"""
_sanity_check(fh)
dhead = _parse_header(fh)
dcmetrics_ascii, dcmetrics_name = _parse_char_metrics(fh)
doptional = _parse_optional(fh)
return dhead, dcmetrics_ascii, dcmetrics_name, doptional[0], doptional[1]
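# Illustrative sketch (not part of the original module): calling parse_afm
# directly; the AFM class below wraps the same call.
# fh = open('ptmr8a.afm')
# dhead, dcmetrics_ascii, dcmetrics_name, dkern, dcomposite = parse_afm(fh)
# dhead['FontName'] # e.g. 'Times-Roman'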
class AFM:
def __init__(self, fh):
"""
Parse the AFM file in file object *fh*
"""
(dhead, dcmetrics_ascii, dcmetrics_name, dkernpairs, dcomposite) = \
parse_afm(fh)
self._header = dhead
self._kern = dkernpairs
self._metrics = dcmetrics_ascii
self._metrics_by_name = dcmetrics_name
self._composite = dcomposite
def get_bbox_char(self, c, isord=False):
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return bbox
def string_width_height(self, s):
"""
Return the string width (including kerning) and string height
as a (*w*, *h*) tuple.
"""
if not len(s): return 0,0
totalw = 0
namelast = None
miny = 1e9
maxy = 0
for c in s:
if c == '\n': continue
wx, name, bbox = self._metrics[ord(c)]
l,b,w,h = bbox
# find the width with kerning
try: kp = self._kern[ (namelast, name) ]
except KeyError: kp = 0
totalw += wx + kp
# find the max y
thismax = b+h
if thismax>maxy: maxy = thismax
# find the min y
thismin = b
if thismin<miny: miny = thismin
return totalw, maxy-miny
def get_str_bbox_and_descent(self, s):
"""
Return the string bounding box
"""
if not len(s): return 0,0,0,0
totalw = 0
namelast = None
miny = 1e9
maxy = 0
left = 0
if not isinstance(s, unicode):
s = s.decode()
for c in s:
if c == '\n': continue
name = uni2type1.get(ord(c), 'question')
try:
wx, bbox = self._metrics_by_name[name]
except KeyError:
name = 'question'
wx, bbox = self._metrics_by_name[name]
l,b,w,h = bbox
if l<left: left = l
# find the width with kerning
try: kp = self._kern[ (namelast, name) ]
except KeyError: kp = 0
totalw += wx + kp
# find the max y
thismax = b+h
if thismax>maxy: maxy = thismax
# find the min y
thismin = b
if thismin<miny: miny = thismin
return left, miny, totalw, maxy-miny, -miny
def get_str_bbox(self, s):
"""
Return the string bounding box
"""
return self.get_str_bbox_and_descent(s)[:4]
def get_name_char(self, c, isord=False):
"""
Get the name of the character, ie, ';' is 'semicolon'
"""
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return name
def get_width_char(self, c, isord=False):
"""
Get the width of the character from the character metric WX
field
"""
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return wx
def get_width_from_char_name(self, name):
"""
Get the width of the character from a type1 character name
"""
wx, bbox = self._metrics_by_name[name]
return wx
def get_height_char(self, c, isord=False):
"""
Get the height of character *c* from the bounding box. This
is the ink height (space is 0)
"""
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return bbox[-1]
def get_kern_dist(self, c1, c2):
"""
Return the kerning pair distance (possibly 0) for chars *c1*
and *c2*
"""
name1, name2 = self.get_name_char(c1), self.get_name_char(c2)
return self.get_kern_dist_from_name(name1, name2)
def get_kern_dist_from_name(self, name1, name2):
"""
Return the kerning pair distance (possibly 0) for chars
*name1* and *name2*
"""
try: return self._kern[ (name1, name2) ]
except: return 0
def get_fontname(self):
"Return the font name, eg, 'Times-Roman'"
return self._header['FontName']
def get_fullname(self):
"Return the font full name, eg, 'Times-Roman'"
name = self._header.get('FullName')
if name is None: # use FontName as a substitute
name = self._header['FontName']
return name
def get_familyname(self):
"Return the font family name, eg, 'Times'"
name = self._header.get('FamilyName')
if name is not None:
return name
# FamilyName not specified so we'll make a guess
name = self.get_fullname()
extras = r'(?i)([ -](regular|plain|italic|oblique|bold|semibold|light|ultralight|extra|condensed))+$'
return re.sub(extras, '', name)
def get_weight(self):
"Return the font weight, eg, 'Bold' or 'Roman'"
return self._header['Weight']
def get_angle(self):
"Return the fontangle as float"
return self._header['ItalicAngle']
def get_capheight(self):
"Return the cap height as float"
return self._header['CapHeight']
def get_xheight(self):
"Return the xheight as float"
return self._header['XHeight']
def get_underline_thickness(self):
"Return the underline thickness as float"
return self._header['UnderlineThickness']
def get_horizontal_stem_width(self):
"""
Return the standard horizontal stem width as float, or *None* if
not specified in AFM file.
"""
return self._header.get('StdHW', None)
def get_vertical_stem_width(self):
"""
Return the standard vertical stem width as float, or *None* if
not specified in AFM file.
"""
return self._header.get('StdVW', None)
if __name__=='__main__':
#pathname = '/usr/local/lib/R/afm/'
pathname = '/usr/local/share/fonts/afms/adobe'
for fname in os.listdir(pathname):
fh = file(os.path.join(pathname,fname))
afm = AFM(fh)
w,h = afm.string_width_height('John Hunter is the Man!')
| agpl-3.0 |
nejucomo/sgg | spiralgalaxygame/demiurge.py | 1 | 4151 | import random
from math import sin, pi
from spiralgalaxygame.body import Body, BodyKind
from spiralgalaxygame.geometry import Vector, Circle
from spiralgalaxygame.discdist import DiscreteDistribution
def generate_galaxy_bodies(randgen = random.random,
parentmu = 1000,
parentsigma = 100,
galacticradius = 1000,
spokes = 4,
spin = 3.8,
diffusion = 13.5,
tightness = 0.35):
for i in range(int(random.lognormvariate(parentmu, parentsigma))):
for body in generate_star(randgen, galacticradius, spokes, spin, diffusion, tightness):
yield body
# Notes:
#
# Parameters named u are expected to be uniform random samples on [0, 1).
#
# The "Standard Length Unit" is a convenient unit of measurement of
# game objects, where the smallest ships have a radius close to 1 SLU.
def generate_star(randgen, galacticradius, spokes, spin, diffusion, tightness):
"""randgen is a function which generates uniform random samples [0, 1)."""
(kind, tightnessfactor, minrad, radrange, childmu, childsigma) = select_star_info(randgen())
adjustedtightness = tightness * tightnessfactor
bdfc = select_body_distance(galacticradius, adjustedtightness, randgen())
angle = select_angle(spokes, diffusion, spin, bdfc, randgen())
bodyradius = select_star_radius(minrad, radrange, randgen())
circle = Circle(Vector.from_angle_and_radius(angle, bdfc), bodyradius)
parent = Body(kind, circle)
yield parent
for i in range(int(random.lognormvariate(childmu, childsigma))):
yield generate_child(randgen, circle)
def generate_child(randgen, pcircle):
(kind, solarradius, minrad, radrange) = select_child_info(randgen())
bdfp = pcircle.radius + select_body_distance(solarradius, tightness = 0.5, u = randgen())
angle = randgen() * 2 * pi
center = pcircle.center + Vector.from_angle_and_radius(angle, bdfp)
bodyradius = select_star_radius(minrad, radrange, randgen())
return Body(kind, Circle(center, bodyradius))
def select_body_distance(galacticradius, tightness, u):
"""Given galacticradius in SLUs, a tightness parameter, and a u sample, return a distance in SLUs."""
t = sin(0.5 * pi * u) ** tightness
k = (t + u**4) / 2
return galacticradius * k
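# Illustrative worked example (not part of the original module): for
# galacticradius=1000, tightness=0.35 and u=0.5, t = sin(0.25*pi)**0.35 ~= 0.886
# and k = (0.886 + 0.5**4)/2 ~= 0.474, so the body sits roughly 474 SLU from the core.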
def select_angle(spokes, diffusion, spin, bdfc, u):
"""Given spokes, diffusion, spin, and bdfc (body distance from core) and a u, return galactic angle."""
return select_base_angle(spokes, diffusion, u) + spin * bdfc
def select_base_angle(spokes, diffusion, u):
factor = spokes * pi
a = sin(factor * u)
b = abs(a) ** diffusion
return u - b / factor
def select_star_radius(minradius, radiusrange, u):
return minradius + radiusrange * u**2
select_star_info = DiscreteDistribution(
# items: (kind, tightnessfactor, minrad, radrange, childmu, childsigma)
# Note: blue and green planets are never parent bodies.
(99, (BodyKind.star_white, 0.9, 100, 100, 0.35, 0.55)),
(60, (BodyKind.star_yellow, 0.85, 80, 200, 0.50, 0.50)),
(40, (BodyKind.star_red, 0.67, 40, 120, 0.20, 0.30)),
(7, (BodyKind.planet_grey, 1.0, 10, 80, -0.50, 0.11)),
(1, (BodyKind.planet_brown, 0.9, 30, 40, -0.40, 0.15)),
(10, (BodyKind.black_hole, 0.4, 1, 10, -0.30, 0.30)),
(17, (BodyKind.dust_cloud, 1.0, 80, 400, -1.00, 0.00)),
(13, (BodyKind.gas_cloud, 0.6, 80, 800, -1.00, 0.00)),
)
select_child_info = DiscreteDistribution(
# items: (kind, solarradius, minrad, radrange)
# Note: dust clouds and gas clouds are never children.
(1, (BodyKind.star_white, 1000, 60, 100)),
(1, (BodyKind.star_yellow, 1100, 50, 130)),
(2, (BodyKind.star_red, 1300, 20, 50)),
(100, (BodyKind.planet_blue, 1400, 10, 80)),
(120, (BodyKind.planet_grey, 2500, 10, 60)),
(90, (BodyKind.planet_green, 1200, 15, 60)),
(80, (BodyKind.planet_brown, 1800, 25, 30)),
(5, (BodyKind.black_hole, 700, 1, 6)),
)
| agpl-3.0 |
badjr/pysal | pysal/core/IOHandlers/mat.py | 20 | 4377 | import pysal
import os.path
import scipy.io as sio
import pysal.core.FileIO as FileIO
from pysal.weights import W
from pysal.weights.util import full, full2W
from warnings import warn
__author__ = "Myunghwa Hwang <[email protected]>"
__all__ = ["MatIO"]
class MatIO(FileIO.FileIO):
"""
Opens, reads, and writes weights file objects in MATLAB Level 4-5 MAT format.
MAT files are used in Dr. LeSage's MATLAB Econometrics library.
The MAT file format can handle both full and sparse matrices,
and it allows for a matrix dimension greater than 256.
In PySAL, row and column headers of a MATLAB array are ignored.
PySAL uses matlab io tools in scipy.
Thus, it is subject to all limits that loadmat and savemat in scipy have.
Notes
-----
If a given weights object contains too many observations to
write it out as a full matrix,
PySAL writes out the object as a sparse matrix.
References
----------
MathWorks (2011) "MATLAB 7 MAT-File Format" at
http://www.mathworks.com/help/pdf_doc/matlab/matfile_format.pdf.
scipy matlab io
http://docs.scipy.org/doc/scipy/reference/tutorial/io.html
"""
FORMATS = ['mat']
MODES = ['r', 'w']
def __init__(self, *args, **kwargs):
self._varName = 'Unknown'
FileIO.FileIO.__init__(self, *args, **kwargs)
self.file = open(self.dataPath, self.mode + 'b')
def _set_varName(self, val):
if issubclass(type(val), basestring):
self._varName = val
def _get_varName(self):
return self._varName
varName = property(fget=_get_varName, fset=_set_varName)
def read(self, n=-1):
self._complain_ifclosed(self.closed)
return self._read()
def seek(self, pos):
if pos == 0:
self.file.seek(0)
self.pos = 0
def _read(self):
"""Reads MATLAB mat file
Returns a pysal.weights.weights.W object
Examples
--------
Type 'dir(w)' at the interpreter to see what methods are supported.
Open a MATLAB mat file and read it into a pysal weights object
>>> w = pysal.open(pysal.examples.get_path('spat-sym-us.mat'),'r').read()
Get the number of observations from the header
>>> w.n
46
Get the mean number of neighbors
>>> w.mean_neighbors
4.0869565217391308
Get neighbor distances for a single observation
>>> w[1]
{25: 1, 3: 1, 28: 1, 39: 1}
"""
if self.pos > 0:
raise StopIteration
mat = sio.loadmat(self.file)
mat_keys = [k for k in mat if not k.startswith("_")]
full_w = mat[mat_keys[0]]
self.pos += 1
return full2W(full_w)
def write(self, obj):
"""
Parameters
----------
.write(weightsObject)
accepts a weights object
Returns
------
a MATLAB mat file
write a weights object to the opened mat file.
Examples
--------
>>> import tempfile, pysal, os
>>> testfile = pysal.open(pysal.examples.get_path('spat-sym-us.mat'),'r')
>>> w = testfile.read()
Create a temporary file for this example
>>> f = tempfile.NamedTemporaryFile(suffix='.mat')
Reassign to new var
>>> fname = f.name
Close the temporary named file
>>> f.close()
Open the new file in write mode
>>> o = pysal.open(fname,'w')
Write the Weights object into the open file
>>> o.write(w)
>>> o.close()
Read in the newly created mat file
>>> wnew = pysal.open(fname,'r').read()
Compare values from old to new
>>> wnew.pct_nonzero == w.pct_nonzero
True
Clean up temporary file created for this example
>>> os.remove(fname)
"""
self._complain_ifclosed(self.closed)
if issubclass(type(obj), W):
try:
w = full(obj)[0]
except ValueError:
w = obj.sparse
sio.savemat(self.file, {'WEIGHT': w})
self.pos += 1
else:
raise TypeError("Expected a pysal weights object, got: %s" % (
type(obj)))
def close(self):
self.file.close()
FileIO.FileIO.close(self)
| bsd-3-clause |
rpoleski/MulensModel | examples/use_cases/use_case_14_coordinate_system.py | 1 | 1988 | """
Use Case 14 - jcy
MulensModel will be written assuming the coordinate system
(t0,u0,alpha) are defined relative to the center of mass. This is not
always the most efficient choice for fitting. This use case covers the
conversion from center of magnification coordinates to center of mass
coordinates. Essentially, it is the responsibility of the user to
convert from their MCMC coordinate system to the center of mass
coordinate system needed for the magnification calculation.
"""
import astropy.units as u
import numpy as np
import MulensModel as mm
raise NotImplementedError('frame_origin not implemented for Model')
def convert_cof_mag2mass(t0, te, u0, alpha, s, q):
"""
function to convert from center of magnification to center of mass
coordinates. Note that this function is for illustration only. It has
not been tested and may have sign errors.
"""
if s <= 1.0:
return t0, u0
else:
delta = q / (1. + q) / s
delta_u0 = delta * np.sin(alpha * np.pi / 180.)
delta_tau = delta * np.cos(alpha * np.pi / 180.)
t0_prime = t0 + delta_tau * te
u0_prime = u0 + delta_u0
return t0_prime, u0_prime
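# Illustrative numbers for the parameters used below (s=1.1, q=0.001,
# alpha=30 deg, t_E=30): delta = q/(1+q)/s ~= 9.1e-4, so t_0 shifts by
# delta*cos(alpha)*t_E ~= 0.024 and u_0 by delta*sin(alpha) ~= 4.5e-4.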
# Define model parameters in CoMAGN system
t0_center_of_mag = 7000.
u0_center_of_mag = 0.1
alpha_center_of_mag = 30.*u.deg
te = 30.
print('Center of magnification: {0}, {1}'.format(
t0_center_of_mag, u0_center_of_mag))
s = 1.1
q = 0.001
# Get parameters in CoMASS system
(t0_center_of_mass, u0_center_of_mass) = convert_cof_mag2mass(
t0_center_of_mag, te, u0_center_of_mag, alpha_center_of_mag, s, q)
print('Center of mass: {0}, {1}'.format(t0_center_of_mass, u0_center_of_mass))
# How does this get passed to a minimizer?
# Alternatively,
model = mm.Model(
{'t_0': 2457000., 'u_0': 0.1, 't_E': 30., 'rho': 0.001,
'alpha': 30*u.deg, 's': 1.1, 'q': 0.001},
frame_origin='magnification')
print(model.parameters.t_0, model.parameters.u_0)
| mit |
kmoocdev2/edx-platform | cms/djangoapps/models/settings/course_metadata.py | 1 | 8977 | """
Django module for Course Metadata class -- manages advanced settings and related parameters
"""
from django.conf import settings
from django.utils.translation import ugettext as _
from six import text_type
from xblock.fields import Scope
from xblock_django.models import XBlockStudioConfigurationFlag
from xmodule.modulestore.django import modulestore
class CourseMetadata(object):
'''
For CRUD operations on metadata fields which do not have specific editors
on the other pages including any user generated ones.
The objects have no predefined attrs but instead are obj encodings of the
editable metadata.
'''
# The list of fields that wouldn't be shown in Advanced Settings.
# Should not be used directly. Instead the filtered_list method should
# be used if the field needs to be filtered depending on the feature flag.
FILTERED_LIST = [
'cohort_config',
'xml_attributes',
'start',
'end',
'enrollment_start',
'enrollment_end',
'certificate_available_date',
'tabs',
'graceperiod',
'show_timezone',
'format',
'graded',
'hide_from_toc',
'pdf_textbooks',
'user_partitions',
'name', # from xblock
'tags', # from xblock
'visible_to_staff_only',
'group_access',
'pre_requisite_courses',
'entrance_exam_enabled',
'entrance_exam_minimum_score_pct',
'entrance_exam_id',
'is_entrance_exam',
'in_entrance_exam',
'language',
'certificates',
'minimum_grade_credit',
'default_time_limit_minutes',
'is_proctored_enabled',
'is_time_limited',
'is_practice_exam',
'exam_review_rules',
'hide_after_due',
'self_paced',
'show_correctness',
'chrome',
'default_tab',
'highlights_enabled_for_messaging',
]
@classmethod
def filtered_list(cls):
"""
Filter fields based on feature flag, i.e. enabled, disabled.
"""
# Copy the filtered list to avoid permanently changing the class attribute.
filtered_list = list(cls.FILTERED_LIST)
# Do not show giturl if feature is not enabled.
if not settings.FEATURES.get('ENABLE_EXPORT_GIT'):
filtered_list.append('giturl')
# Do not show edxnotes if the feature is disabled.
if not settings.FEATURES.get('ENABLE_EDXNOTES'):
filtered_list.append('edxnotes')
# Do not show video_upload_pipeline if the feature is disabled.
if not settings.FEATURES.get('ENABLE_VIDEO_UPLOAD_PIPELINE'):
filtered_list.append('video_upload_pipeline')
# Do not show video auto advance if the feature is disabled
if not settings.FEATURES.get('ENABLE_AUTOADVANCE_VIDEOS'):
filtered_list.append('video_auto_advance')
# Do not show social sharing url field if the feature is disabled.
if (not hasattr(settings, 'SOCIAL_SHARING_SETTINGS') or
not getattr(settings, 'SOCIAL_SHARING_SETTINGS', {}).get("CUSTOM_COURSE_URLS")):
filtered_list.append('social_sharing_url')
# Do not show teams configuration if feature is disabled.
if not settings.FEATURES.get('ENABLE_TEAMS'):
filtered_list.append('teams_configuration')
if not settings.FEATURES.get('ENABLE_VIDEO_BUMPER'):
filtered_list.append('video_bumper')
# Do not show enable_ccx if feature is not enabled.
if not settings.FEATURES.get('CUSTOM_COURSES_EDX'):
filtered_list.append('enable_ccx')
filtered_list.append('ccx_connector')
# Do not show "Issue Open Badges" in Studio Advanced Settings
# if the feature is disabled.
if not settings.FEATURES.get('ENABLE_OPENBADGES'):
filtered_list.append('issue_badges')
# If the XBlockStudioConfiguration table is not being used, there is no need to
# display the "Allow Unsupported XBlocks" setting.
if not XBlockStudioConfigurationFlag.is_enabled():
filtered_list.append('allow_unsupported_xblocks')
return filtered_list
@classmethod
def fetch(cls, descriptor):
"""
Fetch the key:value editable course details for the given course from
persistence and return a CourseMetadata model.
"""
result = {}
metadata = cls.fetch_all(descriptor)
for key, value in metadata.iteritems():
if key in cls.filtered_list():
continue
result[key] = value
return result
@classmethod
def fetch_all(cls, descriptor):
"""
Fetches all key:value pairs from persistence and returns a CourseMetadata model.
"""
result = {}
for field in descriptor.fields.values():
if field.scope != Scope.settings:
continue
field_help = _(field.help) # pylint: disable=translation-of-non-string
help_args = field.runtime_options.get('help_format_args')
if help_args is not None:
field_help = field_help.format(**help_args)
result[field.name] = {
'value': field.read_json(descriptor),
'display_name': _(field.display_name), # pylint: disable=translation-of-non-string
'help': field_help,
'deprecated': field.runtime_options.get('deprecated', False),
'hidden': field.runtime_options.get('hidden', False)
}
return result
@classmethod
def update_from_json(cls, descriptor, jsondict, user, filter_tabs=True):
"""
Decode the json into CourseMetadata and save any changed attrs to the db.
Ensures none of the fields are in the blacklist.
"""
filtered_list = cls.filtered_list()
# Don't filter on the tab attribute if filter_tabs is False.
if not filter_tabs:
filtered_list.remove("tabs")
# Validate the values before actually setting them.
key_values = {}
for key, model in jsondict.iteritems():
# should it be an error if one of the filtered list items is in the payload?
if key in filtered_list:
continue
try:
val = model['value']
if hasattr(descriptor, key) and getattr(descriptor, key) != val:
key_values[key] = descriptor.fields[key].from_json(val)
except (TypeError, ValueError) as err:
raise ValueError(_("Incorrect format for field '{name}'. {detailed_message}").format(
name=model['display_name'], detailed_message=text_type(err)))
return cls.update_from_dict(key_values, descriptor, user)
@classmethod
def validate_and_update_from_json(cls, descriptor, jsondict, user, filter_tabs=True):
"""
Validate the values in the json dict (validated by xblock fields from_json method)
If all fields validate, go ahead and update those values on the object and return it without
persisting it to the DB.
If not, return the error objects list.
Returns:
did_validate: whether values pass validation or not
errors: list of error objects
result: the updated course metadata or None if error
"""
filtered_list = cls.filtered_list()
if not filter_tabs:
filtered_list.remove("tabs")
filtered_dict = dict((k, v) for k, v in jsondict.iteritems() if k not in filtered_list)
did_validate = True
errors = []
key_values = {}
updated_data = None
for key, model in filtered_dict.iteritems():
try:
if key == 'need_lock':
continue
val = model['value']
if hasattr(descriptor, key) and getattr(descriptor, key) != val:
key_values[key] = descriptor.fields[key].from_json(val)
except (TypeError, ValueError) as err:
did_validate = False
errors.append({'message': text_type(err), 'model': model})
# If did validate, go ahead and update the metadata
if did_validate:
updated_data = cls.update_from_dict(key_values, descriptor, user, save=False)
return did_validate, errors, updated_data
@classmethod
def update_from_dict(cls, key_values, descriptor, user, save=True):
"""
Update metadata descriptor from key_values. Saves to modulestore if save is true.
"""
for key, value in key_values.iteritems():
setattr(descriptor, key, value)
if save and len(key_values):
modulestore().update_item(descriptor, user.id)
return cls.fetch(descriptor)
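# Illustrative usage sketch (field name and objects are hypothetical):
# payload = {'days_early_for_beta': {'value': 2, 'display_name': 'Days Early for Beta Users'}}
# CourseMetadata.update_from_json(course_descriptor, payload, user=request.user)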
| agpl-3.0 |
rebstar6/servo | python/servo/devenv_commands.py | 5 | 6435 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
from os import path, getcwd, listdir
import subprocess
import sys
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd, call
@CommandProvider
class MachCommands(CommandBase):
@Command('cargo',
description='Run Cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to Cargo")
def cargo(self, params):
if not params:
params = []
if self.context.topdir == getcwd():
with cd(path.join('components', 'servo')):
return call(["cargo"] + params, env=self.build_env())
return call(['cargo'] + params, env=self.build_env())
@Command('cargo-update',
description='Same as update-cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages')
def cargo_update(self, params=None, package=None, all_packages=None):
self.update_cargo(params, package, all_packages)
@Command('update-cargo',
description='Update Cargo dependencies',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates the selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages. NOTE! This is very likely to break your ' +
'working copy, making it impossible to build servo. Only do ' +
'this if you really know what you are doing.')
def update_cargo(self, params=None, package=None, all_packages=None):
if not params:
params = []
if package:
params += ["-p", package]
elif all_packages:
params = []
else:
print("Please choose package to update with the --package (-p) ")
print("flag or update all packages with --all-packages (-a) flag")
sys.exit(1)
cargo_paths = [path.join('components', 'servo'),
path.join('ports', 'cef'),
path.join('ports', 'geckolib')]
for cargo_path in cargo_paths:
with cd(cargo_path):
print(cargo_path)
call(["cargo", "update"] + params,
env=self.build_env())
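# Illustrative invocation (package name is hypothetical): ./mach update-cargo -p openssl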
@Command('clippy',
description='Run Clippy',
category='devenv')
def clippy(self):
features = "--features=script/plugins/clippy"
with cd(path.join(self.context.topdir, "components", "servo")):
return subprocess.call(["cargo", "build", features],
env=self.build_env())
@Command('rustc',
description='Run the Rust compiler',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to rustc")
def rustc(self, params):
if params is None:
params = []
return call(["rustc"] + params, env=self.build_env())
@Command('rust-root',
description='Print the path to the root of the Rust compiler',
category='devenv')
def rust_root(self):
print(self.config["tools"]["rust-root"])
@Command('grep',
description='`git grep` for selected directories.',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to `git grep`")
def grep(self, params):
if not params:
params = []
# get all directories under tests/
tests_dirs = listdir('tests')
# Directories to be excluded under tests/
excluded_tests_dirs = ['wpt', 'jquery']
tests_dirs = filter(lambda dir: dir not in excluded_tests_dirs, tests_dirs)
# Set of directories in project root
root_dirs = ['components', 'ports', 'python', 'etc', 'resources']
# Generate absolute paths for directories in tests/ and project-root/
tests_dirs_abs = [path.join(self.context.topdir, 'tests', s) for s in tests_dirs]
root_dirs_abs = [path.join(self.context.topdir, s) for s in root_dirs]
# Absolute paths for all directories to be considered
grep_paths = root_dirs_abs + tests_dirs_abs
return call(
["git"] + ["grep"] + params + ['--'] + grep_paths + [':(exclude)*.min.js'],
env=self.build_env())
@Command('wpt-upgrade',
description='upgrade wptrunner.',
category='devenv')
def upgrade_wpt_runner(self):
with cd(path.join(self.context.topdir, 'tests', 'wpt', 'harness')):
code = call(["git", "init"], env=self.build_env())
if code:
return code
code = call(
["git", "remote", "add", "upstream", "https://github.com/w3c/wptrunner.git"], env=self.build_env())
if code:
return code
code = call(["git", "fetch", "upstream"], env=self.build_env())
if code:
return code
code = call(["git", "reset", "--hard", "remotes/upstream/master"], env=self.build_env())
if code:
return code
code = call(["rm", "-rf", ".git"], env=self.build_env())
if code:
return code
return 0
| mpl-2.0 |
keyurpatel076/MissionPlannerGit | Lib/site-packages/scipy/fftpack/pseudo_diffs.py | 57 | 12479 | """
Differential and pseudo-differential operators.
"""
# Created by Pearu Peterson, September 2002
__all__ = ['diff',
'tilbert','itilbert','hilbert','ihilbert',
'cs_diff','cc_diff','sc_diff','ss_diff',
'shift']
from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj
import convolve
from scipy.fftpack.basic import _datacopied
import atexit
atexit.register(convolve.destroy_convolve_cache)
del atexit
_cache = {}
def diff(x,order=1,period=None,
_cache = _cache):
""" diff(x, order=1, period=2*pi) -> y
Return k-th derivative (or integral) of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j
y_0 = 0 if order is not 0.
Optional input:
order
The order of differentiation. Default order is 1. If order is
negative, then integration is carried out under the assumption
that x_0==0.
period
The assumed period of the sequence. Default is 2*pi.
Notes:
If sum(x,axis=0)=0 then
diff(diff(x,k),-k)==x (within numerical accuracy)
For odd order and even len(x), the Nyquist mode is taken zero.
"""
tmp = asarray(x)
if order==0:
return tmp
if iscomplexobj(tmp):
return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period)
if period is not None:
c = 2*pi/period
else:
c = 1.0
n = len(x)
omega = _cache.get((n,order,c))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,order=order,c=c):
if k:
return pow(c*k,order)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=order,
zero_nyquist=1)
_cache[(n,order,c)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=order%2,
overwrite_x=overwrite_x)
del _cache
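#Illustrative sketch (not part of the original module): differentiating a
#sampled sine over a full 2*pi period; diff(sin) should approximate cos on the
#same grid.
# import numpy as np
# x = np.sin(np.linspace(0, 2*np.pi, 64, endpoint=False))
# dx = diff(x) # approximately cos on the same grid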
_cache = {}
def tilbert(x,h,period=None,
_cache = _cache):
""" tilbert(x, h, period=2*pi) -> y
Return h-Tilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j
y_0 = 0
Input:
h
Defines the parameter of the Tilbert transform.
period
The assumed period of the sequence. Default period is 2*pi.
Notes:
If sum(x,axis=0)==0 and n=len(x) is odd then
tilbert(itilbert(x)) == x
If 2*pi*h/period is approximately 10 or larger then numerically
tilbert == hilbert
(theoretically oo-Tilbert == Hilbert).
For even len(x), the Nyquist mode of x is taken zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return tilbert(tmp.real,h,period)+\
1j*tilbert(tmp.imag,h,period)
if period is not None:
h = h*2*pi/period
n = len(x)
omega = _cache.get((n,h))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,h=h):
if k: return 1.0/tanh(h*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def itilbert(x,h,period=None,
_cache = _cache):
""" itilbert(x, h, period=2*pi) -> y
Return inverse h-Tilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
y_0 = 0
Optional input: see tilbert.__doc__
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return itilbert(tmp.real,h,period)+\
1j*itilbert(tmp.imag,h,period)
if period is not None:
h = h*2*pi/period
n = len(x)
omega = _cache.get((n,h))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,h=h):
if k: return -tanh(h*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def hilbert(x,
_cache=_cache):
""" hilbert(x) -> y
Return Hilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = sqrt(-1)*sign(j) * x_j
y_0 = 0
Notes:
If sum(x,axis=0)==0 then
hilbert(ihilbert(x)) == x
For even len(x), the Nyquist mode of x is taken zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return hilbert(tmp.real)+1j*hilbert(tmp.imag)
n = len(x)
omega = _cache.get(n)
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k):
if k>0: return 1.0
elif k<0: return -1.0
return 0.0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[n] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
def ihilbert(x):
""" ihilbert(x) -> y
Return inverse Hilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = -sqrt(-1)*sign(j) * x_j
y_0 = 0
"""
return -hilbert(x)
_cache = {}
def cs_diff(x, a, b, period=None,
_cache = _cache):
""" cs_diff(x, a, b, period=2*pi) -> y
Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
y_0 = 0
Input:
a,b
Defines the parameters of the cosh/sinh pseudo-differential
operator.
period
The period of the sequence. Default period is 2*pi.
Notes:
For even len(x), the Nyquist mode of x is taken zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return cs_diff(tmp.real,a,b,period)+\
1j*cs_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
if k: return -cosh(a*k)/sinh(b*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def sc_diff(x, a, b, period=None,
_cache = _cache):
"""
Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
y_0 = 0
Parameters
----------
x : array_like
Input array.
a,b : float
Defines the parameters of the sinh/cosh pseudo-differential
operator.
period : float, optional
The period of the sequence x. Default is 2*pi.
Notes
-----
``sc_diff(cs_diff(x,a,b),b,a) == x``
For even ``len(x)``, the Nyquist mode of x is taken as zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return sc_diff(tmp.real,a,b,period)+\
1j*sc_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
if k: return sinh(a*k)/cosh(b*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def ss_diff(x, a, b, period=None,
_cache = _cache):
""" ss_diff(x, a, b, period=2*pi) -> y
Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
y_0 = a/b * x_0
Input:
a,b
Defines the parameters of the sinh/sinh pseudo-differential
operator.
period
The period of the sequence x. Default is 2*pi.
Notes:
ss_diff(ss_diff(x,a,b),b,a) == x
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return ss_diff(tmp.real,a,b,period)+\
1j*ss_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
if k: return sinh(a*k)/sinh(b*k)
return float(a)/b
omega = convolve.init_convolution_kernel(n,kernel)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
_cache = {}
def cc_diff(x, a, b, period=None,
_cache = _cache):
""" cc_diff(x, a, b, period=2*pi) -> y
Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
Input:
a,b
Defines the parameters of the sinh/sinh pseudo-differential
operator.
Optional input:
period
The period of the sequence x. Default is 2*pi.
Notes:
cc_diff(cc_diff(x,a,b),b,a) == x
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return cc_diff(tmp.real,a,b,period)+\
1j*cc_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
return cosh(a*k)/cosh(b*k)
omega = convolve.init_convolution_kernel(n,kernel)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
_cache = {}
def shift(x, a, period=None,
_cache = _cache):
""" shift(x, a, period=2*pi) -> y
Shift periodic sequence x by a: y(u) = x(u+a).
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_f
Optional input:
period
The period of the sequences x and y. Default period is 2*pi.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period)
if period is not None:
a = a*2*pi/period
n = len(x)
omega = _cache.get((n,a))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel_real(k,a=a): return cos(a*k)
def kernel_imag(k,a=a): return sin(a*k)
omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0,
zero_nyquist=0)
omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1,
zero_nyquist=0)
_cache[(n,a)] = omega_real,omega_imag
else:
omega_real,omega_imag = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve_z(tmp,omega_real,omega_imag,
overwrite_x=overwrite_x)
del _cache
| gpl-3.0 |
luceatnobis/youtube-dl | youtube_dl/extractor/hotstar.py | 33 | 3711 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
)
class HotStarIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+?[/-])?(?P<id>\d{10})'
_TESTS = [{
'url': 'http://www.hotstar.com/on-air-with-aib--english-1000076273',
'info_dict': {
'id': '1000076273',
'ext': 'mp4',
'title': 'On Air With AIB - English',
'description': 'md5:c957d8868e9bc793ccb813691cc4c434',
'timestamp': 1447227000,
'upload_date': '20151111',
'duration': 381,
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583',
'only_matching': True,
}, {
'url': 'http://www.hotstar.com/1000000515',
'only_matching': True,
}]
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True, query=None):
json_data = super(HotStarIE, self)._download_json(
url_or_request, video_id, note, fatal=fatal, query=query)
if json_data['resultCode'] != 'OK':
if fatal:
raise ExtractorError(json_data['errorDescription'])
return None
return json_data['resultObj']
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'http://account.hotstar.com/AVS/besc', video_id, query={
'action': 'GetAggregatedContentDetails',
'channel': 'PCTV',
'contentId': video_id,
})['contentInfo'][0]
title = video_data['episodeTitle']
if video_data.get('encrypted') == 'Y':
raise ExtractorError('This video is DRM protected.', expected=True)
formats = []
for f in ('JIO',):
format_data = self._download_json(
'http://getcdn.hotstar.com/AVS/besc',
video_id, 'Downloading %s JSON metadata' % f,
fatal=False, query={
'action': 'GetCDN',
'asJson': 'Y',
'channel': f,
'id': video_id,
'type': 'VOD',
})
if format_data:
format_url = format_data.get('src')
if not format_url:
continue
ext = determine_ext(format_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4',
m3u8_id='hls', fatal=False))
elif ext == 'f4m':
# f4m manifests produce broken files, so skip this format
continue
else:
formats.append({
'url': format_url,
'width': int_or_none(format_data.get('width')),
'height': int_or_none(format_data.get('height')),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'timestamp': int_or_none(video_data.get('broadcastDate')),
'formats': formats,
'episode': title,
'episode_number': int_or_none(video_data.get('episodeNumber')),
'series': video_data.get('contentTitle'),
}
| unlicense |
hryamzik/ansible | lib/ansible/modules/cloud/amazon/rds_snapshot_facts.py | 32 | 12499 | #!/usr/bin/python
# Copyright (c) 2014-2017 Ansible Project
# Copyright (c) 2017, 2018 Will Thames
# Copyright (c) 2017, 2018 Michael De La Rue
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: rds_snapshot_facts
version_added: "2.6"
short_description: obtain facts about one or more RDS snapshots
description:
- obtain facts about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora)
    - Aurora snapshot facts may be obtained if no identifier parameters are passed or if one of the cluster parameters is passed.
options:
db_snapshot_identifier:
description:
- Name of an RDS (unclustered) snapshot. Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier)
required: false
aliases:
- snapshot_name
db_instance_identifier:
description:
- RDS instance name for which to find snapshots. Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier),
I(db_cluster_snapshot_identifier)
required: false
db_cluster_identifier:
description:
- RDS cluster name for which to find snapshots. Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier),
I(db_cluster_snapshot_identifier)
required: false
db_cluster_snapshot_identifier:
description:
- Name of an RDS cluster snapshot. Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier)
required: false
snapshot_type:
description:
- Type of snapshot to find. By default both automated and manual
snapshots will be returned.
required: false
choices: ['automated', 'manual', 'shared', 'public']
requirements:
- "python >= 2.6"
- "boto3"
author:
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Get facts about a snapshot
- rds_snapshot_facts:
db_snapshot_identifier: snapshot_name
register: new_database_facts
# Get all RDS snapshots for an RDS instance
- rds_snapshot_facts:
db_instance_identifier: helloworld-rds-master
'''
RETURN = '''
snapshots:
description: List of non-clustered snapshots
returned: When cluster parameters are not passed
type: complex
contains:
allocated_storage:
description: How many gigabytes of storage are allocated
returned: always
type: int
sample: 10
availability_zone:
description: The availability zone of the database from which the snapshot was taken
returned: always
type: string
sample: us-west-2b
db_instance_identifier:
description: Database instance identifier
returned: always
type: string
sample: hello-world-rds
db_snapshot_arn:
description: Snapshot ARN
returned: always
type: string
sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
db_snapshot_identifier:
description: Snapshot name
returned: always
type: string
sample: rds:hello-world-rds-us1-2018-05-16-04-03
encrypted:
description: Whether the snapshot was encrypted
returned: always
type: bool
sample: true
engine:
description: Database engine
returned: always
type: string
sample: postgres
engine_version:
description: Database engine version
returned: always
type: string
sample: 9.5.10
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
instance_create_time:
description: Time the Instance was created
returned: always
type: string
sample: '2017-10-10T04:00:07.434000+00:00'
kms_key_id:
description: ID of the KMS Key encrypting the snapshot
returned: always
type: string
sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab
license_model:
description: License model
returned: always
type: string
sample: postgresql-license
master_username:
description: Database master username
returned: always
type: string
sample: dbadmin
option_group_name:
description: Database option group name
returned: always
type: string
sample: default:postgres-9-5
percent_progress:
      description: Percent progress of snapshot
returned: always
type: int
sample: 100
snapshot_create_time:
description: Time snapshot was created
returned: always
type: string
sample: '2018-05-16T04:03:33.871000+00:00'
snapshot_type:
description: Type of snapshot
returned: always
type: string
sample: automated
status:
description: Status of snapshot
returned: always
type: string
sample: available
storage_type:
description: Storage type of underlying DB
returned: always
type: string
sample: gp2
tags:
description: Snapshot tags
returned: always
type: complex
contains: {}
vpc_id:
description: ID of VPC containing the DB
returned: always
type: string
sample: vpc-abcd1234
cluster_snapshots:
description: List of cluster snapshots
returned: always
type: complex
contains:
allocated_storage:
description: How many gigabytes of storage are allocated
returned: always
type: int
sample: 1
availability_zones:
description: The availability zones of the database from which the snapshot was taken
returned: always
type: list
sample:
- ca-central-1a
- ca-central-1b
cluster_create_time:
description: Date and time the cluster was created
returned: always
type: string
sample: '2018-05-17T00:13:40.223000+00:00'
db_cluster_identifier:
description: Database cluster identifier
returned: always
type: string
sample: test-aurora-cluster
db_cluster_snapshot_arn:
description: ARN of the database snapshot
returned: always
type: string
sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot
db_cluster_snapshot_identifier:
description: Snapshot identifier
returned: always
type: string
sample: test-aurora-snapshot
engine:
description: Database engine
returned: always
type: string
sample: aurora
engine_version:
description: Database engine version
returned: always
type: string
sample: 5.6.10a
iam_database_authentication_enabled:
description: Whether database authentication through IAM is enabled
returned: always
type: bool
sample: false
kms_key_id:
description: ID of the KMS Key encrypting the snapshot
returned: always
type: string
sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab
license_model:
description: License model
returned: always
type: string
sample: aurora
master_username:
description: Database master username
returned: always
type: string
sample: shertel
percent_progress:
      description: Percent progress of snapshot
returned: always
type: int
sample: 0
port:
description: Database port
returned: always
type: int
sample: 0
snapshot_create_time:
description: Date and time when the snapshot was created
returned: always
type: string
sample: '2018-05-17T00:23:23.731000+00:00'
snapshot_type:
description: Type of snapshot
returned: always
type: string
sample: manual
status:
description: Status of snapshot
returned: always
type: string
sample: creating
storage_encrypted:
description: Whether the snapshot is encrypted
returned: always
type: bool
sample: true
tags:
description: Tags of the snapshot
returned: always
type: complex
contains: {}
vpc_id:
description: VPC of the database
returned: always
type: string
sample: vpc-abcd1234
'''
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
def common_snapshot_facts(module, conn, method, prefix, params):
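    # Shared helper: paginate the given describe_* call, treat a missing snapshot as an empty
    # result, and attach the tag list to each returned snapshot before snake_casing the keys.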
paginator = conn.get_paginator(method)
try:
results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
except is_boto3_error_code('%sNotFound' % prefix):
results = []
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, "trying to get snapshot information")
for snapshot in results:
try:
snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
aws_retry=True)['TagList'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])
return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
def cluster_snapshot_facts(module, conn):
snapshot_name = module.params.get('db_cluster_snapshot_identifier')
snapshot_type = module.params.get('snapshot_type')
    instance_name = module.params.get('db_cluster_identifier')
params = dict()
if snapshot_name:
params['DBClusterSnapshotIdentifier'] = snapshot_name
    if instance_name:
        params['DBClusterIdentifier'] = instance_name  # DescribeDBClusterSnapshots filters by DBClusterIdentifier
if snapshot_type:
params['SnapshotType'] = snapshot_type
        if snapshot_type == 'public':
            params['IncludePublic'] = True
        elif snapshot_type == 'shared':
            params['IncludeShared'] = True
return common_snapshot_facts(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
def standalone_snapshot_facts(module, conn):
snapshot_name = module.params.get('db_snapshot_identifier')
snapshot_type = module.params.get('snapshot_type')
instance_name = module.params.get('db_instance_identifier')
params = dict()
if snapshot_name:
params['DBSnapshotIdentifier'] = snapshot_name
if instance_name:
params['DBInstanceIdentifier'] = instance_name
if snapshot_type:
params['SnapshotType'] = snapshot_type
        if snapshot_type == 'public':
            params['IncludePublic'] = True
        elif snapshot_type == 'shared':
            params['IncludeShared'] = True
return common_snapshot_facts(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
def main():
argument_spec = dict(
db_snapshot_identifier=dict(aliases=['snapshot_name']),
db_instance_identifier=dict(),
db_cluster_identifier=dict(),
db_cluster_snapshot_identifier=dict(),
snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
)
conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
results = dict()
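    # Gather standalone snapshots unless a cluster identifier was supplied, and cluster
    # snapshots unless a standalone identifier was supplied (the identifiers are mutually exclusive).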
if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
results['snapshots'] = standalone_snapshot_facts(module, conn)
if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
results['cluster_snapshots'] = cluster_snapshot_facts(module, conn)
module.exit_json(changed=False, **results)
if __name__ == '__main__':
main()
| gpl-3.0 |
dparaujo/projeto | app_academico/semestre/migrations/0001_initial.py | 1 | 1044 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-30 23:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TblAcademicoSemestre',
fields=[
('codigo', models.AutoField(primary_key=True, serialize=False, verbose_name='C\xf3digo')),
('descricao', models.CharField(max_length=100, unique=True, verbose_name='Descri\xe7\xe3o')),
('data', models.DateField(auto_now_add=True, verbose_name='Data de cadastro')),
('hora', models.TimeField(auto_now_add=True, verbose_name='Hora de cadastro')),
('ativo', models.BooleanField(choices=[(True, 'Sim'), (False, 'N\xe3o')], verbose_name='Ativo')),
],
options={
'ordering': ['codigo'],
'db_table': 'tbl_academico_semestre',
},
),
]
| gpl-3.0 |
mnull/taccoin | contrib/bitrpc/bitrpc.py | 1 | 7836 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
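# Connect to the local JSON-RPC server (default port 9332), with credentials if provided above.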
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a taccoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a taccoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit |
dgzurita/odoo | addons/base_import_module/tests/test_module/__openerp__.py | 377 | 1290 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Test Module',
'category': 'Website',
'summary': 'Custom',
'version': '1.0',
'description': """
Test
""",
'author': 'OpenERP SA',
'depends': ['website'],
'data': [
'test.xml',
],
'installable': True,
'application': True,
}
| agpl-3.0 |
cstipkovic/spidermonkey-research | testing/marionette/harness/marionette/runner/mixins/browsermob-proxy-py/docs/conf.py | 2 | 7900 | # -*- coding: utf-8 -*-
#
# BrowserMob Proxy documentation build configuration file, created by
# sphinx-quickstart on Fri May 24 12:37:12 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc']
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BrowserMob Proxy'
copyright = u'2014, David Burns'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.0'
# The full version, including alpha/beta/rc tags.
release = '0.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BrowserMobProxydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'BrowserMobProxy.tex', u'BrowserMob Proxy Documentation',
u'David Burns', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'browsermobproxy', u'BrowserMob Proxy Documentation',
[u'David Burns'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'BrowserMobProxy', u'BrowserMob Proxy Documentation',
u'David Burns', 'BrowserMobProxy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mpl-2.0 |
nicholedwight/nichole-theme | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/bw.py | 364 | 1355 | # -*- coding: utf-8 -*-
"""
pygments.styles.bw
~~~~~~~~~~~~~~~~~~
Simple black/white only style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Operator, Generic
class BlackWhiteStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "italic",
Comment.Preproc: "noitalic",
Keyword: "bold",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold",
Operator.Word: "bold",
Name.Class: "bold",
Name.Namespace: "bold",
Name.Exception: "bold",
Name.Entity: "bold",
Name.Tag: "bold",
String: "italic",
String.Interpol: "bold",
String.Escape: "bold",
Generic.Heading: "bold",
Generic.Subheading: "bold",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold",
Error: "border:#FF0000"
}
| mit |
n-west/gnuradio-volk | gr-analog/python/analog/qa_pll_refout.py | 17 | 7816 | #!/usr/bin/env python
#
# Copyright 2004,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr, gr_unittest, analog, blocks
class test_pll_refout(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_pll_refout(self):
expected_result = ((1+0j),
(1+6.4087357643e-10j),
(0.999985277653+0.00542619498447j),
(0.999868750572+0.0162021834403j),
(0.99948567152+0.0320679470897j),
(0.99860727787+0.0527590736747j),
(0.996953129768+0.0780025869608j),
(0.994203746319+0.107512556016j),
(0.990011692047+0.140985429287j),
(0.984013140202+0.178095817566j),
(0.975838363171+0.218493551016j),
(0.965121984482+0.261800557375j),
(0.95151245594+0.307610183954j),
(0.934681296349+0.355486690998j),
(0.914401650429+0.404808044434j),
(0.890356600285+0.455263823271j),
(0.862329125404+0.506348133087j),
(0.830152392387+0.557536482811j),
(0.793714106083+0.608290970325j),
(0.752960026264+0.658066213131j),
(0.707896590233+0.706316053867j),
(0.658591926098+0.752500295639j),
(0.605175673962+0.796091973782j),
(0.547837555408+0.836584687233j),
(0.48682525754+0.873499393463j),
(0.42244040966+0.906390726566j),
(0.355197101831+0.934791445732j),
(0.285494059324+0.958380460739j),
(0.213591173291+0.976923108101j),
(0.139945343137+0.990159213543j),
(0.065038472414+0.997882783413j),
(-0.0106285437942+0.999943494797j),
(-0.0865436866879+0.996248066425j),
(-0.162189796567+0.986759603024j),
(-0.23705175519+0.971496999264j),
(-0.310622543097+0.950533330441j),
(-0.38240903616+0.923993110657j),
(-0.451937526464+0.89204955101j),
(-0.518758952618+0.854920566082j),
(-0.582311093807+0.812966048717j),
(-0.642372369766+0.76639264822j),
(-0.698591887951+0.715520322323j),
(-0.750654160976+0.660695314407j),
(-0.798280358315+0.602286040783j),
(-0.841228663921+0.540679454803j),
(-0.87929558754+0.476276367903j),
(-0.912315964699+0.409486919641j),
(-0.940161883831+0.340728074312j),
(-0.962742805481+0.270418733358j),
(-0.980004072189+0.198977485299j),
(-0.991925954819+0.126818284392j),
(-0.99851256609+0.0545223206282j),
(-0.999846458435-0.0175215266645j),
(-0.996021270752-0.0891158208251j),
(-0.987133920193-0.159895718098j),
(-0.973306238651-0.2295101583j),
(-0.954683184624-0.297624111176j),
(-0.931430280209-0.363919824362j),
(-0.903732538223-0.428097635508j),
(-0.871792256832-0.489875763655j),
(-0.835827112198-0.548992812634j),
(-0.796068251133-0.605206847191j),
(-0.752758979797-0.658296227455j),
(-0.706152498722-0.70805978775j),
(-0.656641483307-0.754202902317j),
(-0.604367733002-0.79670548439j),
(-0.549597978592-0.835429251194j),
(-0.492602348328-0.870254516602j),
(-0.433654457331-0.901079237461j),
(-0.373029649258-0.927819430828j),
(-0.31100410223-0.950408577919j),
(-0.247853919864-0.968797445297j),
(-0.183855071664-0.982953369617j),
(-0.119282215834-0.992860376835j),
(-0.0544078871608-0.998518764973j),
(0.0104992967099-0.999944865704j),
(0.0749994292855-0.997183561325j),
(0.138844624162-0.990314185619j),
(0.201967850327-0.979392170906j),
(0.264124274254-0.964488625526j),
(0.325075358152-0.945688128471j),
(0.3845885396-0.92308807373j),
(0.442438393831-0.89679890871j),
(0.498407125473-0.866943061352j),
(0.552284479141-0.833655714989j),
(0.603869199753-0.797083437443j),
(0.652970373631-0.757383465767j),
(0.69940674305-0.714723825455j),
(0.743007957935-0.66928255558j),
(0.78350687027-0.62138313055j),
(0.820889055729-0.571087777615j),
(0.855021059513-0.51859331131j),
(0.885780930519-0.46410369873j),
(0.913058102131-0.407829582691j),
(0.936754107475-0.349988251925j),
(0.956783294678-0.290801793337j),
(0.973072886467-0.230497643352j),
(0.985563337803-0.169307261705j),
(0.9942086339-0.1074674353j),
(0.9989772439-0.0452152714133j))
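        # Feed a clean complex tone at fs/100 into the PLL and compare the first 100
        # reference-output samples against the expected locking trajectory above.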
sampling_freq = 10e3
freq = sampling_freq / 100
loop_bw = math.pi/100.0
maxf = 1
minf = -1
src = analog.sig_source_c(sampling_freq, analog.GR_COS_WAVE, freq, 1.0)
pll = analog.pll_refout_cc(loop_bw, maxf, minf)
head = blocks.head(gr.sizeof_gr_complex, int (freq))
dst = blocks.vector_sink_c()
self.tb.connect(src, pll, head)
self.tb.connect(head, dst)
self.tb.run()
dst_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 4)
if __name__ == '__main__':
gr_unittest.run(test_pll_refout, "test_pll_refout.xml")
| gpl-3.0 |
saleemjaveds/https-github.com-openstack-nova | nova/tests/virt/hyperv/test_pathutils.py | 12 | 2298 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import pathutils
class PathUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V PathUtils class."""
def setUp(self):
self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
self.fake_instance_name = 'fake_instance_name'
self._pathutils = pathutils.PathUtils()
super(PathUtilsTestCase, self).setUp()
def _mock_lookup_configdrive_path(self, ext):
self._pathutils.get_instance_dir = mock.MagicMock(
return_value=self.fake_instance_dir)
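        # Stub out exists() so that only a config drive with the requested extension appears to be present.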
def mock_exists(*args, **kwargs):
path = args[0]
            return path[(path.rfind('.') + 1):] == ext
self._pathutils.exists = mock_exists
configdrive_path = self._pathutils.lookup_configdrive_path(
self.fake_instance_name)
return configdrive_path
def test_lookup_configdrive_path(self):
for format_ext in constants.DISK_FORMAT_MAP:
configdrive_path = self._mock_lookup_configdrive_path(format_ext)
fake_path = os.path.join(self.fake_instance_dir,
'configdrive.' + format_ext)
self.assertEqual(configdrive_path, fake_path)
def test_lookup_configdrive_path_non_exist(self):
self._pathutils.get_instance_dir = mock.MagicMock(
return_value=self.fake_instance_dir)
self._pathutils.exists = mock.MagicMock(return_value=False)
configdrive_path = self._pathutils.lookup_configdrive_path(
self.fake_instance_name)
self.assertIsNone(configdrive_path)
| apache-2.0 |
ted-gould/nova | nova/virt/hyperv/migrationops.py | 15 | 13161 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for migration / resize operations.
"""
import os
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.virt import configdrive
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class MigrationOps(object):
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._imagecache = imagecache.ImageCache()
def _migrate_disk_files(self, instance_name, disk_files, dest):
# TODO(mikal): it would be nice if this method took a full instance,
# because it could then be passed to the log messages below.
same_host = False
if dest in self._hostutils.get_local_ips():
same_host = True
LOG.debug("Migration target is the source host")
else:
LOG.debug("Migration target host: %s", dest)
instance_path = self._pathutils.get_instance_dir(instance_name)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name, remove_dir=True, create_dir=True)
dest_path = None
try:
if same_host:
# Since source and target are the same, we copy the files to
# a temporary location before moving them into place
dest_path = '%s_tmp' % instance_path
if self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
self._pathutils.makedirs(dest_path)
else:
dest_path = self._pathutils.get_instance_dir(
instance_name, dest, remove_dir=True)
for disk_file in disk_files:
# Skip the config drive as the instance is already configured
if os.path.basename(disk_file).lower() != 'configdrive.vhd':
LOG.debug('Copying disk "%(disk_file)s" to '
'"%(dest_path)s"',
{'disk_file': disk_file, 'dest_path': dest_path})
self._pathutils.copy(disk_file, dest_path)
self._pathutils.move_folder_files(instance_path, revert_path)
if same_host:
self._pathutils.move_folder_files(dest_path, instance_path)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_failed_disk_migration(instance_path, revert_path,
dest_path)
def _cleanup_failed_disk_migration(self, instance_path,
revert_path, dest_path):
try:
if dest_path and self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
if self._pathutils.exists(revert_path):
self._pathutils.rename(revert_path, instance_path)
except Exception as ex:
# Log and ignore this exception
LOG.exception(ex)
LOG.error(_LE("Cannot cleanup migration files"))
def _check_target_flavor(self, instance, flavor):
new_root_gb = flavor.root_gb
curr_root_gb = instance.root_gb
if new_root_gb < curr_root_gb:
raise exception.InstanceFaultRollback(
vmutils.VHDResizeException(
_("Cannot resize the root disk to a smaller size. "
"Current size: %(curr_root_gb)s GB. Requested size: "
"%(new_root_gb)s GB") %
{'curr_root_gb': curr_root_gb,
'new_root_gb': new_root_gb}))
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None, timeout=0,
retry_interval=0):
LOG.debug("migrate_disk_and_power_off called", instance=instance)
self._check_target_flavor(instance, flavor)
self._vmops.power_off(instance, timeout, retry_interval)
(disk_files,
volume_drives) = self._vmutils.get_vm_storage_paths(instance.name)
if disk_files:
self._migrate_disk_files(instance.name, disk_files, dest)
self._vmops.destroy(instance, destroy_disks=False)
# disk_info is not used
return ""
def confirm_migration(self, migration, instance, network_info):
LOG.debug("confirm_migration called", instance=instance)
self._pathutils.get_instance_migr_revert_dir(instance.name,
remove_dir=True)
def _revert_migration_files(self, instance_name):
instance_path = self._pathutils.get_instance_dir(
instance_name, create_dir=False, remove_dir=True)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name)
self._pathutils.rename(revert_path, instance_path)
def _check_and_attach_config_drive(self, instance, vm_gen):
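        # Re-attach the instance's existing config drive (when one is required) after the VM has been recreated.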
if configdrive.required_by(instance):
configdrive_path = self._pathutils.lookup_configdrive_path(
instance.name)
if configdrive_path:
self._vmops.attach_config_drive(instance, configdrive_path,
vm_gen)
else:
raise vmutils.HyperVException(
_("Config drive is required by instance: %s, "
"but it does not exist.") % instance.name)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("finish_revert_migration called", instance=instance)
instance_name = instance.name
self._revert_migration_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
image_meta = objects.ImageMeta.from_instance(instance)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
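        # Copy the base VHD next to the differencing disk, re-link it as the parent,
        # then merge the two into a single flat disk that replaces the differencing VHD.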
base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
os.path.basename(base_vhd_path))
try:
LOG.debug('Copying base disk %(base_vhd_path)s to '
'%(base_vhd_copy_path)s',
{'base_vhd_path': base_vhd_path,
'base_vhd_copy_path': base_vhd_copy_path})
self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_copy_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_copy_path)
LOG.debug("Merging base disk %(base_vhd_copy_path)s and "
"diff disk %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)
# Replace the differential VHD with the merged one
self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_copy_path):
self._pathutils.remove(base_vhd_copy_path)
def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
curr_size = vhd_info['MaxInternalSize']
if new_size < curr_size:
raise vmutils.VHDResizeException(_("Cannot resize a VHD "
"to a smaller size"))
elif new_size > curr_size:
self._resize_vhd(vhd_path, new_size)
def _resize_vhd(self, vhd_path, new_size):
if vhd_path.split('.')[-1].lower() == "vhd":
LOG.debug("Getting parent disk info for disk: %s", vhd_path)
base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
if base_disk_path:
# A differential VHD cannot be resized. This limitation
# does not apply to the VHDX format.
self._merge_base_vhd(vhd_path, base_disk_path)
LOG.debug("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s",
{'vhd_path': vhd_path, 'new_size': new_size})
self._vhdutils.resize_vhd(vhd_path, new_size)
def _check_base_disk(self, context, instance, diff_vhd_path,
src_base_disk_path):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
# If the location of the base host differs between source
# and target hosts we need to reconnect the base disk
if src_base_disk_path.lower() != base_vhd_path.lower():
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
LOG.debug("finish_migration called", instance=instance)
instance_name = instance.name
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
if not root_vhd_path:
raise vmutils.HyperVException(_("Cannot find boot VHD "
"file for instance: %s") %
instance_name)
root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
src_base_disk_path = root_vhd_info.get("ParentPath")
if src_base_disk_path:
self._check_base_disk(context, instance, root_vhd_path,
src_base_disk_path)
if resize_instance:
new_size = instance.root_gb * units.Gi
self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
if resize_instance:
new_size = instance.get('ephemeral_gb', 0) * units.Gi
if not eph_vhd_path:
if new_size:
eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)
else:
eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path)
self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
| apache-2.0 |
pratikmallya/hue | desktop/core/src/desktop/lib/metrics/file_reporter.py | 19 | 2262 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import tempfile
import threading
from pyformance.reporters.reporter import Reporter
from desktop.lib.metrics import global_registry
LOG = logging.getLogger(__name__)
class FileReporter(Reporter):
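  """Reporter that periodically dumps the metrics registry to a JSON file on disk."""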
def __init__(self, location, *args, **kwargs):
super(FileReporter, self).__init__(*args, **kwargs)
self.location = location
def report_now(self, registry=None, timestamp=None):
dirname = os.path.dirname(self.location)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError, e:
LOG.error('failed to make the directory %s: %s' % (dirname, e))
# Write the metrics to a temporary file, then atomically
# rename the file to the real location.
f = tempfile.NamedTemporaryFile(
dir=dirname,
delete=False)
try:
json.dump(self.registry.dump_metrics(), f)
f.close()
os.rename(f.name, self.location)
except Exception:
LOG.exception('failed to write metrics to file')
os.remove(f.name)
raise
_reporter = None
def start_file_reporter():
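  # Create and start a single module-level FileReporter when a metrics file location and collection interval are configured.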
from desktop.conf import METRICS
global _reporter
if _reporter is None:
location = METRICS.LOCATION.get()
interval = METRICS.COLLECTION_INTERVAL.get()
if location is not None and interval is not None:
_reporter = FileReporter(
location,
reporting_interval=interval / 1000.0,
registry=global_registry())
_reporter.start()
| apache-2.0 |
naparuba/opsbro | data/global-configuration/packs/mongodb/collectors/pymongo/mongo_replica_set_client.py | 53 | 1955 | # Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Deprecated. See :doc:`/examples/high_availability`."""
import warnings
from pymongo import mongo_client
class MongoReplicaSetClient(mongo_client.MongoClient):
"""Deprecated alias for :class:`~pymongo.mongo_client.MongoClient`.
:class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
will be removed in a future version of PyMongo.
.. versionchanged:: 3.0
:class:`~pymongo.mongo_client.MongoClient` is now the one and only
client class for a standalone server, mongos, or replica set.
It includes the functionality that had been split into
:class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`: it
can connect to a replica set, discover all its members, and monitor
the set for stepdowns, elections, and reconfigs.
The ``refresh`` method is removed from
:class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`,
as are the ``seeds`` and ``hosts`` properties.
"""
def __init__(self, *args, **kwargs):
warnings.warn('MongoReplicaSetClient is deprecated, use MongoClient'
' to connect to a replica set',
DeprecationWarning, stacklevel=2)
super(MongoReplicaSetClient, self).__init__(*args, **kwargs)
def __repr__(self):
return "MongoReplicaSetClient(%s)" % (self._repr_helper(),)
| mit |
Taapat/enigma2-openpli-fulan | lib/python/Tools/ASCIItranslit.py | 85 | 4020 | # -*- coding:utf-8 -*-
ASCIItranslit = {
0x0022: "''",
0x002A: "_",
0x002F: "_",
0x003A: "_",
0x003C: "_",
0x003D: "_",
0x003E: "_",
0x003F: "_",
0x005C: "_",
0x007C: "_",
0x007F: "",
0x00A0: "_",
0x00A1: "!",
0x00A2: "c",
0x00A3: "lb",
0x00A4: "",
0x00A5: "yen",
0x00A6: "I",
0x00A7: "SS",
0x00A8: "'",
0x00A9: "(c)",
0x00AA: "a",
0x00AB: "<<",
0x00AC: "not",
0x00AD: "-",
0x00AE: "(R)",
0x00AF: "",
0x00B0: "^0",
0x00B1: "+-",
0x00B2: "^2",
0x00B3: "^3",
0x00B4: "'",
0x00B5: "u",
0x00B6: "P",
0x00B7: ".",
0x00B8: ",",
0x00B9: "^1",
0x00BA: "o",
0x00BB: ">>",
0x00BC: "1_4 ",
0x00BD: "1_2 ",
0x00BE: "3_4 ",
0x00BF: "_",
0x00C0: "`A",
0x00C1: "'A",
0x00C2: "^A",
0x00C3: "~A",
0x00C4: "Ae",
0x00C5: "A",
0x00C6: "AE",
0x00C7: "C",
0x00C8: "`E",
0x00C9: "'E",
0x00CA: "^E",
0x00CB: "E",
0x00CC: "`I",
0x00CD: "'I",
0x00CE: "^I",
0x00CF: "I",
0x00D0: "D",
0x00D1: "~N",
0x00D2: "`O",
0x00D3: "'O",
0x00D4: "^O",
0x00D5: "~O",
0x00D6: "Oe",
0x00D7: "x",
0x00D8: "O",
0x00D9: "`U",
0x00DA: "'U",
0x00DB: "^U",
0x00DC: "Ue",
0x00DD: "'Y",
0x00DE: "Th",
0x00DF: "ss",
0x00E0: "`a",
0x00E1: "'a",
0x00E2: "^a",
0x00E3: "~a",
0x00E4: "AE",
0x00E5: "a",
0x00E6: "ae",
0x00E7: "c",
0x00E8: "`e",
0x00E9: "'e",
0x00EA: "^e",
0x00EB: "e",
0x00EC: "`i",
0x00ED: "'i",
0x00EE: "^i",
0x00EF: "i",
0x00F0: "d",
0x00F1: "~n",
0x00F2: "`o",
0x00F3: "'o",
0x00F4: "^o",
0x00F5: "~o",
0x00F6: "oe",
0x00F7: "_",
0x00F8: "o",
0x00F9: "`u",
0x00FA: "'u",
0x00FB: "^u",
0x00FC: "ue",
0x00FD: "'y",
0x00FE: "th",
0x00FF: "Y",
0x0100: "A",
0x0101: "a",
0x0102: "A",
0x0103: "a",
0x0104: "A",
0x0105: "a",
0x0106: "'C",
0x0107: "'c",
0x0108: "^C",
0x0109: "^c",
0x010A: "C",
0x010B: "c",
0x010C: "C",
0x010D: "c",
0x010E: "D",
0x010F: "d",
0x0110: "D",
0x0111: "d",
0x0112: "E",
0x0113: "e",
0x0114: "E",
0x0115: "e",
0x0116: "E",
0x0117: "e",
0x0118: "E",
0x0119: "e",
0x011A: "E",
0x011B: "e",
0x011C: "^G",
0x011D: "^g",
0x011E: "G",
0x011F: "g",
0x0120: "G",
0x0121: "g",
0x0122: "G",
0x0123: "g",
0x0124: "^H",
0x0125: "^h",
0x0126: "H",
0x0127: "h",
0x0128: "~I",
0x0129: "~i",
0x012A: "I",
0x012B: "i",
0x012C: "I",
0x012D: "i",
0x012E: "I",
0x012F: "i",
0x0130: "I",
0x0131: "i",
0x0132: "IJ",
0x0133: "ij",
0x0134: "^J",
0x0135: "^j",
0x0136: "K",
0x0137: "k",
0x0138: "",
0x0139: "L",
0x013A: "l",
0x013B: "L",
0x013C: "l",
0x013D: "L",
0x013E: "l",
0x013F: "L",
0x0140: "l",
0x0141: "L",
0x0142: "l",
0x0143: "'N",
0x0144: "'n",
0x0145: "N",
0x0146: "n",
0x0147: "N",
0x0148: "n",
0x0149: "n",
0x014A: "_",
0x014B: "_",
0x014C: "O",
0x014D: "o",
0x014E: "O",
0x014F: "o",
0x0150: "''o",
0x0152: "OE",
0x0153: "oe",
0x0154: "'R",
0x0155: "'r",
0x0156: "R",
0x0157: "r",
0x0158: "R",
0x0159: "r",
0x015A: "'s",
0x015B: "'s",
0x015C: "^S",
0x015D: "^s",
0x015E: "S",
0x015F: "s",
0x0160: "S",
0x0161: "s",
0x0162: "T",
0x0163: "t",
0x0164: "T",
0x0165: "t",
0x0166: "T",
0x0167: "t",
0x0168: "~U",
0x0169: "~u",
0x016A: "U",
0x016B: "u",
0x016C: "U",
0x016D: "u",
0x016E: "U",
0x016F: "u",
0x0170: "''u",
0x0172: "U",
0x0173: "u",
0x0174: "^W",
0x0175: "^w",
0x0176: "^Y",
0x0177: "^y",
0x0178: "Y",
0x0179: "'Z",
0x017A: "'z",
0x017B: "Z",
0x017C: "z",
0x017D: "Z",
0x017E: "z",
0x017F: "s",
0x018F: "_",
0x0192: "f",
0x01C4: "DZ",
0x01C5: "DZ",
0x01C6: "DZ",
0x01C7: "LJ",
0x01C8: "Lj",
0x01C9: "lj",
0x01CA: "NJ",
0x01CB: "Nj",
0x01CC: "nj",
0x01F1: "DZ",
0x01F2: "Dz",
0x01F3: "dz",
0x0218: "S",
0x0219: "s",
0x021A: "T",
0x021B: "t",
0x0259: "_",
0x20AC: "EUR" }
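# Convert a UTF-8 string to an uppercase, ASCII-only form using the table above;
# unmapped or control characters are replaced by "_".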
def legacyEncode(string):
string2 = ""
for z, char in enumerate(string.decode("utf-8")):
i = ord(char)
if i < 33:
string2 += "_"
elif i in ASCIItranslit:
string2 += ASCIItranslit[i]
else:
try:
string2 += char.encode('ascii', 'strict')
except:
string2 += "_"
return string2.upper()
| gpl-2.0 |
y12uc231/edx-platform | lms/djangoapps/instructor/features/common.py | 47 | 4576 | """
Define common steps for instructor dashboard acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from lettuce import world, step
from mock import patch
from nose.tools import assert_in # pylint: disable=no-name-in-module
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given I am "([^"]*)" for a very large course')
def make_staff_or_instructor_for_large_course(step, role):
make_large_course(step, role)
@patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0})
def make_large_course(step, role):
i_am_staff_or_instructor(step, role)
@step(u'Given I am "([^"]*)" for a course')
def i_am_staff_or_instructor(step, role): # pylint: disable=unused-argument
## In summary: makes a test course, makes a new Staff or Instructor user
## (depending on `role`), and logs that user in to the course
# Store the role
assert_in(role, ['instructor', 'staff'])
# Clear existing courses to avoid conflicts
world.clear_courses()
# Create a new course
course = world.CourseFactory.create(
org='edx',
number='999',
display_name='Test Course'
)
world.course_key = course.id
world.role = 'instructor'
# Log in as the an instructor or staff for the course
if role == 'instructor':
# Make & register an instructor for the course
world.instructor = InstructorFactory(course_key=world.course_key)
world.enroll_user(world.instructor, world.course_key)
world.log_in(
username=world.instructor.username,
password='test',
email=world.instructor.email,
name=world.instructor.profile.name
)
else:
world.role = 'staff'
# Make & register a staff member
world.staff = StaffFactory(course_key=world.course_key)
world.enroll_user(world.staff, world.course_key)
world.log_in(
username=world.staff.username,
password='test',
email=world.staff.email,
name=world.staff.profile.name
)
def go_to_section(section_name):
# section name should be one of
# course_info, membership, student_admin, data_download, analytics, send_email
world.visit(u'/courses/{}'.format(world.course_key))
world.css_click(u'a[href="/courses/{}/instructor"]'.format(world.course_key))
world.css_click('a[data-section="{0}"]'.format(section_name))
@step(u'I click "([^"]*)"')
def click_a_button(step, button): # pylint: disable=unused-argument
if button == "Generate Grade Report":
# Go to the data download section of the instructor dash
go_to_section("data_download")
# Click generate grade report button
world.css_click('input[name="calculate-grades-csv"]')
# Expect to see a message that grade report is being generated
expected_msg = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
world.wait_for_visible('#report-request-response')
assert_in(
expected_msg, world.css_text('#report-request-response'),
msg="Could not find grade report generation success message."
)
elif button == "Grading Configuration":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="dump-gradeconf"]')
elif button == "List enrolled students' profile information":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles"]')
elif button == "Download profile information as a CSV":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles-csv"]')
else:
raise ValueError("Unrecognized button option " + button)
@step(u'I visit the "([^"]*)" tab')
def visit_a_tab(step, tab_name):  # pylint: disable=unused-argument
# course_info, membership, student_admin, data_download, analytics, send_email
tab_name_dict = {
'Course Info': 'course_info',
'Membership': 'membership',
'Student Admin': 'student_admin',
'Data Download': 'data_download',
'Analytics': 'analytics',
'Email': 'send_email',
}
go_to_section(tab_name_dict[tab_name])
| agpl-3.0 |
jmartinezchaine/OpenERP | openerp/workflow/wkf_logs.py | 15 | 1523 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# May be uncommented to log workflow modifications
#
import openerp.netsvc as netsvc
def log(cr,ident,act_id,info=''):
return
# msg = """
#res_type: %r
#res_id: %d
#uid: %d
#act_id: %d
#info: %s
#""" % (ident[1], ident[2], ident[0], act_id, info)
#cr.execute('insert into wkf_logs (res_type, res_id, uid, act_id, time, info) values (%s,%s,%s,%s,current_time,%s)', (ident[1],int(ident[2]),int(ident[0]),int(act_id),info))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lertech/extra-addons | website_disable_odoo/__init__.py | 1 | 1185 | # -*- encoding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
# OpenERP, Odoo Source Management Solution
# Copyright (c) 2015 Antiun Ingeniería S.L. (http://www.antiun.com)
# Antonio Espinosa <[email protected]>
# Daniel Góme-Zurita <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| gpl-3.0 |
wolverineav/neutron | neutron/db/portsecurity_db.py | 3 | 2785 | # Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.v2 import attributes as attrs
from neutron.common import utils
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db_common
from neutron.extensions import portsecurity as psec
class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon):
# Register dict extend functions for ports and networks
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attrs.NETWORKS, ['_extend_port_security_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attrs.PORTS, ['_extend_port_security_dict'])
def _extend_port_security_dict(self, response_data, db_data):
if ('port-security' in
getattr(self, 'supported_extension_aliases', [])):
super(PortSecurityDbMixin, self)._extend_port_security_dict(
response_data, db_data)
def _determine_port_security_and_has_ip(self, context, port):
"""Returns a tuple of booleans (port_security_enabled, has_ip).
        port_security_enabled is the value associated with the port if one is
        present; otherwise the value associated with the network is returned.
        has_ip indicates whether the port has an ip associated with it.
"""
has_ip = self._ip_on_port(port)
# we don't apply security groups for dhcp, router
if port.get('device_owner') and utils.is_port_trusted(port):
return (False, has_ip)
if attrs.is_attr_set(port.get(psec.PORTSECURITY)):
port_security_enabled = port[psec.PORTSECURITY]
        # If the port has an ip and security_groups are passed in,
        # conveniently set port_security_enabled to True; this way the
        # user doesn't also have to pass port_security_enabled=True
        # when creating ports.
elif (has_ip and attrs.is_attr_set(port.get('security_groups'))):
port_security_enabled = True
else:
port_security_enabled = self._get_network_security_binding(
context, port['network_id'])
return (port_security_enabled, has_ip)
def _ip_on_port(self, port):
return bool(port.get('fixed_ips'))
| apache-2.0 |
route-nazionale/event_subscribe | event_subscribe/default_settings.py | 1 | 2179 | """
Django settings for event_subscribe project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os, locale
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o*4+4o2wia&n8_i02q9rxhhyjzzb_ueqcn=y!(ws2-z7pgydoi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base',
'subscribe',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'event_subscribe.urls'
WSGI_APPLICATION = 'event_subscribe.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'it'
TIME_ZONE = 'Europe/Rome'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# useful for strftime
locale.setlocale(locale.LC_ALL, 'it_IT.UTF8')
| agpl-3.0 |
sadanandb/pmt | src/tactic/ui/widget/__init__.py | 5 | 1116 | ###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
# This module contains a collection of generalized utility widgets
from upload_wdg import *
from sobject_group_wdg import *
from calendar_wdg import *
from sobject_calendar_wdg import *
from data_export_wdg import *
from misc_input_wdg import *
from button_wdg import *
from button_new_wdg import *
from gear_menu_wdg import *
from chooser_wdg import *
from smart_select_wdg import *
from proxy_wdg import *
from checkin_wdg import *
from discussion_wdg import *
from text_wdg import *
from file_browser_wdg import *
from format_value_wdg import *
from embed_wdg import *
from swap_display_wdg import *
from reset_password_wdg import *
from title_wdg import *
from ckeditor_wdg import *
from video_wdg import *
#from color_input_wdg import *
#from preview_change_wdg import *
| epl-1.0 |
40223112/w16test | ref/gear.py | 68 | 21704 |
import cherrypy
import os
import sys
# 這個程式要計算正齒輪的齒面寬, 資料庫連結希望使用 pybean 與 SQLite
# 導入 pybean 模組與所要使用的 Store 及 SQLiteWriter 方法
from pybean import Store, SQLiteWriter
import math
# 確定程式檔案所在目錄, 在 Windows 有最後的反斜線
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# 將所在目錄設為系統搜尋目錄
sys.path.append(_curdir)
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# while program is executed in OpenShift
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# while program is executed in localhost
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
# 這是 Gear 設計資料表的定義
'''
lewis.db 中有兩個資料表, steel 與 lewis
CREATE TABLE steel (
serialno INTEGER,
unsno TEXT,
aisino TEXT,
treatment TEXT,
yield_str INTEGER,
tensile_str INTEGER,
stretch_ratio INTEGER,
sectional_shr INTEGER,
brinell INTEGER
);
CREATE TABLE lewis (
serialno INTEGER PRIMARY KEY
NOT NULL,
gearno INTEGER,
type1 NUMERIC,
type4 NUMERIC,
type3 NUMERIC,
type2 NUMERIC
);
'''
class Gear(object):
def __init__(self):
# hope to create downloads and images directories
if not os.path.isdir(download_root_dir+"downloads"):
try:
os.makedirs(download_root_dir+"downloads")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"images"):
try:
os.makedirs(download_root_dir+"images")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"tmp"):
try:
os.makedirs(download_root_dir+"tmp")
except:
print("mkdir error")
@cherrypy.expose
def default(self, attr='default', *args, **kwargs):
raise cherrypy.HTTPRedirect("/")
# 各組利用 index 引導隨後的程式執行
@cherrypy.expose
def index(self, *args, **kwargs):
# 進行資料庫檔案連結, 並且取出所有資料
try:
# 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立
# 因為程式以 application 所在目錄執行, 因此利用相對目錄連結 lewis.db 資料庫檔案
SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
#material = SQLite連結.find_one("steel","serialno = ?",[序號])
# str(SQLite連結.count("steel")) 將傳回 70, 表示資料庫中有 70 筆資料
material = SQLite連結.find("steel")
# 所傳回的 material 為 iterator
'''
outstring = ""
for material_item in material:
outstring += str(material_item.serialno) + ":" + material_item.unsno + "_" + material_item.treatment + "<br />"
return outstring
'''
except:
return "抱歉! 資料庫無法連線<br />"
outstring = '''
<form id=entry method=post action="gear_width">
請填妥下列參數,以完成適當的齒尺寸大小設計。<br />
馬達馬力:<input type=text name=horsepower id=horsepower value=100 size=10>horse power<br />
馬達轉速:<input type=text name=rpm id=rpm value=1120 size=10>rpm<br />
齒輪減速比: <input type=text name=ratio id=ratio value=4 size=10><br />
齒形:<select name=toothtype id=toothtype>
<option value=type1>壓力角20度,a=0.8,b=1.0
<option value=type2>壓力角20度,a=1.0,b=1.25
<option value=type3>壓力角25度,a=1.0,b=1.25
<option value=type4>壓力角25度,a=1.0,b=1.35
</select><br />
安全係數:<input type=text name=safetyfactor id=safetyfactor value=3 size=10><br />
齒輪材質:<select name=material_serialno id=material_serialno>
'''
for material_item in material:
outstring += "<option value=" + str(material_item.serialno) + ">UNS - " + \
material_item.unsno + " - " + material_item.treatment
outstring += "</select><br />"
outstring += "小齒輪齒數:<input type=text name=npinion id=npinion value=18 size=10><br />"
outstring += "<input type=submit id=submit value=進行運算>"
outstring += "</form>"
return outstring
    # Practice-problem notes and scratch solutions (not part of the Gear class):
    # The 5 problems
    # (The following problems are ridiculously simple, but you'd be surprised to discover how many people struggle with them. To the point of not getting anything done at all. Seriously.)
    # Problem 1
    # Write three functions that compute the sum of the numbers in a given list using a for-loop, a while-loop, and recursion.
    # Problem 2
    # Write a function that combines two lists by alternatingly taking elements. For example: given the two lists [a, b, c] and [1, 2, 3], the function should return [a, 1, b, 2, c, 3].
    # Problem 3
    # Write a function that computes the list of the first 100 Fibonacci numbers. By definition, the first two numbers in the Fibonacci sequence are 0 and 1, and each subsequent number is the sum of the previous two. As an example, here are the first 10 Fibonacci numbers: 0, 1, 1, 2, 3, 5, 8, 13, 21, and 34.
    # Problem 4
    # Write a function that, given a list of non-negative integers, arranges them such that they form the largest possible number. For example, given [50, 2, 1, 9], the largest formed number is 95021.
    # Problem 5
    # Write a program that outputs all possibilities to put + or - or nothing between the numbers 1, 2, ..., 9 (in this order) such that the result is always 100. For example: 1 + 2 + 34 - 5 + 67 - 8 + 9 = 100.
    # Problem 1
    # Write three functions that compute the sum of the numbers in a given list using a for-loop, a while-loop, and recursion.
    # def for_sum(mylist):
    #     sum = 0
    #     for i in range(len(mylist)):
    #         sum += mylist[i]
    #     return sum
    # mylist = [1, 4, 5, 3, 7]
    # sum = for_sum(mylist)
    # g.es("sum is:", sum)
    # def while_sum(mylist):
    #     i = 0
    #     sum = 0
    #     while i < len(mylist):
    #         sum += mylist[i]
    #         i += 1
    #     return sum
    # mylist = [1, 4, 5, 3, 7]
    # sum = while_sum(mylist)
    # g.es("sum is:", sum)
    # def recur_sum(mylist):
    #     if len(mylist) == 1:
    #         return mylist[0]
    #     else:
    #         g.es(mylist[0], "+ 遞迴加(", mylist[1:], ")")
    #         return mylist[0] + recur_sum(mylist[1:])
    # mylist = [1, 4, 5, 3, 7]
    # sum = recur_sum(mylist)
    # g.es("sum is:", sum)
@cherrypy.expose
def interpolation(self, small_gear_no=18, gear_type=1):
SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
# 使用內插法求值
# 找出比目標齒數大的其中的最小的,就是最鄰近的大值
lewis_factor = SQLite連結.find_one("lewis","gearno > ?",[small_gear_no])
if(gear_type == 1):
larger_formfactor = lewis_factor.type1
elif(gear_type == 2):
larger_formfactor = lewis_factor.type2
elif(gear_type == 3):
larger_formfactor = lewis_factor.type3
else:
larger_formfactor = lewis_factor.type4
larger_toothnumber = lewis_factor.gearno
# 找出比目標齒數小的其中的最大的,就是最鄰近的小值
lewis_factor = SQLite連結.find_one("lewis","gearno < ? order by gearno DESC",[small_gear_no])
if(gear_type == 1):
smaller_formfactor = lewis_factor.type1
elif(gear_type == 2):
smaller_formfactor = lewis_factor.type2
elif(gear_type == 3):
smaller_formfactor = lewis_factor.type3
else:
smaller_formfactor = lewis_factor.type4
smaller_toothnumber = lewis_factor.gearno
calculated_factor = larger_formfactor + (small_gear_no - larger_toothnumber) * (larger_formfactor - smaller_formfactor) / (larger_toothnumber - smaller_toothnumber)
# 只傳回小數點後五位數
return str(round(calculated_factor, 5))
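        # Illustrative example with made-up table values (the real numbers come
        # from the lewis table): if the table held gearno 20 -> 0.320 and
        # gearno 22 -> 0.330 for the chosen tooth type, then a 21-tooth pinion
        # would give 0.330 + (21 - 22) * (0.330 - 0.320) / (22 - 20) = 0.325,
        # i.e. plain linear interpolation anchored on the nearest larger entry.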
# 改寫為齒面寬的設計函式
@cherrypy.expose
def gear_width(self, horsepower=100, rpm=1000, ratio=4, toothtype=1, safetyfactor=2, material_serialno=1, npinion=18):
SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
outstring = ""
# 根據所選用的齒形決定壓力角
if(toothtype == 1 or toothtype == 2):
壓力角 = 20
else:
壓力角 = 25
# 根據壓力角決定最小齒數
if(壓力角== 20):
最小齒數 = 18
else:
最小齒數 = 12
# 直接設最小齒數
if int(npinion) <= 最小齒數:
npinion = 最小齒數
# 大於400的齒數則視為齒條(Rack)
if int(npinion) >= 400:
npinion = 400
# 根據所選用的材料查詢強度值
# 由 material之序號查 steel 表以得材料之降伏強度S單位為 kpsi 因此查得的值要成乘上1000
# 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立
#SQLite連結 = Store(SQLiteWriter("lewis.db", frozen=True))
# 指定 steel 資料表
steel = SQLite連結.new("steel")
# 資料查詢
#material = SQLite連結.find_one("steel","unsno=? and treatment=?",[unsno, treatment])
material = SQLite連結.find_one("steel","serialno=?",[material_serialno])
# 列出 steel 資料表中的資料筆數
#print(SQLite連結.count("steel"))
#print (material.yield_str)
strengthstress = material.yield_str*1000
# 由小齒輪的齒數與齒形類別,查詢lewis form factor
# 先查驗是否有直接對應值
on_table = SQLite連結.count("lewis","gearno=?",[npinion])
if on_table == 1:
# 直接進入設計運算
#print("直接運算")
#print(on_table)
lewis_factor = SQLite連結.find_one("lewis","gearno=?",[npinion])
#print(lewis_factor.type1)
# 根據齒形查出 formfactor 值
if(toothtype == 1):
formfactor = lewis_factor.type1
elif(toothtype == 2):
formfactor = lewis_factor.type2
elif(toothtype == 3):
formfactor = lewis_factor.type3
else:
formfactor = lewis_factor.type4
else:
# 沒有直接對應值, 必須進行查表內插運算後, 再執行設計運算
#print("必須內插")
#print(interpolation(npinion, gear_type))
            formfactor = float(self.interpolation(npinion, toothtype))  # interpolation() returns a rounded string
# 開始進行設計運算
ngear = int(npinion) * int(ratio)
# 重要的最佳化設計---儘量用整數的diametralpitch
# 先嘗試用整數算若 diametralpitch 找到100 仍無所獲則改用 0.25 作為增量再不行則宣告 fail
counter = 0
i = 0.1
facewidth = 0
circularpitch = 0
while (facewidth <= 3 * circularpitch or facewidth >= 5 * circularpitch):
diametralpitch = i
#circularpitch = 3.14159/diametralpitch
circularpitch = math.pi/diametralpitch
pitchdiameter = int(npinion)/diametralpitch
#pitchlinevelocity = 3.14159*pitchdiameter*rpm/12
pitchlinevelocity = math.pi*pitchdiameter * float(rpm)/12
transmittedload = 33000*float(horsepower)/pitchlinevelocity
velocityfactor = 1200/(1200 + pitchlinevelocity)
# formfactor is Lewis form factor
# formfactor need to get from table 13-3 and determined ty teeth number and type of tooth
# formfactor = 0.293
# 90 is the value get from table corresponding to material type
facewidth = transmittedload*diametralpitch*float(safetyfactor)/velocityfactor/formfactor/strengthstress
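            # The line above is the Lewis bending equation solved for face width:
            #   F = Wt * Pd * n_safety / (Kv * Y * S)
            # with Kv = 1200 / (1200 + V) the velocity factor computed above, Y the
            # Lewis form factor and S the material yield strength in psi.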
if(counter>5000):
outstring += "超過5000次的設計運算,仍無法找到答案!<br />"
outstring += "可能所選用的傳遞功率過大,或無足夠強度的材料可以使用!<br />"
# 離開while迴圈
break
i += 0.1
counter += 1
facewidth = round(facewidth, 4)
if(counter<5000):
# 先載入 cube 程式測試
#outstring = self.cube_weblink()
# 再載入 gear 程式測試
outstring = self.gear_weblink()
outstring += "進行"+str(counter)+"次重複運算後,得到合用的facewidth值為:"+str(facewidth)
return outstring
@cherrypy.expose
def cube_weblink(self):
outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">
document.writeln ("Error loading Pro/Web.Link header!");
</script>
<script type="text/javascript" language="JavaScript">
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt"));
var solid = session.GetModel("cube.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("a1");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("a2");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=20.0;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
'''
return outstring
@cherrypy.expose
def gear_weblink(self, facewidth=5, n=18):
outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">// <![CDATA[
document.writeln ("Error loading Pro/Web.Link header!");
// ]]></script>
<script type="text/javascript" language="JavaScript">// <![CDATA[
if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("gear.prt", "v:/", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("gear.prt"));
var solid = session.GetModel("gear.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("n");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("face_width");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=25+i*2;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
//d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d1Value = pfcCreate ("MpfcModelItem").CreateIntParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("gear.prt", "v:/", "mygear_"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
// ]]></script>
'''
return outstring
root = Gear()
# setup static, images and downloads directories
application_conf = {
'/static':{
'tools.staticdir.on': True,
'tools.staticdir.dir': _curdir+"/static"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"}
}
# if inOpenshift ('OPENSHIFT_REPO_DIR' exists in environment variables) or not inOpenshift
if __name__ == '__main__':
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# operate in OpenShift
application = cherrypy.Application(root, config = application_conf)
else:
# operate in localhost
cherrypy.quickstart(root, config = application_conf)
| agpl-3.0 |
sinkuri256/python-for-android | python3-alpha/python3-src/Lib/shlex.py | 51 | 11100 | """A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
from collections import deque
from io import StringIO
__all__ = ["shlex", "split"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, str):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
        self.wordchars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print('shlex: reading from %s, line %d' \
% (self.instream, self.lineno))
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print("shlex: pushing token " + repr(tok))
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, str):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('shlex: pushing to file %s' % (self.infile,))
else:
print('shlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('shlex: popping to %s, line %d' \
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print("shlex: popping token " + repr(tok))
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print("shlex: token=" + repr(raw))
else:
print("shlex: token=EOF")
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print("shlex: in state", repr(self.state), \
"I see character:", repr(nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print("shlex: I see punctuation in word state")
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print("shlex: raw token=" + repr(result))
else:
print("shlex: raw token=EOF")
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, str) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False, posix=True):
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
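# Informal usage example: split('ls -l "My Documents"') returns
# ['ls', '-l', 'My Documents']; quotes group words and are stripped in POSIX
# mode, and with comments=False a '#' is treated as an ordinary character
# rather than the start of a comment.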
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = shlex()
else:
file = sys.argv[1]
lexer = shlex(open(file), file)
while 1:
tt = lexer.get_token()
if tt:
print("Token: " + repr(tt))
else:
break
| apache-2.0 |
adrientetar/robofab | Lib/robofab/tools/glifExport.py | 1 | 2477 | """Tool for exporting GLIFs from FontLab"""
import FL
import os
from robofab.interface.all.dialogs import ProgressBar
from robofab.glifLib import GlyphSet
from robofab.tools.glifImport import GlyphPlaceholder
from robofab.pens.flPen import drawFLGlyphOntoPointPen
def exportGlyph(glyphName, flGlyph, glyphSet):
"""Export a FontLab glyph."""
glyph = GlyphPlaceholder()
glyph.width = flGlyph.width
glyph.unicodes = flGlyph.unicodes
if flGlyph.note:
glyph.note = flGlyph.note
customdata = flGlyph.customdata
if customdata:
from io import StringIO
from robofab.plistlib import readPlist, Data
f = StringIO(customdata)
try:
glyph.lib = readPlist(f)
except: # XXX ugh, plistlib can raise lots of things
# Anyway, customdata does not contain valid plist data,
# but we don't need to toss it!
glyph.lib = {"org.robofab.fontlab.customdata": Data(customdata)}
def drawPoints(pen):
# whoohoo, nested scopes are cool.
drawFLGlyphOntoPointPen(flGlyph, pen)
glyphSet.writeGlyph(glyphName, glyph, drawPoints)
def exportGlyphs(font, glyphs=None, dest=None, doProgress=True, bar=None):
"""Export all glyphs in a FontLab font"""
if dest is None:
dir, base = os.path.split(font.file_name)
base = base.split(".")[0] + ".glyphs"
dest = os.path.join(dir, base)
if not os.path.exists(dest):
os.makedirs(dest)
glyphSet = GlyphSet(dest)
if glyphs is None:
indices = list(range(len(font)))
else:
indices = []
for glyphName in glyphs:
indices.append(font.FindGlyph(glyphName))
barStart = 0
closeBar = False
if doProgress:
if not bar:
bar = ProgressBar("Exporting Glyphs", len(indices))
closeBar = True
else:
barStart = bar.getCurrentTick()
else:
bar = None
try:
done = {}
for i in range(len(indices)):
#if not (i % 10) and not bar.tick(i + barStart):
# raise KeyboardInterrupt
index = indices[i]
flGlyph = font[index]
if flGlyph is None:
continue
glyphName = flGlyph.name
if not glyphName:
print("can't dump glyph #%s, it has no glyph name" % i)
else:
if glyphName in done:
n = 1
while ("%s#%s" % (glyphName, n)) in done:
n += 1
glyphName = "%s#%s" % (glyphName, n)
done[glyphName] = None
exportGlyph(glyphName, flGlyph, glyphSet)
if bar and not i % 10:
bar.tick(barStart + i)
# Write out contents.plist
glyphSet.writeContents()
except KeyboardInterrupt:
if bar:
bar.close()
bar = None
if bar and closeBar:
bar.close()
| bsd-3-clause |
hnjamba/onaclone | onadata/libs/utils/csv_reader.py | 6 | 1612 | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import csv
class CsvReader(object):
"""
Typical usage::
csv_reader = CsvReader(path)
for d in csv_reader.iter_dicts():
Variable.objects.create(**d)
"""
def __init__(self, path):
self.open(path)
def open(self, path):
self._file = open(path, 'rU') # universal new-line mode
# http://stackoverflow.com/questions/904041/reading-a-utf8-csv-file-wit
# h-python/904085#904085
self._csv_reader = csv.reader(self._file)
def close(self):
self._file.close()
def __iter__(self):
return self
def next(self):
"""
A CsvReader object is iterable (since we have defined __iter__
and next methods. Each iteration of this object returns a row
of data.
"""
row = self._csv_reader.next()
return [cell for cell in row]
def _set_headers(self):
self._headers = self.next()
def iter_dicts(self):
self._set_headers()
for row in self:
result = {}
for key, value in zip(self._headers, row):
# note since we're reading this in from a csv file
# value is going to be a string or unicode string, we
# quite simply want to avoid including empty strings in our
# dict.
if value:
result[key] = value
# we only want to yield rows where there is something in
# the row.
if result:
yield result
self.close()
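    # Informal example (not from the original module): for a CSV file whose
    # first row is the header "name,label" and whose next row is "age,How old?",
    # iter_dicts() yields {'name': 'age', 'label': 'How old?'}; empty cells are
    # dropped and completely empty rows are skipped.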
| bsd-2-clause |
georgejhunt/olpc-kernel | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
raw_syscalls__sys_exit(**locals())
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
ctrevino/DIGITS | digits/model/tasks/caffe_train.py | 1 | 38589 | # Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import re
import time
import math
import subprocess
import numpy as np
from google.protobuf import text_format
import caffe
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
from train import TrainTask
from digits.config import config_value
from digits.status import Status
from digits import utils, dataset
from digits.utils import subclass, override, constants
from digits.dataset import ImageClassificationDatasetJob
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 2
@subclass
class CaffeTrainTask(TrainTask):
"""
Trains a caffe model
"""
CAFFE_LOG = 'caffe_output.log'
@staticmethod
def upgrade_network(network):
#TODO
pass
def __init__(self, network, **kwargs):
"""
Arguments:
network -- a caffe NetParameter defining the network
"""
super(CaffeTrainTask, self).__init__(**kwargs)
self.pickver_task_caffe_train = PICKLE_VERSION
self.network = network
self.current_iteration = 0
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
self.image_mean = None
self.solver = None
self.solver_file = constants.CAFFE_SOLVER_FILE
self.train_val_file = constants.CAFFE_TRAIN_VAL_FILE
self.snapshot_prefix = constants.CAFFE_SNAPSHOT_PREFIX
self.deploy_file = constants.CAFFE_DEPLOY_FILE
self.caffe_log_file = self.CAFFE_LOG
def __getstate__(self):
state = super(CaffeTrainTask, self).__getstate__()
# Don't pickle these things
if 'caffe_log' in state:
del state['caffe_log']
if '_transformer' in state:
del state['_transformer']
if '_caffe_net' in state:
del state['_caffe_net']
return state
def __setstate__(self, state):
super(CaffeTrainTask, self).__setstate__(state)
# Upgrade pickle file
if state['pickver_task_caffe_train'] == 1:
print 'upgrading %s' % self.job_id
self.caffe_log_file = self.CAFFE_LOG
self.pickver_task_caffe_train = PICKLE_VERSION
# Make changes to self
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
# These things don't get pickled
self.image_mean = None
### Task overrides
@override
def name(self):
return 'Train Caffe Model'
@override
def before_run(self):
super(CaffeTrainTask, self).before_run()
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
self.save_prototxt_files()
else:
raise NotImplementedError
self.caffe_log = open(self.path(self.CAFFE_LOG), 'a')
self.saving_snapshot = False
self.receiving_train_output = False
self.receiving_val_output = False
self.last_train_update = None
return True
def save_prototxt_files(self):
"""
Save solver, train_val and deploy files to disk
"""
has_val_set = self.dataset.val_db_task() is not None
### Check what has been specified in self.network
tops = []
bottoms = {}
train_data_layer = None
val_data_layer = None
hidden_layers = caffe_pb2.NetParameter()
loss_layers = []
accuracy_layers = []
for layer in self.network.layer:
assert layer.type not in ['MemoryData', 'HDF5Data', 'ImageData'], 'unsupported data layer type'
if layer.type == 'Data':
for rule in layer.include:
if rule.phase == caffe_pb2.TRAIN:
assert train_data_layer is None, 'cannot specify two train data layers'
train_data_layer = layer
elif rule.phase == caffe_pb2.TEST:
assert val_data_layer is None, 'cannot specify two test data layers'
val_data_layer = layer
elif layer.type == 'SoftmaxWithLoss':
loss_layers.append(layer)
elif layer.type == 'Accuracy':
addThis = True
if layer.accuracy_param.HasField('top_k'):
if layer.accuracy_param.top_k >= len(self.get_labels()):
self.logger.warning('Removing layer %s because top_k=%s while there are are only %s labels in this dataset' % (layer.name, layer.accuracy_param.top_k, len(self.get_labels())))
addThis = False
if addThis:
accuracy_layers.append(layer)
else:
hidden_layers.layer.add().CopyFrom(layer)
if len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]:
pass
else:
for top in layer.top:
tops.append(top)
for bottom in layer.bottom:
bottoms[bottom] = True
if train_data_layer is None:
assert val_data_layer is None, 'cannot specify a test data layer without a train data layer'
assert len(loss_layers) > 0, 'must specify a loss layer'
network_outputs = []
for name in tops:
if name not in bottoms:
network_outputs.append(name)
assert len(network_outputs), 'network must have an output'
# Update num_output for any output InnerProduct layers automatically
for layer in hidden_layers.layer:
if layer.type == 'InnerProduct':
for top in layer.top:
if top in network_outputs:
layer.inner_product_param.num_output = len(self.get_labels())
break
### Write train_val file
train_val_network = caffe_pb2.NetParameter()
# data layers
if train_data_layer is not None:
if train_data_layer.HasField('data_param'):
assert not train_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not train_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
max_crop_size = min(self.dataset.image_dims[0], self.dataset.image_dims[1])
if self.crop_size:
assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
train_data_layer.transform_param.crop_size = self.crop_size
elif train_data_layer.transform_param.HasField('crop_size'):
cs = train_data_layer.transform_param.crop_size
if cs > max_crop_size:
# don't throw an error here
cs = max_crop_size
train_data_layer.transform_param.crop_size = cs
self.crop_size = cs
train_val_network.layer.add().CopyFrom(train_data_layer)
train_data_layer = train_val_network.layer[-1]
if val_data_layer is not None and has_val_set:
if val_data_layer.HasField('data_param'):
assert not val_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not val_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
if self.crop_size:
# use our error checking from the train layer
val_data_layer.transform_param.crop_size = self.crop_size
train_val_network.layer.add().CopyFrom(val_data_layer)
val_data_layer = train_val_network.layer[-1]
else:
train_data_layer = train_val_network.layer.add(type = 'Data', name = 'data')
train_data_layer.top.append('data')
train_data_layer.top.append('label')
train_data_layer.include.add(phase = caffe_pb2.TRAIN)
train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.crop_size:
train_data_layer.transform_param.crop_size = self.crop_size
if has_val_set:
val_data_layer = train_val_network.layer.add(type = 'Data', name = 'data')
val_data_layer.top.append('data')
val_data_layer.top.append('label')
val_data_layer.include.add(phase = caffe_pb2.TEST)
val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if self.crop_size:
val_data_layer.transform_param.crop_size = self.crop_size
train_data_layer.data_param.source = self.dataset.path(self.dataset.train_db_task().db_name)
train_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
if val_data_layer is not None and has_val_set:
val_data_layer.data_param.source = self.dataset.path(self.dataset.val_db_task().db_name)
val_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB
if self.use_mean:
mean_pixel = None
with open(self.dataset.path(self.dataset.train_db_task().mean_file)) as f:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(f.read())
mean = np.reshape(blob.data,
(
self.dataset.image_dims[2],
self.dataset.image_dims[0],
self.dataset.image_dims[1],
)
)
mean_pixel = mean.mean(1).mean(1)
for value in mean_pixel:
train_data_layer.transform_param.mean_value.append(value)
if val_data_layer is not None and has_val_set:
for value in mean_pixel:
val_data_layer.transform_param.mean_value.append(value)
if self.batch_size:
train_data_layer.data_param.batch_size = self.batch_size
if val_data_layer is not None and has_val_set:
val_data_layer.data_param.batch_size = self.batch_size
else:
if not train_data_layer.data_param.HasField('batch_size'):
train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
if val_data_layer is not None and has_val_set and not val_data_layer.data_param.HasField('batch_size'):
val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE
# hidden layers
train_val_network.MergeFrom(hidden_layers)
# output layers
train_val_network.layer.extend(loss_layers)
train_val_network.layer.extend(accuracy_layers)
with open(self.path(self.train_val_file), 'w') as outfile:
text_format.PrintMessage(train_val_network, outfile)
### Write deploy file
deploy_network = caffe_pb2.NetParameter()
# input
deploy_network.input.append('data')
deploy_network.input_dim.append(1)
deploy_network.input_dim.append(self.dataset.image_dims[2])
if self.crop_size:
deploy_network.input_dim.append(self.crop_size)
deploy_network.input_dim.append(self.crop_size)
else:
deploy_network.input_dim.append(self.dataset.image_dims[0])
deploy_network.input_dim.append(self.dataset.image_dims[1])
# hidden layers
deploy_network.MergeFrom(hidden_layers)
# output layers
if loss_layers[-1].type == 'SoftmaxWithLoss':
prob_layer = deploy_network.layer.add(
type = 'Softmax',
name = 'prob')
prob_layer.bottom.append(network_outputs[-1])
prob_layer.top.append('prob')
with open(self.path(self.deploy_file), 'w') as outfile:
text_format.PrintMessage(deploy_network, outfile)
### Write solver file
solver = caffe_pb2.SolverParameter()
# get enum value for solver type
solver.solver_type = getattr(solver, self.solver_type)
solver.net = self.train_val_file
# Set CPU/GPU mode
if config_value('caffe_root')['cuda_enabled'] and \
bool(config_value('gpu_list')):
solver.solver_mode = caffe_pb2.SolverParameter.GPU
else:
solver.solver_mode = caffe_pb2.SolverParameter.CPU
solver.snapshot_prefix = self.snapshot_prefix
# Epochs -> Iterations
train_iter = int(math.ceil(float(self.dataset.train_db_task().entries_count) / train_data_layer.data_param.batch_size))
solver.max_iter = train_iter * self.train_epochs
snapshot_interval = self.snapshot_interval * train_iter
if 0 < snapshot_interval <= 1:
solver.snapshot = 1 # don't round down
elif 1 < snapshot_interval < solver.max_iter:
solver.snapshot = int(snapshot_interval)
else:
solver.snapshot = 0 # only take one snapshot at the end
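        # Worked example with made-up numbers: 10000 training images at
        # batch_size 100 give train_iter = 100, so train_epochs = 30 yields
        # max_iter = 3000, and a snapshot_interval of 1 epoch then snapshots
        # every 100 iterations.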
if has_val_set and self.val_interval:
solver.test_iter.append(int(math.ceil(float(self.dataset.val_db_task().entries_count) / val_data_layer.data_param.batch_size)))
val_interval = self.val_interval * train_iter
if 0 < val_interval <= 1:
solver.test_interval = 1 # don't round down
elif 1 < val_interval < solver.max_iter:
solver.test_interval = int(val_interval)
else:
solver.test_interval = solver.max_iter # only test once at the end
# Learning rate
solver.base_lr = self.learning_rate
solver.lr_policy = self.lr_policy['policy']
scale = float(solver.max_iter)/100.0
if solver.lr_policy == 'fixed':
pass
elif solver.lr_policy == 'step':
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'multistep':
for value in self.lr_policy['stepvalue']:
# stepvalue = stepvalue * scale
solver.stepvalue.append(int(math.ceil(float(value) * scale)))
solver.gamma = self.lr_policy['gamma']
elif solver.lr_policy == 'exp':
# gamma = gamma^(1/scale)
solver.gamma = math.pow(self.lr_policy['gamma'], 1.0/scale)
elif solver.lr_policy == 'inv':
# gamma = gamma / scale
solver.gamma = self.lr_policy['gamma'] / scale
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'poly':
solver.power = self.lr_policy['power']
elif solver.lr_policy == 'sigmoid':
# gamma = -gamma / scale
solver.gamma = -1.0 * self.lr_policy['gamma'] / scale
# stepsize = stepsize * scale
solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale))
else:
raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy)
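        # Scaling example with made-up values: with max_iter = 3000 the scale is
        # 30, so an 'exp' policy with a user-supplied gamma of 0.95 becomes a
        # per-iteration gamma of 0.95 ** (1.0 / 30) (about 0.9983), and a 'step'
        # policy stepsize given as 33 (percent of training) becomes
        # ceil(33 * 30) = 990 iterations.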
# go with the suggested defaults
if solver.solver_type != solver.ADAGRAD:
solver.momentum = 0.9
solver.weight_decay = 0.0005
# Display 8x per epoch, or once per 5000 images, whichever is more frequent
solver.display = max(1, min(
int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))),
int(math.ceil(5000.0 / train_data_layer.data_param.batch_size))
))
if self.random_seed is not None:
solver.random_seed = self.random_seed
with open(self.path(self.solver_file), 'w') as outfile:
text_format.PrintMessage(solver, outfile)
self.solver = solver # save for later
return True
def iteration_to_epoch(self, it):
return float(it * self.train_epochs) / self.solver.max_iter
@override
def task_arguments(self, resources):
args = [config_value('caffe_root')['executable'],
'train',
'--solver=%s' % self.path(self.solver_file),
]
if 'gpus' in resources:
identifiers = []
for identifier, value in resources['gpus']:
identifiers.append(identifier)
if len(identifiers) == 1:
args.append('--gpu=%s' % identifiers[0])
elif len(identifiers) > 1:
args.append('--gpus=%s' % ','.join(identifiers))
if self.pretrained_model:
args.append('--weights=%s' % self.path(self.pretrained_model))
return args
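        # For reference, the assembled command line looks roughly like
        #   caffe train --solver=<job_dir>/solver.prototxt --gpu=0 [--weights=<pretrained>.caffemodel]
        # depending on the GPUs assigned and whether a pretrained model was given.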
@override
def process_output(self, line):
float_exp = '(NaN|[-+]?[0-9]*\.?[0-9]+(e[-+]?[0-9]+)?)'
self.caffe_log.write('%s\n' % line)
self.caffe_log.flush()
# parse caffe output
timestamp, level, message = self.preprocess_output_caffe(line)
if not message:
return True
# iteration updates
match = re.match(r'Iteration (\d+)', message)
if match:
i = int(match.group(1))
self.new_iteration(i)
# net output
match = re.match(r'(Train|Test) net output #(\d+): (\S*) = %s' % float_exp, message, flags=re.IGNORECASE)
if match:
phase = match.group(1)
index = int(match.group(2))
name = match.group(3)
value = match.group(4)
assert value.lower() != 'nan', 'Network outputted NaN for "%s" (%s phase). Try decreasing your learning rate.' % (name, phase)
value = float(value)
# Find the layer type
kind = '?'
for layer in self.network.layer:
if name in layer.top:
kind = layer.type
break
if phase.lower() == 'train':
self.save_train_output(name, kind, value)
elif phase.lower() == 'test':
self.save_val_output(name, kind, value)
return True
# learning rate updates
match = re.match(r'Iteration (\d+).*lr = %s' % float_exp, message, flags=re.IGNORECASE)
if match:
i = int(match.group(1))
lr = float(match.group(2))
self.save_train_output('learning_rate', 'LearningRate', lr)
return True
# snapshot saved
if self.saving_snapshot:
if not message.startswith('Snapshotting solver state'):
self.logger.warning('caffe output format seems to have changed. Expected "Snapshotting solver state..." after "Snapshotting to..."')
else:
self.logger.debug('Snapshot saved.')
self.detect_snapshots()
self.send_snapshot_update()
self.saving_snapshot = False
return True
# snapshot starting
match = re.match(r'Snapshotting to (.*)\s*$', message)
if match:
self.saving_snapshot = True
return True
# memory requirement
match = re.match(r'Memory required for data:\s+(\d+)', message)
if match:
bytes_required = int(match.group(1))
#self.logger.debug('memory required: %s' % utils.sizeof_fmt(bytes_required))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
def preprocess_output_caffe(self, line):
"""
Takes line of output and parses it according to caffe's output format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# LMMDD HH:MM:SS.MICROS pid file:lineno] message
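        # For example, a typical glog line such as
        #   I0601 13:15:02.345678  4321 solver.cpp:242] Iteration 100, loss = 0.52
        # yields level 'info', message 'Iteration 100, loss = 0.52', and a
        # timestamp built from the current year plus the '0601 13:15:02' part.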
match = re.match(r'(\w)(\d{4} \S{8}).*]\s+(\S.*)$', line)
if match:
level = match.group(1)
# add the year because caffe omits it
timestr = '%s%s' % (time.strftime('%Y'), match.group(2))
message = match.group(3)
if level == 'I':
level = 'info'
elif level == 'W':
level = 'warning'
elif level == 'E':
level = 'error'
elif level == 'F': #FAIL
level = 'critical'
timestamp = time.mktime(time.strptime(timestr, '%Y%m%d %H:%M:%S'))
return (timestamp, level, message)
else:
#self.logger.warning('Unrecognized task output "%s"' % line)
return (None, None, None)
def new_iteration(self, it):
"""
Update current_iteration
"""
if self.current_iteration == it:
return
self.current_iteration = it
self.send_progress_update(self.iteration_to_epoch(it))
def send_snapshot_update(self):
"""
Sends socketio message about the snapshot list
"""
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'snapshots',
'data': self.snapshot_list(),
},
namespace='/jobs',
room=self.job_id,
)
@override
def after_run(self):
super(CaffeTrainTask, self).after_run()
self.caffe_log.close()
@override
def after_runtime_error(self):
if os.path.exists(self.path(self.CAFFE_LOG)):
output = subprocess.check_output(['tail', '-n40', self.path(self.CAFFE_LOG)])
lines = []
for line in output.split('\n'):
# parse caffe header
timestamp, level, message = self.preprocess_output_caffe(line)
if message:
lines.append(message)
# return the last 20 lines
self.traceback = '\n'.join(lines[len(lines)-20:])
### TrainTask overrides
@override
def detect_snapshots(self):
self.snapshots = []
snapshot_dir = os.path.join(self.job_dir, os.path.dirname(self.snapshot_prefix))
snapshots = []
solverstates = []
for filename in os.listdir(snapshot_dir):
# find models
match = re.match(r'%s_iter_(\d+)\.caffemodel' % os.path.basename(self.snapshot_prefix), filename)
if match:
iteration = int(match.group(1))
epoch = float(iteration) / (float(self.solver.max_iter)/self.train_epochs)
# assert epoch.is_integer(), '%s is not an integer' % epoch
epoch = round(epoch,3)
# if epoch is int
if epoch == math.ceil(epoch):
# print epoch,math.ceil(epoch),int(epoch)
epoch = int(epoch)
snapshots.append( (
os.path.join(snapshot_dir, filename),
epoch
)
)
# find solverstates
match = re.match(r'%s_iter_(\d+)\.solverstate' % os.path.basename(self.snapshot_prefix), filename)
if match:
solverstates.append( (
os.path.join(snapshot_dir, filename),
int(match.group(1))
)
)
# delete all but the most recent solverstate
for filename, iteration in sorted(solverstates, key=lambda tup: tup[1])[:-1]:
#print 'Removing "%s"' % filename
os.remove(filename)
self.snapshots = sorted(snapshots, key=lambda tup: tup[1])
return len(self.snapshots) > 0
@override
def est_next_snapshot(self):
if self.status != Status.RUN or self.current_iteration == 0:
return None
elapsed = time.time() - self.status_updates[-1][1]
next_snapshot_iteration = (1 + self.current_iteration//self.snapshot_interval) * self.snapshot_interval
return (next_snapshot_iteration - self.current_iteration) * elapsed // self.current_iteration
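        # This assumes a constant time per iteration: average seconds per iteration so far
        # (elapsed time since the last status update / current_iteration), multiplied by the
        # iterations remaining until the next multiple of snapshot_interval.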
@override
def can_view_weights(self):
return False
@override
def can_infer_one(self):
if isinstance(self.dataset, ImageClassificationDatasetJob):
return True
return False
@override
def infer_one(self, data, snapshot_epoch=None, layers=None):
if isinstance(self.dataset, ImageClassificationDatasetJob):
return self.classify_one(data,
snapshot_epoch=snapshot_epoch,
layers=layers,
)
raise NotImplementedError()
def classify_one(self, image, snapshot_epoch=None, layers=None):
"""
Classify an image
Returns (predictions, visualizations)
        predictions -- a list of (label, confidence) pairs, one per label, sorted by descending confidence
visualizations -- a list of dicts for the specified layers
Returns (None, None) if something goes wrong
Arguments:
image -- a np.array
Keyword arguments:
snapshot_epoch -- which snapshot to use
layers -- which layer activation[s] and weight[s] to visualize
"""
labels = self.get_labels()
net = self.get_net(snapshot_epoch)
# process image
if image.ndim == 2:
image = image[:,:,np.newaxis]
preprocessed = self.get_transformer().preprocess(
'data', image)
# reshape net input (if necessary)
test_shape = (1,) + preprocessed.shape
if net.blobs['data'].data.shape != test_shape:
net.blobs['data'].reshape(*test_shape)
# run inference
net.blobs['data'].data[...] = preprocessed
output = net.forward()
scores = output[net.outputs[-1]].flatten()
indices = (-scores).argsort()
predictions = []
for i in indices:
predictions.append( (labels[i], scores[i]) )
# add visualizations
visualizations = []
if layers and layers != 'none':
if layers == 'all':
added_activations = []
for layer in self.network.layer:
print 'Computing visualizations for "%s"...' % layer.name
if not layer.type.endswith(('Data', 'Loss', 'Accuracy')):
for bottom in layer.bottom:
if bottom in net.blobs and bottom not in added_activations:
data = net.blobs[bottom].data[0]
vis = self.get_layer_visualization(data)
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(bottom),
'type': 'Activations',
'mean': mean,
'stddev': std,
'histogram': hist,
'image_html': utils.image.embed_image_html(vis),
}
)
added_activations.append(bottom)
if layer.name in net.params:
data = net.params[layer.name][0].data
if layer.type not in ['InnerProduct']:
vis = self.get_layer_visualization(data)
else:
vis = None
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(layer.name),
'type': 'Weights (%s layer)' % layer.type,
'mean': mean,
'stddev': std,
'histogram': hist,
'image_html': utils.image.embed_image_html(vis),
}
)
for top in layer.top:
if top in net.blobs and top not in added_activations:
data = net.blobs[top].data[0]
normalize = True
# don't normalize softmax layers
if layer.type == 'Softmax':
normalize = False
vis = self.get_layer_visualization(data, normalize=normalize)
mean, std, hist = self.get_layer_statistics(data)
visualizations.append(
{
'name': str(top),
'type': 'Activation',
'mean': mean,
'stddev': std,
'histogram': hist,
'image_html': utils.image.embed_image_html(vis),
}
)
added_activations.append(top)
else:
raise NotImplementedError
return (predictions, visualizations)
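        # Hypothetical usage sketch (`task` and `img` are placeholders, not defined in this module):
        #   predictions, visualizations = task.classify_one(img, layers='all')
        #   top_label, top_confidence = predictions[0]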
def get_layer_visualization(self, data,
normalize = True,
max_width = 600,
):
"""
Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
normalize -- whether to normalize the data when visualizing
max_width -- maximum width for the vis_square
"""
if data.ndim == 1:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data[:, np.newaxis, np.newaxis]
elif data.ndim == 2:
            # interpret as 1x1 grayscale images
            # (N*M, 1, 1)
data = data.reshape((data.shape[0]*data.shape[1], 1, 1))
elif data.ndim == 3:
if data.shape[0] == 3:
                # interpret as a color image
                # (1, H, W, 3)
data = data[[2,1,0],...] # BGR to RGB (see issue #59)
data = data.transpose(1,2,0)
data = data[np.newaxis,...]
else:
# interpret as grayscale images
# (N, H, W)
pass
elif data.ndim == 4:
if data.shape[0] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(1,2,3,0)
data = data[:,:,:,[2,1,0]] # BGR to RGB (see issue #59)
elif data.shape[1] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(0,2,3,1)
data = data[:,:,:,[2,1,0]] # BGR to RGB (see issue #59)
else:
# interpret as HxW grayscale images
# (N, H, W)
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2], data.shape[3]))
else:
raise RuntimeError('unrecognized data shape: %s' % (data.shape,))
# chop off data so that it will fit within max_width
padsize = 0
width = data.shape[2]
if width > max_width:
data = data[:1,:max_width,:max_width]
else:
if width > 1:
padsize = 1
width += 1
n = max(max_width/width,1)
n *= n
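            # Python 2 integer division: max_width/width is how many tiles fit across one row,
            # and squaring it caps the number of tiles so the resulting vis_square grid stays
            # within max_width pixels.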
data = data[:n]
return utils.image.vis_square(data,
padsize = padsize,
normalize = normalize,
)
def get_layer_statistics(self, data):
"""
Returns statistics for the given layer data:
(mean, standard deviation, histogram)
histogram -- [y, x, ticks]
Arguments:
data -- a np.ndarray
"""
# XXX These calculations can be super slow
mean = np.mean(data)
std = np.std(data)
y, x = np.histogram(data, bins=20)
y = list(y)
ticks = x[[0,len(x)/2,-1]]
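        # x holds the 21 bin edges returned by np.histogram(bins=20); the fancy indexing above
        # picks the first, middle and last edges to use as axis tick positions.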
x = [(x[i]+x[i+1])/2.0 for i in xrange(len(x)-1)]
ticks = list(ticks)
return (mean, std, [y, x, ticks])
@override
def can_infer_many(self):
if isinstance(self.dataset, ImageClassificationDatasetJob):
return True
return False
@override
def infer_many(self, data, snapshot_epoch=None):
if isinstance(self.dataset, ImageClassificationDatasetJob):
return self.classify_many(data, snapshot_epoch=snapshot_epoch)
raise NotImplementedError()
def classify_many(self, images, snapshot_epoch=None):
"""
Returns (labels, results):
labels -- an array of strings
results -- a 2D np array:
[
[image0_label0_confidence, image0_label1_confidence, ...],
[image1_label0_confidence, image1_label1_confidence, ...],
...
]
Arguments:
images -- a list of np.arrays
Keyword arguments:
snapshot_epoch -- which snapshot to use
"""
labels = self.get_labels()
net = self.get_net(snapshot_epoch)
caffe_images = []
for image in images:
if image.ndim == 2:
caffe_images.append(image[:,:,np.newaxis])
else:
caffe_images.append(image)
caffe_images = np.array(caffe_images)
if self.batch_size:
data_shape = (self.batch_size, self.dataset.image_dims[2])
# TODO: grab batch_size from the TEST phase in train_val network
else:
data_shape = (constants.DEFAULT_BATCH_SIZE, self.dataset.image_dims[2])
if self.crop_size:
data_shape += (self.crop_size, self.crop_size)
else:
data_shape += (self.dataset.image_dims[0], self.dataset.image_dims[1])
scores = None
for chunk in [caffe_images[x:x+data_shape[0]] for x in xrange(0, len(caffe_images), data_shape[0])]:
new_shape = (len(chunk),) + data_shape[1:]
if net.blobs['data'].data.shape != new_shape:
net.blobs['data'].reshape(*new_shape)
for index, image in enumerate(chunk):
net.blobs['data'].data[index] = self.get_transformer().preprocess(
'data', image)
output = net.forward()[net.outputs[-1]]
if scores is None:
scores = output
else:
scores = np.vstack((scores, output))
print 'Processed %s/%s images' % (len(scores), len(caffe_images))
return (labels, scores)
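        # Hypothetical usage sketch (`task` and `imgs` are placeholders, not defined in this module):
        #   labels, scores = task.classify_many(imgs)
        #   best = scores.argmax(axis=1)  # index of the top label for each image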
def has_model(self):
"""
Returns True if there is a model that can be used
"""
return len(self.snapshots) > 0
def get_net(self, epoch=None):
"""
Returns an instance of caffe.Net
        Keyword arguments:
        epoch -- which snapshot to load (defaults to the most recently generated snapshot)
"""
if not self.has_model():
return False
file_to_load = None
if not epoch:
epoch = self.snapshots[-1][1]
file_to_load = self.snapshots[-1][0]
else:
for snapshot_file, snapshot_epoch in self.snapshots:
if snapshot_epoch == epoch:
file_to_load = snapshot_file
break
if file_to_load is None:
raise Exception('snapshot not found for epoch "%s"' % epoch)
# check if already loaded
if self.loaded_snapshot_file and self.loaded_snapshot_file == file_to_load \
and hasattr(self, '_caffe_net') and self._caffe_net is not None:
return self._caffe_net
if config_value('caffe_root')['cuda_enabled'] and\
config_value('gpu_list'):
caffe.set_mode_gpu()
# load a new model
self._caffe_net = caffe.Net(
self.path(self.deploy_file),
file_to_load,
caffe.TEST)
self.loaded_snapshot_epoch = epoch
self.loaded_snapshot_file = file_to_load
return self._caffe_net
def get_transformer(self):
"""
Returns an instance of caffe.io.Transformer
"""
# check if already loaded
if hasattr(self, '_transformer') and self._transformer is not None:
return self._transformer
data_shape = (1, self.dataset.image_dims[2])
if self.crop_size:
data_shape += (self.crop_size, self.crop_size)
else:
data_shape += (self.dataset.image_dims[0], self.dataset.image_dims[1])
t = caffe.io.Transformer(
inputs = {'data': data_shape}
)
t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)
if self.dataset.image_dims[2] == 3 and \
self.dataset.train_db_task().image_channel_order == 'BGR':
# channel swap
# XXX see issue #59
t.set_channel_swap('data', (2,1,0))
if self.use_mean:
# set mean
with open(self.dataset.path(self.dataset.train_db_task().mean_file)) as f:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(f.read())
pixel = np.reshape(blob.data,
(
self.dataset.image_dims[2],
self.dataset.image_dims[0],
self.dataset.image_dims[1],
)
).mean(1).mean(1)
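                # reshaped to (channels, height, width), then averaged over height and width,
                # leaving one mean value per channel for the transformer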
t.set_mean('data', pixel)
#t.set_raw_scale('data', 255) # [0,255] range instead of [0,1]
self._transformer = t
return self._transformer
| bsd-3-clause |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Google/Spreadsheets/RetrieveCellFeed.py | 4 | 5884 | # -*- coding: utf-8 -*-
###############################################################################
#
# RetrieveCellFeed
# Retrieves a list of cell names and values in a specified Google worksheet.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RetrieveCellFeed(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RetrieveCellFeed Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RetrieveCellFeed, self).__init__(temboo_session, '/Library/Google/Spreadsheets/RetrieveCellFeed')
def new_input_set(self):
return RetrieveCellFeedInputSet()
def _make_result_set(self, result, path):
return RetrieveCellFeedResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RetrieveCellFeedChoreographyExecution(session, exec_id, path)
class RetrieveCellFeedInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RetrieveCellFeed
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid Access Token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new Access Token.)
"""
super(RetrieveCellFeedInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(RetrieveCellFeedInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(RetrieveCellFeedInputSet, self)._set_input('ClientSecret', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((optional, password) Deprecated (retained for backward compatibility only).)
"""
super(RetrieveCellFeedInputSet, self)._set_input('Password', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new Access Token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(RetrieveCellFeedInputSet, self)._set_input('RefreshToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.)
"""
super(RetrieveCellFeedInputSet, self)._set_input('ResponseFormat', value)
def set_SpreadsheetKey(self, value):
"""
Set the value of the SpreadsheetKey input for this Choreo. ((required, string) The unique key of the spreadsheet associated with the cells you want to retrieve.)
"""
super(RetrieveCellFeedInputSet, self)._set_input('SpreadsheetKey', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(RetrieveCellFeedInputSet, self)._set_input('Username', value)
def set_WorksheetId(self, value):
"""
Set the value of the WorksheetId input for this Choreo. ((required, string) The unique ID of the worksheet associated with the cells you want to retrieve.)
"""
super(RetrieveCellFeedInputSet, self)._set_input('WorksheetId', value)
class RetrieveCellFeedResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RetrieveCellFeed Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class RetrieveCellFeedChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RetrieveCellFeedResultSet(response, path)
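# Hedged usage sketch (not part of the original SDK file): the credentials and IDs below are
# placeholders, and this assumes the standard TembooSession / execute_with_results API that
# ships with the Temboo Python SDK.
if __name__ == '__main__':
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = RetrieveCellFeed(session)
    inputs = choreo.new_input_set()
    inputs.set_AccessToken('A_VALID_ACCESS_TOKEN')
    inputs.set_SpreadsheetKey('SPREADSHEET_KEY')
    inputs.set_WorksheetId('WORKSHEET_ID')
    inputs.set_ResponseFormat('json')
    results = choreo.execute_with_results(inputs)
    print(results.get_Response())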
| apache-2.0 |
CeON/CoAnSys | document-similarity/document-similarity-logic/src/main/python/analyse_unibi.py | 4 | 3007 | #! /usr/bin/python
import sys,re,string
sys.path.append("/home/pdendek/docsim-check/out/document-similarity-logic-1.6-SNAPSHOT-jar-with-dependencies.jar")
from pl.edu.icm.coansys.commons.java import DiacriticsRemover
def fillDict(inL,langs):
innerD = {}
for x in inL:
t,l = x
langs.add(l)
innerD[l] = t
return (innerD,langs)
def replaceInList(l,old,new):
try:
l.remove(old)
l.append(new)
except ValueError:
pass
return l
def replaceKInDict(d,old,new):
try:
d[new] = d.pop(old)
except KeyError:
pass
return d
def filterDiacritics(t):
	r = DiacriticsRemover.removeDiacritics(t).lower() # remove diacritics and lowercase
	r = re.sub('(\\w)[^a-zA-Z0-9\\s'+re.escape('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')+']+(\\w)',r'\1\2',r) # remove weird signs within a string
	r = re.sub(r'[^a-zA-Z0-9_\-]+',' ',r) # remove stand-alone weird signs
r = r.strip()
return r
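# Illustrative example of the pipeline above (assuming DiacriticsRemover strips accents):
#   filterDiacritics(u'Analiza Tekstu! (PL)') -> u'analiza tekstu pl'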
def getFilteredItemOrNone(d,k):
r = None
try:
r = filterDiacritics(d[k])
except KeyError:
pass
return r
@outputSchema("b:{ t:( key:chararray, title:chararray, abstract:chararray, ccs:{ cc:( type:chararray, code:chararray) }, lang:chararray ) }")
def groupByLangAndFilter(key,tis,abstrs,ccs):
langs = set()
unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs for thiscc in cc[1]]
ltis,langs = fillDict(tis,langs)
labstrs,langs = fillDict(abstrs,langs)
langs = replaceInList(list(langs),u'',u'und')
ltis = replaceKInDict(ltis,u'',u'und')
labstrs = replaceKInDict(labstrs,u'',u'und')
out = [(key+u'-----'+unicode(lang),getFilteredItemOrNone(ltis,lang),getFilteredItemOrNone(labstrs,lang),unpacked_ccs,lang) for lang in langs]
return out
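# NOTE: the identically-named definition below rebinds groupByLangAndFilter when the module
# loads, so only the 5-argument variant (with startsWith) remains reachable.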
@outputSchema("b:{ t:( key:chararray, title:chararray, abstract:chararray, ccs:{ cc:( type:chararray, code:chararray) }, lang:chararray ) }")
def groupByLangAndFilter(key,tis,abstrs,ccs,startsWith):
langs = set()
unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs if cc[0].startswith(startsWith) for thiscc in cc[1] ]
ltis,langs = fillDict(tis,langs)
labstrs,langs = fillDict(abstrs,langs)
langs = replaceInList(list(langs),u'',u'und')
ltis = replaceKInDict(ltis,u'',u'und')
labstrs = replaceKInDict(labstrs,u'',u'und')
out = [(key+u'-----'+unicode(lang),getFilteredItemOrNone(ltis,lang),getFilteredItemOrNone(labstrs,lang),unpacked_ccs,lang) for lang in langs]
return out
@outputSchema("t:( key:chararray, text:chararray, ccs:{ cc:( type:chararray, code:chararray) } )")
def mergeAndFilter(key,tis,abstrs,ccs):
unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs for thiscc in cc[1]]
return (key,' '.join([filterDiacritics(o[0]) for o in tis+abstrs]), unpacked_ccs)
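# NOTE: as above, the second mergeAndFilter definition below replaces this one at load time;
# only the variant taking startsWith remains reachable.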
@outputSchema("t:( key:chararray, text:chararray, ccs:{ cc:( type:chararray, code:chararray) } )")
def mergeAndFilter(key,tis,abstrs,ccs,startsWith):
unpacked_ccs = [(cc[0],thiscc[0]) for cc in ccs if cc[0].startswith(startsWith) for thiscc in cc[1]]
t = ' '.join([o[0] for o in tis+abstrs])
t = t.lower()
return (key,' '.join([filterDiacritics(o[0]) for o in tis+abstrs]), unpacked_ccs)
| agpl-3.0 |
openchange/openchange | mapiproxy/services/ocsmanager/ocsmanager/controllers/authenticate.py | 8 | 3494 | import logging
import hashlib
import os
from base64 import urlsafe_b64encode as encode
from base64 import urlsafe_b64decode as decode
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from pylons.decorators.rest import restrict
from ocsmanager.model import AuthenticateModel
from ocsmanager.lib.base import BaseController, render
log = logging.getLogger(__name__)
class AuthenticateController(BaseController):
def _auth_abort(self, code, message):
c.code = code
c.message = message
return render('/error.xml')
@restrict('POST')
def token(self):
""" Return a session token, one-time hash and password hash
for the user.
"""
# Ensure Content-type is text/xml
if request.headers.get("Content-Type", "").startswith("text/xml") is False:
return self._auth_abort(417, 'Invalid Parameter')
# Retrieve request XML body
payload = request.body
if payload is None:
log.error('Empty payload in auth:token()')
return self._auth_abort(417, 'Invalid Parameter')
# Retrieve the salt from the model
authModel = AuthenticateModel.AuthenticateModel()
login = authModel.getTokenLogin(payload)
if login is None:
return self._auth_abort(417, 'Invalid Parameter')
salt = authModel.getTokenLoginSalt(login)
if salt is None:
log.debug('Invalid user %s', login)
salt = encode(hashlib.sha1(os.urandom(4)).digest())
session['token'] = encode(hashlib.sha1(os.urandom(8)).digest())
session['token_salt'] = encode(hashlib.sha1(os.urandom(8)).digest())
session['salt'] = salt
session['login'] = login
session.save()
c.token_salt = session['token_salt']
c.salt = salt
response.set_cookie('token', session['token'])
response.headers['content-type'] = 'text/xml; charset=utf-8'
return render('/token.xml')
@restrict('POST')
def login(self):
"""Authenticate the user on ocsmanager.
"""
if not "ocsmanager" in request.cookies: return self._auth_abort(403, 'Invalid Session')
if not "token" in session: return self._auth_abort(403, 'Invalid Session')
if not "token" in request.cookies: return self._auth_abort(403, 'Invalid Token')
if request.cookies.get('token') != session['token']: return self._auth_abort(403, 'Invalid Token')
if not "login" in session: return self._auth_abort(403, 'Invalid Session')
payload = request.body
if payload is None:
log.error('Empty payload in auth:login()')
return self._auth_abort(417, 'Invalid Parameter')
authModel = AuthenticateModel.AuthenticateModel()
(error, msg) = authModel.verifyPassword(session['login'], session['token_salt'], session['salt'], payload)
if error is True:
response.delete_cookie('token')
session['token'] = None
return self._auth_abort(401, 'Invalid credentials')
# Authentication was successful, remove auth token - no longer needed
session['token'] = None
response.delete_cookie('token')
session['tokenLogin'] = hashlib.sha1(os.urandom(8)).hexdigest()
session.save()
c.tokenLogin = encode(session['tokenLogin'])
c.ttl = 10
return render('/login.xml')
| gpl-3.0 |
JGulbronson/rmc | data/aggregator.py | 2 | 14325 | import argparse
import mongoengine
import redis
import sys
import rmc.models as m
import rmc.shared.constants as c
import rmc.shared.facebook as facebook
import rmc.shared.util as rmc_util
import rmc.data.crawler as rmc_crawler
import rmc.data.processor as rmc_processor
# TODO(mack): remove duplication of fields throughout code
# TODO(mack): deprecate overall rating
r = redis.StrictRedis(host=c.REDIS_HOST, port=c.REDIS_PORT, db=c.REDIS_DB)
PROFESSOR_RATING_FIELDS = [
'easiness',
'clarity',
'passion',
]
COURSE_RATING_FIELDS = [
'easiness',
'interest',
'usefulness',
'overall',
]
def increment_ratings(courses, get_rating_fn, get_fields_fn, ucs):
for uc in ucs:
ratings = get_rating_fn(courses, uc)
if not ratings:
continue
for field_key, field_value in get_fields_fn(uc):
if field_value is not None:
ratings[field_key].add_rating(field_value)
def increment_aggregate_ratings(courses, get_rating_fn, get_fields_fn, ucs):
for uc in ucs:
ratings = get_rating_fn(courses, uc)
if not ratings:
continue
for field_key, field_value in get_fields_fn(uc):
if field_value is not None:
ratings[field_key].add_aggregate_rating(field_value)
def update_mongo_course_rating():
# course => ratings
def get_rating_fn(courses, uc):
if uc.course_id not in courses:
obj = {}
for field in COURSE_RATING_FIELDS:
obj[field] = m.AggregateRating()
courses[uc.course_id] = obj
return courses[uc.course_id]
def get_fields_fn(uc):
easiness = uc.course_review.easiness
interest = uc.course_review.interest
usefulness = uc.course_review.usefulness
if easiness and interest:
overall = (easiness + interest) / 2
elif easiness:
overall = easiness
else:
overall = interest
return [
('easiness', easiness),
('interest', interest),
('overall', overall),
('usefulness', usefulness),
]
def get_aggregate_fields_fn(uc):
easiness = uc.easiness
interest = uc.interest
# TODO(mack): add usefulness metric
def calculate_overall_rating(e, i):
return ((e.count * e.rating + i.count * i.rating) /
max(1, (e.count + i.count)))
# heuristic for getting the overall rating:
# 1. the count will max of the count for each attribute
# 2. the rating will be average
overall = m.AggregateRating(
count=max(easiness.count, interest.count),
rating=calculate_overall_rating(easiness, interest),
)
return [
('easiness', easiness),
('interest', interest),
('overall', overall),
]
courses = {}
args = [courses, get_rating_fn]
menlo_ucs = m.MenloCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
flow_ucs = m.UserCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
increment_ratings(*(args + [get_fields_fn, menlo_ucs]))
increment_ratings(*(args + [get_fields_fn, flow_ucs]))
count = [0]
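    # single-element list used as a mutable counter so the nested function below can
    # increment it (Python 2 has no `nonlocal`)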
def set_course_ratings_in_mongo(courses):
for course_id, ratings in courses.items():
course = m.Course.objects.with_id(course_id)
if not course:
print 'could not find course %s in mongo' % course_id
continue
course.easiness = ratings['easiness']
course.interest = ratings['interest']
course.usefulness = ratings['usefulness']
course.overall = ratings['overall']
course.save()
count[0] += 1
set_course_ratings_in_mongo(courses)
print 'saved ratings for %d courses in mongodb' % count[0]
def update_mongo_course_professors():
count = 0
for course in m.Course.objects.only('professor_ids'):
def get_professor_ids(course, coll):
course_prof_ids_only = (coll.objects(course_id=course.id)
.only('professor_id'))
return set(
[x.professor_id for x in course_prof_ids_only
if x.professor_id]
)
professor_ids = get_professor_ids(course, m.UserCourse).union(
get_professor_ids(course, m.MenloCourse))
# TODO(mack): Looks like add_to_set doesn't validate that each item
# in the list meets the schema since it seemed to be letting me
# writing lists that contained None. Investigate if this is what it
# is doing.
course.update(add_to_set__professor_ids=list(professor_ids))
count += 1
print 'added professors for %d courses in mongodb' % count
def update_redis_course_professor_rating():
# course => professors => ratings
def get_rating_fn(courses, uc):
if uc.professor_id is None:
return None
if uc.course_id not in courses:
courses[uc.course_id] = {}
professors = courses[uc.course_id]
if uc.professor_id not in professors:
obj = {}
for field in PROFESSOR_RATING_FIELDS:
obj[field] = m.AggregateRating()
professors[uc.professor_id] = obj
return professors[uc.professor_id]
def get_fields_fn(uc):
return [
('easiness', uc.course_review.easiness),
('clarity', uc.professor_review.clarity),
('passion', uc.professor_review.passion),
]
def get_aggregate_fields_fn(uc):
return [
('easiness', uc.easiness),
('clarity', uc.clarity),
('passion', uc.passion),
]
courses = {}
args = [courses, get_rating_fn]
menlo_ucs = m.MenloCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
flow_ucs = m.UserCourse.get_publicly_visible(rmc_util.MIN_NUM_RATINGS)
increment_ratings(*(args + [get_fields_fn, menlo_ucs]))
increment_ratings(*(args + [get_fields_fn, flow_ucs]))
count = [0]
def set_course_professor_ratings_in_redis(courses):
for course_id, professors in courses.items():
for professor_id, ratings in professors.items():
if professor_id is None:
continue
professor = m.Professor.objects.with_id(professor_id)
if not professor:
continue
for rating_type, aggregate_rating in ratings.items():
professor.set_course_rating_in_redis(
course_id, rating_type, aggregate_rating)
count[0] += 1
set_course_professor_ratings_in_redis(courses)
print 'set %d course professor rating keys in redis' % count[0]
def update_all_fb_friend_list():
for user in m.User.objects():
# TODO(Sandy): Batch requests for performance
if user.fbid and not user.is_fb_token_expired:
try:
user.update_fb_friends(
facebook.get_friend_list(user.fb_access_token))
user.save()
except facebook.FacebookOAuthException as e:
user.fb_access_token_invalid = True
user.save()
except Exception as e:
print "get_friend_list failed for %s with: %s" % (user.id,
e.message)
# TODO(mack): test it when we get data to test with
# TODO(mack): currently sort of duplicate logic in
# User.cache_mutual_course_ids()
def update_redis_friend_mutual_courses():
# TODO(Sandy): Use friend real time updates after it. There's a fb updates
# branch for this, pending on:
# https://developers.facebook.com/bugs/374296595988186?browse=search_50990ddb8a19d9316431973
# Rate limit is 600 calls / 600 seconds / token:
# http://stackoverflow.com/questions/8805316/facebook-graph-api-rate-limit-and-batch-requests
update_all_fb_friend_list()
courses_by_user = {}
for user in m.User.objects.only('friend_ids', 'course_history'):
friend_ids = [str(friend_id) for friend_id in user.friend_ids]
ucs = (m.UserCourse.objects(id__in=user.course_history)
.only('course_id'))
course_ids = [uc.course_id for uc in ucs]
courses_by_user[str(user.id)] = [friend_ids, set(course_ids)]
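    # courses_by_user maps user_id -> [friend_ids, set(course_ids)]; pairs are ordered with
    # the smaller id first below so each friendship is only processed once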
count = 0
user_pair = set()
for user_id, (friend_ids, courses) in courses_by_user.iteritems():
for friend_id in friend_ids:
if user_id < friend_id:
first_id = user_id
second_id = friend_id
else:
first_id = friend_id
second_id = user_id
if (first_id, second_id) in user_pair:
continue
friend_courses = courses_by_user[friend_id][1]
mutual_courses = courses.intersection(friend_courses)
if mutual_courses:
count += 1
redis_key = m.User.cls_mutual_courses_redis_key(
first_id, second_id)
r.sadd(redis_key, *list(mutual_courses))
user_pair.add((first_id, second_id))
print 'set %d friend pair keys in redis' % count
def update_mongo_points():
total_points = 0
num_course_comments = 0
num_course_ratings = 0
num_course_shares = 0
num_professor_comments = 0
num_professor_ratings = 0
num_professor_shares = 0
num_invites = 0
for user in m.User.objects.only(
'num_invites', 'course_history', 'num_points'):
num_points = 0
if user.num_invites:
num_points += m.PointSource.FIRST_INVITE
num_invites += 1
for uc in m.UserCourse.objects(id__in=user.course_history):
num_points += uc.num_points
if uc.course_review.has_commented:
num_course_comments += 1
if uc.course_review.has_been_rated:
num_course_ratings += 1
if uc.course_review.has_shared:
num_course_shares += 1
if uc.professor_review.has_commented:
num_professor_comments += 1
if uc.professor_review.has_been_rated:
num_professor_ratings += 1
if uc.professor_review.has_shared:
num_professor_shares += 1
user.update(set__num_points=num_points)
total_points += num_points
r.set('total_points', total_points)
    print '=== update_mongo_points ==='
print 'num_course_comments', num_course_comments
print 'num_course_ratings', num_course_ratings
print 'num_course_shares', num_course_shares
print 'num_professor_comments', num_professor_comments
print 'num_professor_ratings', num_professor_ratings
print 'num_professor_shares', num_professor_shares
print 'num_invites', num_invites
def update_exam_schedule():
# Crawl data and store on disk
rmc_crawler.get_opendata_exam_schedule()
# Process the data on disk
errors = rmc_processor.import_opendata_exam_schedules()
print "%d exam schedule items found" % m.Exam.objects().count()
print "%d exam schedule items skipped" % len(errors)
def update_sections():
# Fetch data from OpenData API and cache to files.
rmc_crawler.get_opendata_sections()
# Import from files to DB.
rmc_processor.import_opendata_sections()
# Send push notifications about seat openings.
num_sent = m.GcmCourseAlert.send_eligible_alerts()
num_expired = m.GcmCourseAlert.delete_expired()
print 'Sent %s push notifications and expired %s' % (num_sent, num_expired)
def update_courses():
# First get an up to date list of departments and write to a text file
print "Fetching departments"
rmc_crawler.get_departments()
# Import any departments we don't already have into Mongo
print "Loading departments into Mongo"
rmc_processor.import_departments()
# Hit the endpoints of the OpenData API for each department
print "Fetching courses"
rmc_crawler.get_opendata2_courses()
# Load the data into Mongo
print "Loading courses into Mongo"
rmc_processor.import_courses()
def update_professors_departments():
"""Update the departments_taught field for each professor in Mongo"""
for prof in m.Professor.objects():
prof.departments_taught = prof.get_departments_taught()
prof.save()
def update_scholarships():
"""Update the scholarships available in Mongo"""
print "Fetching scholarships"
rmc_crawler.get_scholarships()
print "Loading scholarships into Mongo"
rmc_processor.import_scholarships()
if __name__ == '__main__':
mongoengine.connect(c.MONGO_DB_RMC)
parser = argparse.ArgumentParser()
mode_mapping = {
'redis_course_professor_rating': update_redis_course_professor_rating,
'redis_friend_mutual_courses': update_redis_friend_mutual_courses,
'mongo_course_rating': update_mongo_course_rating,
'mongo_course_professors': update_mongo_course_professors,
'mongo_points': update_mongo_points,
'exam_schedule': update_exam_schedule,
'sections': update_sections,
'courses': update_courses,
'prof_departments': update_professors_departments,
'scholarships': update_scholarships
}
parser.add_argument('mode',
help='one of %s' % ','.join(mode_mapping.keys() + ['daily']))
args = parser.parse_args()
if args.mode == 'daily':
daily_functions = [
update_redis_course_professor_rating,
update_redis_friend_mutual_courses,
update_mongo_course_rating,
update_mongo_course_professors,
update_mongo_points,
update_exam_schedule,
update_professors_departments
]
for func in daily_functions:
try:
func()
except Exception as exp:
print "aggregator.py: function %s threw an exception" % (func)
print exp
elif args.mode in mode_mapping:
func = mode_mapping[args.mode]
func()
else:
sys.exit('The mode %s is not supported' % args.mode)
| mit |
openhdf/enigma2-wetek | lib/python/Components/Renderer/valioOledInfo.py | 13 | 2222 | # -*- coding: utf-8 -*-
#
# OLED-Info Renderer for Dreambox/Enigma-2
# Version: 1.0
# Coded by Vali (c)2011
#
#######################################################################
from enigma import eLabel
from Renderer import Renderer
from os import popen
from time import localtime, strftime
from Components.VariableText import VariableText
from Components.Sensors import sensors
from Components.config import config
from Tools.HardwareInfo import HardwareInfo
class valioOledInfo(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
try:
self.infozahl = int(config.valiflex.OledInfo.value)
except:
self.infozahl = 12
self.Zaehler = 0
self.oben = "---"
self.unten = "---"
GUI_WIDGET = eLabel
def changed(self, what):
if not self.suspended:
if self.Zaehler > self.infozahl:
self.Zaehler = 0
if self.Zaehler == 0:
self.hide()
elif self.Zaehler == 6:
self.show()
t = localtime(self.source.time)
self.oben = _(strftime("%a", t)) + " " +strftime("%d", t)
self.unten = "%02d:%02d" % (t.tm_hour, t.tm_min)
elif self.Zaehler == 14:
self.oben = "temp:"
maxtemp = 0
try:
templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
tempcount = len(templist)
for count in range(tempcount):
id = templist[count]
tt = sensors.getSensorValue(id)
if tt > maxtemp:
maxtemp = tt
except:
pass
self.unten = str(maxtemp) + " °C"
elif self.Zaehler == 21:
self.oben = "loads:"
loada = 0
try:
out_line = open("/proc/loadavg").readline()
loada = out_line[:4]
except:
pass
self.unten = loada
elif self.Zaehler == 28:
self.oben = "free:"
out_lines = []
out_lines = open("/proc/meminfo").readlines()
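				# each /proc/meminfo line looks like e.g. "MemFree:  123456 kB";
				# the value is in kB, hence the /1024 below to display MB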
for lidx in range(len(out_lines)-1):
tstLine = out_lines[lidx].split()
if "MemFree:" in tstLine:
templ = int(out_lines[lidx].split()[1])
fmem = "%d mb" %(templ/1024)
self.unten = str(fmem)
self.Zaehler = self.Zaehler + 1
self.text = self.oben + "\n" + self.unten
def onShow(self):
self.suspended = False
self.changed(None)
def onHide(self):
self.suspended = True
| gpl-2.0 |