repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (981 classes) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 classes) |
---|---|---|---|---|---|
eharney/cinder | cinder/api/v3/attachments.py | 1 | 11362 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from oslo_log import log as logging
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import attachments as attachment_views
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
"""The Attachments API controller for the OpenStack API."""
_view_builder_class = attachment_views.ViewBuilder
allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}
def __init__(self, ext_mgr=None):
"""Initialize controller class."""
self.volume_api = volume_api.API()
self.ext_mgr = ext_mgr
super(AttachmentsController, self).__init__()
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def show(self, req, id):
"""Return data about the given attachment."""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
@common.process_general_filtering('attachment')
def _process_attachment_filtering(self, context=None, filters=None,
req_version=None):
utils.remove_invalid_filter_options(context, filters,
self.allowed_filters)
def _items(self, req):
"""Return a list of attachments, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
self._process_attachment_filtering(context=context,
filters=search_opts,
req_version=req_version)
if search_opts.get('instance_id', None):
search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
if context.is_admin and 'all_tenants' in search_opts:
del search_opts['all_tenants']
return objects.VolumeAttachmentList.get_all(
context, search_opts=search_opts, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
else:
return objects.VolumeAttachmentList.get_all_by_project(
context, context.project_id, search_opts=search_opts,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_direction=sort_dirs)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@wsgi.response(202)
def create(self, req, body):
"""Create an attachment.
This method can be used to create an empty attachment (reserve) or to
create and initialize a volume attachment based on the provided input
parameters.
If the caller does not yet have the connector information but needs to
reserve an attachment for the volume (i.e., Nova BootFromVolume), the
create can be called with just the volume UUID and the server
identifier. This will reserve an attachment, mark the volume as
reserved, and prevent any new attachment_create calls from being made
until the attachment is updated (completed).
The alternative is that the connection can be reserved and initialized
all at once with a single call if the caller has all of the required
information (connector data) at the time of the call.
NOTE: In Nova terms, server == instance; the server_id parameter
referenced below is the UUID of the instance. For non-Nova consumers
this can be a server UUID or some other arbitrary unique identifier.
Expected format of the input parameter 'body':
.. code-block:: json
{
"attachment":
{
"volume_uuid": "volume-uuid",
"instance_uuid": "nova-server-uuid",
"connector": "null|<connector-object>"
}
}
Example connector:
.. code-block:: json
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "null|rw|ro"
}
}
NOTE: all that's required for a reserve is a volume_uuid
and an instance_uuid.
returns: A summary view of the attachment object
"""
context = req.environ['cinder.context']
instance_uuid = body['attachment'].get('instance_uuid', None)
if not instance_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'instance_uuid' "
"to create attachment."))
volume_uuid = body['attachment'].get('volume_uuid', None)
if not volume_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'volume_uuid' "
"to create attachment."))
volume_ref = objects.Volume.get_by_id(
context,
volume_uuid)
connector = body['attachment'].get('connector', None)
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_create(context,
volume_ref,
instance_uuid,
connector=connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = _(
"Unable to create attachment for volume (%s).") % ex.msg
LOG.exception(err_msg)
except Exception as ex:
err_msg = _("Unable to create attachment for volume.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def update(self, req, id, body):
"""Update an attachment record.
Update a reserved attachment record with connector information and set
up the appropriate connection_info from the driver.
Expected format of the input parameter 'body':
.. code:: json
{
"attachment":
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False,
"mountpoint": "/dev/vdb",
"mode": None|"rw"|"ro",
}
}
}
"""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
connector = body['attachment'].get('connector', None)
if not connector:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector' "
"to update attachment."))
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_update(context,
attachment_ref,
connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = (
_("Unable to update attachment.(%s).") % ex.msg)
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to update the attachment.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
# TODO(jdg): Test this out some more; do we want to return an object
# or a dict?
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def delete(self, req, id):
"""Delete an attachment.
Disconnects/deletes the specified attachment and returns a list of any
known shared attachment IDs for the affected backend device.
returns: A summary list of any attachments sharing this connection
"""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
attachments = self.volume_api.attachment_delete(context, attachment)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.response(202)
@wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
@wsgi.action('os-complete')
def complete(self, req, id, body):
"""Mark a volume attachment process as completed (in-use)."""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
volume_ref = objects.Volume.get_by_id(
context,
attachment_ref.volume_id)
attachment_ref.update({'attach_status': 'attached'})
attachment_ref.save()
volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
volume_ref.save()
def create_resource(ext_mgr):
"""Create the wsgi resource for this controller."""
return wsgi.Resource(AttachmentsController(ext_mgr))
| apache-2.0 |
davidmueller13/android_kernel_lge_g3-2 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
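#
# A minimal illustration (hypothetical input, not a real cxacru-cf.bin): the
# two packed le32 values b'\x01\x00\x00\x00' and b'\x0a\x00\x00\x00' would be
# printed by the loop below as "0=1 1=10", i.e. "<hex index>=<decimal value>"
# pairs suitable for the sysfs adsl_config attribute.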
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
travellhyne/f2py | fparser/parsefortran.py | 3 | 5789 | #!/usr/bin/env python
"""Provides FortranParser.
"""
#Author: Pearu Peterson <[email protected]>
#Created: May 2006
__autodoc__ = ['FortranParser']
__all__ = ['FortranParser']
import re
import sys
import traceback
import logging
from numpy.distutils.misc_util import yellow_text, red_text
from readfortran import FortranFileReader, FortranStringReader
from block_statements import BeginSource
from utils import AnalyzeError
logger = logging.getLogger('fparser')
class FortranParser(object):
"""Parser of FortranReader structure.
Use the .parse() method for parsing; the parsing result is saved in the .block attribute.
"""
cache = {}
def __init__(self, reader, ignore_comments=True):
self.reader = reader
if reader.id in self.cache:
parser = self.cache[reader.id]
self.block = parser.block
self.is_analyzed = parser.is_analyzed
logger.info('using cached %s' % (reader.id))
# self.block.show_message('using cached %s' % (reader.id))
else:
self.cache[reader.id] = self
self.block = None
self.is_analyzed = False
self.ignore_comments = ignore_comments
return
def get_item(self):
try:
item = self.reader.next(ignore_comments = self.ignore_comments)
return item
except StopIteration:
pass
return
def put_item(self, item):
self.reader.fifo_item.insert(0, item)
return
def parse(self):
if self.block is not None:
return
try:
block = self.block = BeginSource(self)
except KeyboardInterrupt:
raise
except:
reader = self.reader
while reader is not None:
message = reader.format_message('FATAL ERROR',
'while processing line',
reader.linecount, reader.linecount)
logger.critical(message)
# reader.show_message(message, sys.stderr)
reader = reader.reader
logger.debug(''.join(('Traceback\n',''.join( traceback.format_stack() ))))
# traceback.print_exc(file=sys.stderr)
logger.critical(red_text('STOPPED PARSING'))
# self.reader.show_message(red_text('STOPPED PARSING'), sys.stderr)
return
return
def analyze(self):
if self.is_analyzed:
return
if self.block is None:
logger.info('Nothing to analyze.')
# self.reader.show_message('Nothing to analyze.')
return
try:
self.block.analyze()
except AnalyzeError:
pass
# except Exception, msg:
# import pdb; pdb.set_trace()
# if str(msg) != '123454321':
# #print self.block
# logger.debug(''.join(('Traceback\n',''.join( traceback.format_stack() ))))
# logger.critical(red_text('FATAL ERROR: STOPPED ANALYSING %r CONTENT' % (self.reader.source) ))
# # self.reader.show_message(red_text('FATAL ERROR: STOPPED ANALYSING %r CONTENT' % (self.reader.source) ), sys.stderr)
# sys.exit(123454321)
# return
self.is_analyzed = True
return
def test_pyf():
string = """
python module foo
interface tere
subroutine bar
real r
end subroutine bar
end interface tere
end python module foo
"""
reader = FortranStringReader(string, True, True)
parser = FortranParser(reader)
block = parser.parse()
print block
def test_free90():
string = """
module foo
subroutine bar
real r
if ( pc_get_lun() .ne. 6) &
write ( pc_get_lun(), '( &
& /, a, /, " p=", i4, " stopping c_flag=", a, &
& /, " print unit=", i8)') &
trim(title), pcpsx_i_pel(), trim(c_flag), pc_get_lun()
if (.true.) then
call smth
end if
aaa : if (.false.) then
else if (a) then aaa
else aaa
end if aaa
hey = 1
end subroutine bar
abstract interface
end interface
end module foo
"""
reader = FortranStringReader(string, True, False)
parser = FortranParser(reader)
block = parser.parse()
print block
def test_f77():
string = """\
program foo
a = 3
end
subroutine bar
end
pure function foo(a)
end
pure real*4 recursive function bar()
end
"""
reader = FortranStringReader(string, False, True)
parser = FortranParser(reader)
block = parser.parse()
print block
def simple_main():
import sys
if not sys.argv[1:]:
return parse_all_f()
for filename in sys.argv[1:]:
reader = FortranFileReader(filename)
print yellow_text('Processing '+filename+' (mode=%r)' % (reader.mode))
parser = FortranParser(reader)
parser.parse()
parser.analyze()
print parser.block.torepr(4)
#print parser.block
def profile_main():
import hotshot, hotshot.stats
prof = hotshot.Profile("_parsefortran.prof")
prof.runcall(simple_main)
prof.close()
stats = hotshot.stats.load("_parsefortran.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(30)
def parse_all_f():
for filename in open('opt_all_f.txt'):
filename = filename.strip()
reader = FortranFileReader(filename)
print yellow_text('Processing '+filename+' (mode=%r)' % (reader.mode))
parser = FortranParser(reader)
block = parser.parse()
print block
if __name__ == "__main__":
#test_f77()
#test_free90()
#test_pyf()
simple_main()
#profile_main()
#parse_all_f()
| bsd-3-clause |
laszlocsomor/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/permute_test.py | 26 | 3206 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Permute bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.permute import Permute
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class PermuteBijectorTest(test.TestCase):
"""Tests correctness of the Permute bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
expected_permutation = np.int32([2, 0, 1])
expected_x = np.random.randn(4, 2, 3)
expected_y = expected_x[..., expected_permutation]
with self.test_session() as sess:
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
[
permutation_,
x_,
y_,
fldj,
ildj,
] = sess.run([
bijector.permutation,
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x),
bijector.inverse_log_det_jacobian(expected_y),
], feed_dict={permutation_ph: expected_permutation})
self.assertEqual("permute", bijector.name)
self.assertAllEqual(expected_permutation, permutation_)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj, rtol=1e-6, atol=0)
def testRaisesOpError(self):
with self.test_session() as sess:
with self.assertRaisesOpError("Permutation over `d` must contain"):
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
sess.run(bijector.inverse([1.]),
feed_dict={permutation_ph: [1, 2]})
def testBijectiveAndFinite(self):
permutation = np.int32([2, 0, 1])
x = np.random.randn(4, 2, 3)
y = x[..., permutation]
with self.test_session():
bijector = Permute(
permutation=permutation,
validate_args=True)
assert_bijective_and_finite(bijector, x, y, rtol=1e-6, atol=0)
if __name__ == "__main__":
test.main()
| apache-2.0 |
timm/timmnix | pypy3-v5.5.0-linux64/lib-python/3/test/test_strtod.py | 4 | 20594 | # Tests for the correctly-rounded string -> float conversions
# introduced in Python 2.7 and 3.1.
import random
import unittest
import re
import sys
import test.support
if getattr(sys, 'float_repr_style', '') != 'short':
raise unittest.SkipTest('correctly-rounded string->float conversions '
'not available on this system')
# Correctly rounded str -> float in pure Python, for comparison.
strtod_parser = re.compile(r""" # A numeric string consists of:
(?P<sign>[-+])? # an optional sign, followed by
(?=\d|\.\d) # a number with at least one digit
(?P<int>\d*) # having a (possibly empty) integer part
(?:\.(?P<frac>\d*))? # followed by an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and an optional exponent
\Z
""", re.VERBOSE | re.IGNORECASE).match
# Pure Python version of correctly rounded string->float conversion.
# Avoids any use of floating-point by returning the result as a hex string.
def strtod(s, mant_dig=53, min_exp = -1021, max_exp = 1024):
"""Convert a finite decimal string to a hex string representing an
IEEE 754 binary64 float. Return 'inf' or '-inf' on overflow.
This function makes no use of floating-point arithmetic at any
stage."""
# parse string into a pair of integers 'a' and 'b' such that
# abs(decimal value) = a/b, along with a boolean 'negative'.
m = strtod_parser(s)
if m is None:
raise ValueError('invalid numeric string')
fraction = m.group('frac') or ''
intpart = int(m.group('int') + fraction)
exp = int(m.group('exp') or '0') - len(fraction)
negative = m.group('sign') == '-'
a, b = intpart*10**max(exp, 0), 10**max(0, -exp)
# quick return for zeros
if not a:
return '-0x0.0p+0' if negative else '0x0.0p+0'
# compute exponent e for result; may be one too small in the case
# that the rounded value of a/b lies in a different binade from a/b
d = a.bit_length() - b.bit_length()
d += (a >> d if d >= 0 else a << -d) >= b
e = max(d, min_exp) - mant_dig
# approximate a/b by number of the form q * 2**e; adjust e if necessary
a, b = a << max(-e, 0), b << max(e, 0)
q, r = divmod(a, b)
if 2*r > b or 2*r == b and q & 1:
q += 1
if q.bit_length() == mant_dig+1:
q //= 2
e += 1
# double check that (q, e) has the right form
assert q.bit_length() <= mant_dig and e >= min_exp - mant_dig
assert q.bit_length() == mant_dig or e == min_exp - mant_dig
# check for overflow and underflow
if e + q.bit_length() > max_exp:
return '-inf' if negative else 'inf'
if not q:
return '-0x0.0p+0' if negative else '0x0.0p+0'
# for hex representation, shift so # bits after point is a multiple of 4
hexdigs = 1 + (mant_dig-2)//4
shift = 3 - (mant_dig-2)%4
q, e = q << shift, e - shift
return '{}0x{:x}.{:0{}x}p{:+d}'.format(
'-' if negative else '',
q // 16**hexdigs,
q % 16**hexdigs,
hexdigs,
e + 4*hexdigs)
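# A quick sanity check of strtod (assuming the IEEE 754 binary64 defaults):
# strtod('0.1') returns '0x1.999999999999ap-4', which matches float('0.1').hex(),
# and strtod('1e400') overflows to 'inf'.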
TEST_SIZE = 10
class StrtodTests(unittest.TestCase):
def check_strtod(self, s):
"""Compare the result of Python's builtin correctly rounded
string->float conversion (using float) to a pure Python
correctly rounded string->float implementation. Fail if the
two methods give different results."""
try:
fs = float(s)
except OverflowError:
got = '-inf' if s[0] == '-' else 'inf'
except MemoryError:
got = 'memory error'
else:
got = fs.hex()
expected = strtod(s)
self.assertEqual(expected, got,
"Incorrectly rounded str->float conversion for {}: "
"expected {}, got {}".format(s, expected, got))
def test_short_halfway_cases(self):
# exact halfway cases with a small number of significant digits
for k in 0, 5, 10, 15, 20:
# upper = smallest integer >= 2**54/5**k
upper = -(-2**54//5**k)
# lower = smallest odd number >= 2**53/5**k
lower = -(-2**53//5**k)
if lower % 2 == 0:
lower += 1
for i in range(TEST_SIZE):
# Select a random odd n in [2**53/5**k,
# 2**54/5**k). Then n * 10**k gives a halfway case
# with small number of significant digits.
n, e = random.randrange(lower, upper, 2), k
# Remove any additional powers of 5.
while n % 5 == 0:
n, e = n // 5, e + 1
assert n % 10 in (1, 3, 7, 9)
# Try numbers of the form n * 2**p2 * 10**e, p2 >= 0,
# until n * 2**p2 has more than 20 significant digits.
digits, exponent = n, e
while digits < 10**20:
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
# Same again, but with extra trailing zeros.
s = '{}e{}'.format(digits * 10**40, exponent - 40)
self.check_strtod(s)
digits *= 2
# Try numbers of the form n * 5**p2 * 10**(e - p5), p5
# >= 0, with n * 5**p5 < 10**20.
digits, exponent = n, e
while digits < 10**20:
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
# Same again, but with extra trailing zeros.
s = '{}e{}'.format(digits * 10**40, exponent - 40)
self.check_strtod(s)
digits *= 5
exponent -= 1
def test_halfway_cases(self):
# test halfway cases for the round-half-to-even rule
for i in range(100 * TEST_SIZE):
# bit pattern for a random finite positive (or +0.0) float
bits = random.randrange(2047*2**52)
# convert bit pattern to a number of the form m * 2**e
e, m = divmod(bits, 2**52)
if e:
m, e = m + 2**52, e - 1
e -= 1074
# add 0.5 ulps
m, e = 2*m + 1, e - 1
# convert to a decimal string
if e >= 0:
digits = m << e
exponent = 0
else:
# m * 2**e = (m * 5**-e) * 10**e
digits = m * 5**-e
exponent = e
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
def test_boundaries(self):
# boundaries expressed as triples (n, e, u), where
# n*10**e is an approximation to the boundary value and
# u*10**e is 1ulp
boundaries = [
(10000000000000000000, -19, 1110), # a power of 2 boundary (1.0)
(17976931348623159077, 289, 1995), # overflow boundary (2.**1024)
(22250738585072013831, -327, 4941), # normal/subnormal (2.**-1022)
(0, -327, 4941), # zero
]
for n, e, u in boundaries:
for j in range(1000):
digits = n + random.randrange(-3*u, 3*u)
exponent = e
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
n *= 10
u *= 10
e -= 1
def test_underflow_boundary(self):
# test values close to 2**-1075, the underflow boundary; similar
# to boundary_tests, except that the random error doesn't scale
# with n
for exponent in range(-400, -320):
base = 10**-exponent // 2**1075
for j in range(TEST_SIZE):
digits = base + random.randrange(-1000, 1000)
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
def test_bigcomp(self):
for ndigs in 5, 10, 14, 15, 16, 17, 18, 19, 20, 40, 41, 50:
dig10 = 10**ndigs
for i in range(10 * TEST_SIZE):
digits = random.randrange(dig10)
exponent = random.randrange(-400, 400)
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
def test_parsing(self):
# make '0' more likely to be chosen than other digits
digits = '000000123456789'
signs = ('+', '-', '')
# put together random short valid strings
# \d*[.\d*]?e
for i in range(1000):
for j in range(TEST_SIZE):
s = random.choice(signs)
intpart_len = random.randrange(5)
s += ''.join(random.choice(digits) for _ in range(intpart_len))
if random.choice([True, False]):
s += '.'
fracpart_len = random.randrange(5)
s += ''.join(random.choice(digits)
for _ in range(fracpart_len))
else:
fracpart_len = 0
if random.choice([True, False]):
s += random.choice(['e', 'E'])
s += random.choice(signs)
exponent_len = random.randrange(1, 4)
s += ''.join(random.choice(digits)
for _ in range(exponent_len))
if intpart_len + fracpart_len:
self.check_strtod(s)
else:
try:
float(s)
except ValueError:
pass
else:
assert False, "expected ValueError"
@test.support.bigmemtest(size=test.support._2G+10, memuse=4, dry_run=False)
def test_oversized_digit_strings(self, maxsize):
# Input string whose length doesn't fit in an INT.
s = "1." + "1" * maxsize
with self.assertRaises(ValueError):
float(s)
del s
s = "0." + "0" * maxsize + "1"
with self.assertRaises(ValueError):
float(s)
del s
def test_large_exponents(self):
# Verify that the clipping of the exponent in strtod doesn't affect the
# output values.
def positive_exp(n):
""" Long string with value 1.0 and exponent n"""
return '0.{}1e+{}'.format('0'*(n-1), n)
def negative_exp(n):
""" Long string with value 1.0 and exponent -n"""
return '1{}e-{}'.format('0'*n, n)
self.assertEqual(float(positive_exp(10000)), 1.0)
self.assertEqual(float(positive_exp(20000)), 1.0)
self.assertEqual(float(positive_exp(30000)), 1.0)
self.assertEqual(float(negative_exp(10000)), 1.0)
self.assertEqual(float(negative_exp(20000)), 1.0)
self.assertEqual(float(negative_exp(30000)), 1.0)
def test_particular(self):
# inputs that produced crashes or incorrectly rounded results with
# previous versions of dtoa.c, for various reasons
test_strings = [
# issue 7632 bug 1, originally reported failing case
'2183167012312112312312.23538020374420446192e-370',
# 5 instances of issue 7632 bug 2
'12579816049008305546974391768996369464963024663104e-357',
'17489628565202117263145367596028389348922981857013e-357',
'18487398785991994634182916638542680759613590482273e-357',
'32002864200581033134358724675198044527469366773928e-358',
'94393431193180696942841837085033647913224148539854e-358',
'73608278998966969345824653500136787876436005957953e-358',
'64774478836417299491718435234611299336288082136054e-358',
'13704940134126574534878641876947980878824688451169e-357',
'46697445774047060960624497964425416610480524760471e-358',
# failing case for bug introduced by METD in r77451 (attempted
# fix for issue 7632, bug 2), and fixed in r77482.
'28639097178261763178489759107321392745108491825303e-311',
# two numbers demonstrating a flaw in the bigcomp 'dig == 0'
# correction block (issue 7632, bug 3)
'1.00000000000000001e44',
'1.0000000000000000100000000000000000000001e44',
# dtoa.c bug for numbers just smaller than a power of 2 (issue
# 7632, bug 4)
'99999999999999994487665465554760717039532578546e-47',
# failing case for off-by-one error introduced by METD in
# r77483 (dtoa.c cleanup), fixed in r77490
'965437176333654931799035513671997118345570045914469' #...
'6213413350821416312194420007991306908470147322020121018368e0',
# incorrect lsb detection for round-half-to-even when
# bc->scale != 0 (issue 7632, bug 6).
'104308485241983990666713401708072175773165034278685' #...
'682646111762292409330928739751702404658197872319129' #...
'036519947435319418387839758990478549477777586673075' #...
'945844895981012024387992135617064532141489278815239' #...
'849108105951619997829153633535314849999674266169258' #...
'928940692239684771590065027025835804863585454872499' #...
'320500023126142553932654370362024104462255244034053' #...
'203998964360882487378334860197725139151265590832887' #...
'433736189468858614521708567646743455601905935595381' #...
'852723723645799866672558576993978025033590728687206' #...
'296379801363024094048327273913079612469982585674824' #...
'156000783167963081616214710691759864332339239688734' #...
'656548790656486646106983450809073750535624894296242' #...
'072010195710276073042036425579852459556183541199012' #...
'652571123898996574563824424330960027873516082763671875e-1075',
# demonstration that original fix for issue 7632 bug 1 was
# buggy; the exit condition was too strong
'247032822920623295e-341',
# demonstrate similar problem to issue 7632 bug1: crash
# with 'oversized quotient in quorem' message.
'99037485700245683102805043437346965248029601286431e-373',
'99617639833743863161109961162881027406769510558457e-373',
'98852915025769345295749278351563179840130565591462e-372',
'99059944827693569659153042769690930905148015876788e-373',
'98914979205069368270421829889078356254059760327101e-372',
# issue 7632 bug 5: the following 2 strings convert differently
'1000000000000000000000000000000000000000e-16',
'10000000000000000000000000000000000000000e-17',
# issue 7632 bug 7
'991633793189150720000000000000000000000000000000000000000e-33',
# And another, similar, failing halfway case
'4106250198039490000000000000000000000000000000000000000e-38',
# issue 7632 bug 8: the following produced 10.0
'10.900000000000000012345678912345678912345',
# two humongous values from issue 7743
'116512874940594195638617907092569881519034793229385' #...
'228569165191541890846564669771714896916084883987920' #...
'473321268100296857636200926065340769682863349205363' #...
'349247637660671783209907949273683040397979984107806' #...
'461822693332712828397617946036239581632976585100633' #...
'520260770761060725403904123144384571612073732754774' #...
'588211944406465572591022081973828448927338602556287' #...
'851831745419397433012491884869454462440536895047499' #...
'436551974649731917170099387762871020403582994193439' #...
'761933412166821484015883631622539314203799034497982' #...
'130038741741727907429575673302461380386596501187482' #...
'006257527709842179336488381672818798450229339123527' #...
'858844448336815912020452294624916993546388956561522' #...
'161875352572590420823607478788399460162228308693742' #...
'05287663441403533948204085390898399055004119873046875e-1075',
'525440653352955266109661060358202819561258984964913' #...
'892256527849758956045218257059713765874251436193619' #...
'443248205998870001633865657517447355992225852945912' #...
'016668660000210283807209850662224417504752264995360' #...
'631512007753855801075373057632157738752800840302596' #...
'237050247910530538250008682272783660778181628040733' #...
'653121492436408812668023478001208529190359254322340' #...
'397575185248844788515410722958784640926528544043090' #...
'115352513640884988017342469275006999104519620946430' #...
'818767147966495485406577703972687838176778993472989' #...
'561959000047036638938396333146685137903018376496408' #...
'319705333868476925297317136513970189073693314710318' #...
'991252811050501448326875232850600451776091303043715' #...
'157191292827614046876950225714743118291034780466325' #...
'085141343734564915193426994587206432697337118211527' #...
'278968731294639353354774788602467795167875117481660' #...
'4738791256853675690543663283782215866825e-1180',
# exercise exit conditions in bigcomp comparison loop
'2602129298404963083833853479113577253105939995688e2',
'260212929840496308383385347911357725310593999568896e0',
'26021292984049630838338534791135772531059399956889601e-2',
'260212929840496308383385347911357725310593999568895e0',
'260212929840496308383385347911357725310593999568897e0',
'260212929840496308383385347911357725310593999568996e0',
'260212929840496308383385347911357725310593999568866e0',
# 2**53
'9007199254740992.00',
# 2**1024 - 2**970: exact overflow boundary. All values
# smaller than this should round to something finite; any value
# greater than or equal to this one overflows.
'179769313486231580793728971405303415079934132710037' #...
'826936173778980444968292764750946649017977587207096' #...
'330286416692887910946555547851940402630657488671505' #...
'820681908902000708383676273854845817711531764475730' #...
'270069855571366959622842914819860834936475292719074' #...
'168444365510704342711559699508093042880177904174497792',
# 2**1024 - 2**970 - tiny
'179769313486231580793728971405303415079934132710037' #...
'826936173778980444968292764750946649017977587207096' #...
'330286416692887910946555547851940402630657488671505' #...
'820681908902000708383676273854845817711531764475730' #...
'270069855571366959622842914819860834936475292719074' #...
'168444365510704342711559699508093042880177904174497791.999',
# 2**1024 - 2**970 + tiny
'179769313486231580793728971405303415079934132710037' #...
'826936173778980444968292764750946649017977587207096' #...
'330286416692887910946555547851940402630657488671505' #...
'820681908902000708383676273854845817711531764475730' #...
'270069855571366959622842914819860834936475292719074' #...
'168444365510704342711559699508093042880177904174497792.001',
# 1 - 2**-54, +-tiny
'999999999999999944488848768742172978818416595458984375e-54',
'9999999999999999444888487687421729788184165954589843749999999e-54',
'9999999999999999444888487687421729788184165954589843750000001e-54',
# Value found by Rick Regan that gives a result of 2**-968
# under Gay's dtoa.c (as of Nov 04, 2010); since fixed.
# (Fixed some time ago in Python's dtoa.c.)
'0.0000000000000000000000000000000000000000100000000' #...
'000000000576129113423785429971690421191214034235435' #...
'087147763178149762956868991692289869941246658073194' #...
'51982237978882039897143840789794921875',
]
for s in test_strings:
self.check_strtod(s)
def test_main():
test.support.run_unittest(StrtodTests)
if __name__ == "__main__":
test_main()
| mit |
arbrandes/edx-platform | openedx/core/djangoapps/content/course_overviews/tests/test_tasks.py | 4 | 1982 | # lint-amnesty, pylint: disable=missing-module-docstring
from unittest import mock
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..tasks import enqueue_async_course_overview_update_tasks
class BatchedAsyncCourseOverviewUpdateTests(ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring
def setUp(self):
super().setUp()
self.course_1 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
self.course_2 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
self.course_3 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
@mock.patch('openedx.core.djangoapps.content.course_overviews.models.CourseOverview.update_select_courses')
def test_enqueue_all_courses_in_single_batch(self, mock_update_courses):
enqueue_async_course_overview_update_tasks(
course_ids=[],
force_update=True,
all_courses=True
)
called_args, called_kwargs = mock_update_courses.call_args_list[0]
assert sorted([self.course_1.id, self.course_2.id, self.course_3.id]) == sorted(called_args[0])
assert {'force_update': True} == called_kwargs
assert 1 == mock_update_courses.call_count
@mock.patch('openedx.core.djangoapps.content.course_overviews.models.CourseOverview.update_select_courses')
def test_enqueue_specific_courses_in_two_batches(self, mock_update_courses):
enqueue_async_course_overview_update_tasks(
course_ids=[str(self.course_1.id), str(self.course_2.id)],
force_update=True,
chunk_size=1,
all_courses=False
)
mock_update_courses.assert_has_calls([
mock.call([self.course_1.id], force_update=True),
mock.call([self.course_2.id], force_update=True)
])
| agpl-3.0 |
jluissandovalm/smd_lammps | python/examples/pizza/vmd.py | 31 | 8758 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, [email protected], Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# vmd tool
# Minimalistic VMD embedding for Pizza.py
# (c) 2010 Axel Kohlmeyer <[email protected]>
# This class will replace the VMD startup script,
# open a pipe to the executable,
# and feed it Tcl command lines one at a time
oneline = "Control VMD from python"
docstr = """
v = vmd() start up VMD
v.stop() shut down VMD instance
v.clear() delete all visualizations
v.rep(style) set default representation style. One of
(Lines|VDW|Licorice|DynamicBonds|Points|CPK)
v.new(file[,type]) load new file (default file type 'lammpstrj')
v.data(file[,atomstyle]) load new data file (default atom style 'full')
v.update(file[,type]) replace current frames with new file
v.append(file[,type]) append file to current frame(s)
v.set(snap,x,y,z,(True|False)) set coordinates from a pizza.py snapshot to new or current frame
v.frame(frame) set current frame
v.flush() flush pending input to VMD and update GUI
v.read(file) read Tcl script file (e.g. saved state)
v.enter() enter interactive shell
v.debug([True|False]) display generated VMD script commands?
"""
# History
# 11/10, Axel Kohlmeyer (Temple U): original version
# Imports and external programs
import types, os
import numpy
try: from DEFAULTS import PIZZA_VMDNAME
except: PIZZA_VMDNAME = "vmd"
try: from DEFAULTS import PIZZA_VMDDIR
except: PIZZA_VMDDIR = "/usr/local/lib/vmd"
try: from DEFAULTS import PIZZA_VMDDEV
except: PIZZA_VMDDEV = "win"
try: from DEFAULTS import PIZZA_VMDARCH
except: PIZZA_VMDARCH = "LINUX"
# try these settings for a Mac
#PIZZA_VMDNAME = "vmd"
#PIZZA_VMDDIR = "/Applications/VMD\ 1.8.7.app/Contents/vmd"
#PIZZA_VMDDEV = "win"
#PIZZA_VMDARCH = "MACOSXX86"
try: import pexpect
except:
print "pexpect from http://pypi.python.org/pypi/pexpect", \
"is required for vmd tool"
raise
# Class definition
class vmd:
# --------------------------------------------------------------------
def __init__(self):
self.vmddir = PIZZA_VMDDIR
self.vmdexe = PIZZA_VMDDIR + '/' + PIZZA_VMDNAME + '_' + PIZZA_VMDARCH
# these are all defaults copied from the vmd launch script
os.environ['VMDDIR'] = PIZZA_VMDDIR
os.environ['VMDDISPLAYDEVICE'] = PIZZA_VMDDEV
os.environ['VMDSCRPOS'] = "596 190"
os.environ['VMDSCRSIZE'] = "669 834"
os.environ['VMDSCRHEIGHT'] = "6.0"
os.environ['VMDSCRDIST'] = "-2.0"
os.environ['VMDTITLE'] = "on"
os.environ['TCL_LIBRARY'] = PIZZA_VMDDIR + "/scripts/tcl"
os.environ['STRIDE_BIN'] = PIZZA_VMDDIR + "/stride_" + PIZZA_VMDARCH
os.environ['SURF_BIN'] = PIZZA_VMDDIR + "/surf_" + PIZZA_VMDARCH
os.environ['TACHYON_BIN'] = PIZZA_VMDDIR + "/tachyon_" + PIZZA_VMDARCH
ldpath = os.environ.get('LD_LIBRARY_PATH','')
if ldpath == '':
os.environ['LD_LIBRARY_PATH'] = PIZZA_VMDDIR
else:
os.environ['LD_LIBRARY_PATH'] = ldpath + ':' + PIZZA_VMDDIR
ldpath = os.environ.get('LD_LIBRARY_PATH','')
if ldpath == '':
os.environ['PYTHONPATH'] = PIZZA_VMDDIR
else:
os.environ['PYTHONPATH'] = PIZZA_VMDDIR + "/scripts/python"
self.debugme = False
# open pipe to vmd and wait until we have a prompt
self.VMD = pexpect.spawn(self.vmdexe)
self.VMD.expect('vmd >')
# --------------------------------------------------------------------
# post command to vmd and wait until the prompt returns.
def __call__(self,command):
if self.VMD.isalive():
self.VMD.sendline(command)
self.VMD.expect('vmd >')
if self.debugme:
print "call+result:"+self.VMD.before
return
# --------------------------------------------------------------------
# exit VMD
def stop(self):
self.__call__("quit")
del self.VMD
# --------------------------------------------------------------------
# force VMD display and GUI update.
def flush(self):
self.__call__('display update ui')
# --------------------------------------------------------------------
# turn on debugging info
def debug(self,status=True):
if status and not self.debugme:
print 'Turning vmd.py debugging ON.'
if not status and self.debugme:
print 'Turning vmd.py debugging OFF.'
self.debugme = status
# --------------------------------------------------------------------
# emulate a regular tcl command prompt
def enter(self,mode='tcl'):
self.__call__('menu main off')
self.__call__('menu main on')
while 1:
try:
command = raw_input("vmd > ")
except EOFError:
print "(EOF)"
self.__call__('menu main off')
return
if command == "quit" or command == "exit":
self.__call__('menu main off')
return
if command == "gopython":
print "gopython not supported here"
continue
self.__call__(command)
# --------------------------------------------------------------------
# read and execute tcl script file (e.g. a saved state)
def read(self,filename):
self.__call__('play ' + filename)
self.flush()
# --------------------------------------------------------------------
# remove all molecules, data and visualizations
def clear(self):
self.__call__("mol delete all")
# --------------------------------------------------------------------
# navigate to a given frame
def rep(self,style='Lines'):
if style == 'Lines' or style == 'VDW' or style == 'Licorice' \
or style == 'DynamicBonds' or style == 'Points' or style == 'CPK':
self.__call__('mol default style ' + style)
# --------------------------------------------------------------------
# navigate to a given frame
def frame(self,framespec):
self.__call__('animate goto ' + str(framespec))
# --------------------------------------------------------------------
# load a new molecule from a file supported by a molfile plugin
def new(self,filename,filetype='lammpstrj'):
self.__call__('mol new ' + filename + ' type ' + filetype + ' waitfor all')
self.flush()
# --------------------------------------------------------------------
# load a new molecule from a data file via the topotools plugin
def data(self,filename,atomstyle='full'):
self.__call__('package require topotools 1.0')
self.__call__('topo readlammpsdata ' + filename + ' ' + atomstyle)
self.flush()
# --------------------------------------------------------------------
# append all frames from a given file to the current molecule
def append(self,filename,filetype='lammpstrj'):
self.__call__('set tmol [molinfo top]')
self.__call__('array set viewpoints {}')
self.__call__('foreach mol [molinfo list] { set viewpoints($mol) [molinfo $mol get { center_matrix rotate_matrix scale_matrix global_matrix}]}')
self.__call__('mol addfile ' + filename + ' mol $tmol type ' + filetype + ' waitfor all')
self.__call__('foreach mol [molinfo list] { molinfo $mol set {center_matrix rotate_matrix scale_matrix global_matrix} $viewpoints($mol)}')
self.flush()
# --------------------------------------------------------------------
# replace all frames of a molecule with those from a given file
def update(self,filename,filetype='lammpstrj'):
self.__call__('set tmol [molinfo top]')
self.__call__('array set viewpoints {}')
self.__call__('foreach mol [molinfo list] {set viewpoints($mol) [molinfo $mol get { center_matrix rotate_matrix scale_matrix global_matrix}]}')
self.__call__('animate delete all $tmol')
self.__call__('mol addfile ' + filename + ' mol $tmol type ' + filetype + ' waitfor all')
self.__call__('foreach mol [molinfo list] {molinfo $mol set {center_matrix rotate_matrix scale_matrix global_matrix} $viewpoints($mol)}')
self.flush()
# --------------------------------------------------------------------
# add or overwrite coordinates with coordinates in a snapshot
def set(self,snap,x,y,z,append=True):
self.__call__('set vmdsel [atomselect top all]')
if append:
self.__call__('animate dup [molinfo top]')
cmd = '$vmdsel set {x y z} {'
for idx in range(0,snap.natoms):
cmd += ' {'+str(snap[idx,x])+' '+str(snap[idx,y])+' '+str(snap[idx,z])+'}'
cmd += '}'
self.__call__(cmd)
self.__call__('$vmdsel delete ; unset vmdsel')
self.flush()
| gpl-2.0 |
YangChihWei/w16b_test | static/Brython3.1.1-20150328-091302/Lib/os.py | 635 | 35582 | r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt', 'os2' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
__all__.append('_exit')
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
try:
from os2 import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def _get_masked_mode(mode):
mask = umask(0)
umask(mask)
return mode & ~mask
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(path [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. If the
target directory with the same mode as we specified already exists,
raises an OSError if exist_ok is False, otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError as e:
dir_exists = path.isdir(name)
expected_mode = _get_masked_mode(mode)
if dir_exists:
# S_ISGID is automatically copied by the OS from parent to child
# directories on mkdir. Don't consider it being set to be a mode
# mismatch as mkdir does not unset it when not specified in mode.
actual_mode = st.S_IMODE(lstat(name).st_mode) & ~st.S_ISGID
else:
actual_mode = -1
if not (e.errno == errno.EEXIST and exist_ok and dir_exists and
actual_mode == expected_mode):
if dir_exists and actual_mode != expected_mode:
e.strerror += ' (mode %o != expected mode %o)' % (
actual_mode, expected_mode)
raise
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except FileNotFoundError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except FileNotFoundError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except error as err:
if onerror is not None:
onerror(err)
return
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except error as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
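# Editorial sketch (not part of the original module): get_exec_path() honours a
# caller-supplied environment and falls back to defpath when PATH is missing.
# Defined but never called here; the PATH value is a placeholder.
def _get_exec_path_example():
    from_process = get_exec_path()                        # uses os.environ
    from_custom = get_exec_path({"PATH": "/opt/tools/bin:/usr/bin"})
    from_default = get_exec_path({})                      # no PATH -> defpath
    return from_process, from_custom, from_default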
# Change environ to automatically call putenv(), unsetenv if they exist.
from collections.abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
__all__.append("unsetenv")
def _createenviron():
if name in ('os2', 'nt'):
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
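# Editorial sketch (not part of the original module): mutations of this mapping
# are mirrored to the process environment via putenv()/unsetenv() when those
# calls exist.  'MY_FLAG' is a placeholder; defined but never called here.
def _environ_example():
    environ["MY_FLAG"] = "1"         # encodes key/value, then calls putenv()
    value = environ.get("MY_FLAG")   # decoded back to str on access
    del environ["MY_FLAG"]           # calls unsetenv() and drops the cached copy
    return value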
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = name not in ('os2', 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
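# Editorial sketch (not part of the original module): fsencode()/fsdecode() are
# inverses under the filesystem encoding and pass through values already of the
# target type.  Defined but never called here.
def _fscodec_example():
    raw = fsencode("data.txt")      # str -> bytes under the filesystem encoding
    assert fsencode(raw) is raw     # bytes are returned unchanged
    return fsdecode(raw)            # bytes -> str round-trip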
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
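    # Editorial note (not part of the original module): the mode argument picks
    # the parent-side behaviour implemented in _spawnvef() above, e.g.
    #
    #     pid = spawnv(P_NOWAIT, "/bin/sleep", ["sleep", "5"])   # returns child pid
    #     rc  = spawnv(P_WAIT,   "/bin/true",  ["true"])         # waits; 0 on success
    #
    # With P_WAIT a child killed by a signal yields -SIG rather than an exit
    # code.  "/bin/sleep" and "/bin/true" are assumed paths for illustration.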
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
import copyreg as _copyreg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copyreg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
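# Editorial sketch (not part of the original module): popen() returns a
# _wrap_close proxy, so stream methods pass through and close() reports the
# child's status.  'echo hello' is only an illustrative command; the function
# is defined but never called here.
def _popen_example():
    pipe = popen("echo hello")
    text = pipe.read()       # attribute access is proxied to the wrapped stream
    status = pipe.close()    # None on success, (shifted) returncode on failure
    return text, status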
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
| agpl-3.0 |
turbokongen/home-assistant | homeassistant/components/plex/config_flow.py | 1 | 15991 | """Config flow for Plex."""
import copy
import logging
from aiohttp import web_response
import plexapi.exceptions
from plexapi.gdm import GDM
from plexauth import PlexAuth
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from .const import ( # pylint: disable=unused-import
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
from .server import PlexServer
_LOGGER = logging.getLogger(__package__)
@callback
def configured_servers(hass):
"""Return a set of the configured Plex servers."""
return {
entry.data[CONF_SERVER_IDENTIFIER]
for entry in hass.config_entries.async_entries(DOMAIN)
}
async def async_discover(hass):
"""Scan for available Plex servers."""
gdm = GDM()
await hass.async_add_executor_job(gdm.scan)
for server_data in gdm.entries:
await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: config_entries.SOURCE_INTEGRATION_DISCOVERY},
data=server_data,
)
class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Plex config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlexOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Plex flow."""
self.current_login = {}
self.available_servers = None
self.plexauth = None
self.token = None
self.client_id = None
self._manual = False
async def async_step_user(
self, user_input=None, errors=None
): # pylint: disable=arguments-differ
"""Handle a flow initialized by the user."""
if user_input is not None:
return await self.async_step_plex_website_auth()
if self.show_advanced_options:
return await self.async_step_user_advanced(errors=errors)
return self.async_show_form(step_id="user", errors=errors)
async def async_step_user_advanced(self, user_input=None, errors=None):
"""Handle an advanced mode flow initialized by the user."""
if user_input is not None:
if user_input.get("setup_method") == MANUAL_SETUP_STRING:
self._manual = True
return await self.async_step_manual_setup()
return await self.async_step_plex_website_auth()
data_schema = vol.Schema(
{
vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In(
[AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING]
)
}
)
return self.async_show_form(
step_id="user_advanced", data_schema=data_schema, errors=errors
)
async def async_step_manual_setup(self, user_input=None, errors=None):
"""Begin manual configuration."""
if user_input is not None and errors is None:
user_input.pop(CONF_URL, None)
host = user_input.get(CONF_HOST)
if host:
port = user_input[CONF_PORT]
prefix = "https" if user_input.get(CONF_SSL) else "http"
user_input[CONF_URL] = f"{prefix}://{host}:{port}"
elif CONF_TOKEN not in user_input:
return await self.async_step_manual_setup(
user_input=user_input, errors={"base": "host_or_token"}
)
return await self.async_step_server_validate(user_input)
previous_input = user_input or {}
data_schema = vol.Schema(
{
vol.Optional(
CONF_HOST,
description={"suggested_value": previous_input.get(CONF_HOST)},
): str,
vol.Required(
CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT)
): int,
vol.Required(
CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL)
): bool,
vol.Required(
CONF_VERIFY_SSL,
default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
vol.Optional(
CONF_TOKEN,
description={"suggested_value": previous_input.get(CONF_TOKEN)},
): str,
}
)
return self.async_show_form(
step_id="manual_setup", data_schema=data_schema, errors=errors
)
async def async_step_server_validate(self, server_config):
"""Validate a provided configuration."""
errors = {}
self.current_login = server_config
plex_server = PlexServer(self.hass, server_config)
try:
await self.hass.async_add_executor_job(plex_server.connect)
except NoServersFound:
_LOGGER.error("No servers linked to Plex account")
errors["base"] = "no_servers"
except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
_LOGGER.error("Invalid credentials provided, config not created")
errors[CONF_TOKEN] = "faulty_credentials"
except requests.exceptions.SSLError as error:
_LOGGER.error("SSL certificate error: [%s]", error)
errors["base"] = "ssl_error"
except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError):
server_identifier = (
server_config.get(CONF_URL) or plex_server.server_choice or "Unknown"
)
_LOGGER.error("Plex server could not be reached: %s", server_identifier)
errors[CONF_HOST] = "not_found"
except ServerNotSpecified as available_servers:
self.available_servers = available_servers.args[0]
return await self.async_step_select_server()
except Exception as error: # pylint: disable=broad-except
_LOGGER.exception("Unknown error connecting to Plex server: %s", error)
return self.async_abort(reason="unknown")
if errors:
if self._manual:
return await self.async_step_manual_setup(
user_input=server_config, errors=errors
)
return await self.async_step_user(errors=errors)
server_id = plex_server.machine_identifier
url = plex_server.url_in_use
token = server_config.get(CONF_TOKEN)
entry_config = {CONF_URL: url}
if self.client_id:
entry_config[CONF_CLIENT_ID] = self.client_id
if token:
entry_config[CONF_TOKEN] = token
if url.startswith("https"):
entry_config[CONF_VERIFY_SSL] = server_config.get(
CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL
)
data = {
CONF_SERVER: plex_server.friendly_name,
CONF_SERVER_IDENTIFIER: server_id,
PLEX_SERVER_CONFIG: entry_config,
}
entry = await self.async_set_unique_id(server_id)
if self.context[CONF_SOURCE] == config_entries.SOURCE_REAUTH:
self.hass.config_entries.async_update_entry(entry, data=data)
_LOGGER.debug("Updated config entry for %s", plex_server.friendly_name)
await self.hass.config_entries.async_reload(entry.entry_id)
return self.async_abort(reason="reauth_successful")
self._abort_if_unique_id_configured()
_LOGGER.debug("Valid config created for %s", plex_server.friendly_name)
return self.async_create_entry(title=plex_server.friendly_name, data=data)
async def async_step_select_server(self, user_input=None):
"""Use selected Plex server."""
config = dict(self.current_login)
if user_input is not None:
config[CONF_SERVER] = user_input[CONF_SERVER]
return await self.async_step_server_validate(config)
configured = configured_servers(self.hass)
available_servers = [
name
for (name, server_id) in self.available_servers
if server_id not in configured
]
if not available_servers:
return self.async_abort(reason="all_configured")
if len(available_servers) == 1:
config[CONF_SERVER] = available_servers[0]
return await self.async_step_server_validate(config)
return self.async_show_form(
step_id="select_server",
data_schema=vol.Schema(
{vol.Required(CONF_SERVER): vol.In(available_servers)}
),
errors={},
)
async def async_step_integration_discovery(self, discovery_info):
"""Handle GDM discovery."""
machine_identifier = discovery_info["data"]["Resource-Identifier"]
await self.async_set_unique_id(machine_identifier)
self._abort_if_unique_id_configured()
host = f"{discovery_info['from'][0]}:{discovery_info['data']['Port']}"
name = discovery_info["data"]["Name"]
self.context["title_placeholders"] = {
"host": host,
"name": name,
}
return await self.async_step_user()
async def async_step_plex_website_auth(self):
"""Begin external auth flow on Plex website."""
self.hass.http.register_view(PlexAuthorizationCallbackView)
hass_url = get_url(self.hass)
headers = {"Origin": hass_url}
payload = {
"X-Plex-Device-Name": X_PLEX_DEVICE_NAME,
"X-Plex-Version": X_PLEX_VERSION,
"X-Plex-Product": X_PLEX_PRODUCT,
"X-Plex-Device": self.hass.config.location_name,
"X-Plex-Platform": X_PLEX_PLATFORM,
"X-Plex-Model": "Plex OAuth",
}
session = async_get_clientsession(self.hass)
self.plexauth = PlexAuth(payload, session, headers)
await self.plexauth.initiate_auth()
forward_url = f"{hass_url}{AUTH_CALLBACK_PATH}?flow_id={self.flow_id}"
auth_url = self.plexauth.auth_url(forward_url)
return self.async_external_step(step_id="obtain_token", url=auth_url)
async def async_step_obtain_token(self, user_input=None):
"""Obtain token after external auth completed."""
token = await self.plexauth.token(10)
if not token:
return self.async_external_step_done(next_step_id="timed_out")
self.token = token
self.client_id = self.plexauth.client_identifier
return self.async_external_step_done(next_step_id="use_external_token")
async def async_step_timed_out(self, user_input=None):
"""Abort flow when time expires."""
return self.async_abort(reason="token_request_timeout")
async def async_step_use_external_token(self, user_input=None):
"""Continue server validation with external token."""
server_config = {CONF_TOKEN: self.token}
return await self.async_step_server_validate(server_config)
async def async_step_reauth(self, data):
"""Handle a reauthorization flow request."""
self.current_login = dict(data)
return await self.async_step_user()
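    # Editorial note (not part of the original integration): the step chain for
    # the default path is user -> plex_website_auth -> external auth on plex.tv
    # -> obtain_token -> use_external_token -> server_validate, while manual
    # setup short-circuits through manual_setup -> server_validate.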
class PlexOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Plex options."""
def __init__(self, config_entry):
"""Initialize Plex options flow."""
self.options = copy.deepcopy(dict(config_entry.options))
self.server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
async def async_step_init(self, user_input=None):
"""Manage the Plex options."""
return await self.async_step_plex_mp_settings()
async def async_step_plex_mp_settings(self, user_input=None):
"""Manage the Plex media_player options."""
plex_server = self.hass.data[DOMAIN][SERVERS][self.server_id]
if user_input is not None:
self.options[MP_DOMAIN][CONF_USE_EPISODE_ART] = user_input[
CONF_USE_EPISODE_ART
]
self.options[MP_DOMAIN][CONF_IGNORE_NEW_SHARED_USERS] = user_input[
CONF_IGNORE_NEW_SHARED_USERS
]
self.options[MP_DOMAIN][CONF_IGNORE_PLEX_WEB_CLIENTS] = user_input[
CONF_IGNORE_PLEX_WEB_CLIENTS
]
account_data = {
user: {"enabled": bool(user in user_input[CONF_MONITORED_USERS])}
for user in plex_server.accounts
}
self.options[MP_DOMAIN][CONF_MONITORED_USERS] = account_data
return self.async_create_entry(title="", data=self.options)
available_accounts = {name: name for name in plex_server.accounts}
available_accounts[plex_server.owner] += " [Owner]"
default_accounts = plex_server.accounts
known_accounts = set(plex_server.option_monitored_users)
if known_accounts:
default_accounts = {
user
for user in plex_server.option_monitored_users
if plex_server.option_monitored_users[user]["enabled"]
}
for user in plex_server.accounts:
if user not in known_accounts:
available_accounts[user] += " [New]"
if not plex_server.option_ignore_new_shared_users:
for new_user in plex_server.accounts - known_accounts:
default_accounts.add(new_user)
return self.async_show_form(
step_id="plex_mp_settings",
data_schema=vol.Schema(
{
vol.Required(
CONF_USE_EPISODE_ART,
default=plex_server.option_use_episode_art,
): bool,
vol.Optional(
CONF_MONITORED_USERS, default=default_accounts
): cv.multi_select(available_accounts),
vol.Required(
CONF_IGNORE_NEW_SHARED_USERS,
default=plex_server.option_ignore_new_shared_users,
): bool,
vol.Required(
CONF_IGNORE_PLEX_WEB_CLIENTS,
default=plex_server.option_ignore_plexweb_clients,
): bool,
}
),
)
class PlexAuthorizationCallbackView(HomeAssistantView):
"""Handle callback from external auth."""
url = AUTH_CALLBACK_PATH
name = AUTH_CALLBACK_NAME
requires_auth = False
async def get(self, request):
"""Receive authorization confirmation."""
hass = request.app["hass"]
await hass.config_entries.flow.async_configure(
flow_id=request.query["flow_id"], user_input=None
)
return web_response.Response(
headers={"content-type": "text/html"},
text="<script>window.close()</script>Success! This window can be closed",
)
| apache-2.0 |
gangadharkadam/v6_frappe | frappe/api.py | 27 | 3521 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json
import frappe
import frappe.handler
import frappe.client
import frappe.desk.reportview
from frappe.utils.response import build_response
from frappe import _
def handle():
"""
Handler for `/api` methods
### Examples:
`/api/method/{methodname}` will call a whitelisted method
`/api/resource/{doctype}` will query a table
examples:
- `?fields=["name", "owner"]`
- `?filters=[["Task", "name", "like", "%005"]]`
- `?limit_start=0`
- `?limit_page_length=20`
`/api/resource/{doctype}/{name}` will point to a resource
`GET` will return doclist
`POST` will insert
`PUT` will update
`DELETE` will delete
`/api/resource/{doctype}/{name}?run_method={method}` will run a whitelisted controller method
"""
parts = frappe.request.path[1:].split("/",3)
call = doctype = name = None
if len(parts) > 1:
call = parts[1]
if len(parts) > 2:
doctype = parts[2]
if len(parts) > 3:
name = parts[3]
if call=="method":
frappe.local.form_dict.cmd = doctype
return frappe.handler.handle()
elif call=="resource":
if "run_method" in frappe.local.form_dict:
method = frappe.local.form_dict.pop("run_method")
doc = frappe.get_doc(doctype, name)
doc.is_whitelisted(method)
if frappe.local.request.method=="GET":
if not doc.has_permission("read"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
if frappe.local.request.method=="POST":
if not doc.has_permission("write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
frappe.db.commit()
else:
if name:
if frappe.local.request.method=="GET":
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
frappe.local.response.update({"data": doc})
if frappe.local.request.method=="PUT":
data = json.loads(frappe.local.form_dict.data)
doc = frappe.get_doc(doctype, name)
if "flags" in data:
del data["flags"]
# Not checking permissions here because it's checked in doc.save
doc.update(data)
frappe.local.response.update({
"data": doc.save().as_dict()
})
frappe.db.commit()
if frappe.local.request.method=="DELETE":
# Not checking permissions here because it's checked in delete_doc
frappe.delete_doc(doctype, name)
frappe.local.response.http_status_code = 202
frappe.local.response.message = "ok"
frappe.db.commit()
elif doctype:
if frappe.local.request.method=="GET":
if frappe.local.form_dict.get('fields'):
frappe.local.form_dict['fields'] = json.loads(frappe.local.form_dict['fields'])
frappe.local.form_dict.setdefault('limit_page_length', 20)
frappe.local.response.update({
"data": frappe.call(frappe.client.get_list,
doctype, **frappe.local.form_dict)})
if frappe.local.request.method=="POST":
data = json.loads(frappe.local.form_dict.data)
data.update({
"doctype": doctype
})
frappe.local.response.update({
"data": frappe.get_doc(data).insert().as_dict()
})
frappe.db.commit()
else:
raise frappe.DoesNotExistError
else:
raise frappe.DoesNotExistError
return build_response("json")
| mit |
vtapia/sssd | src/tests/python-test.py | 4 | 16953 | #!/usr/bin/env python
# coding=utf-8
# Authors:
# Jakub Hrozek <[email protected]>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
import shutil
import unittest
import subprocess
import errno
# module under test
import pysss
class LocalTest(unittest.TestCase):
local_path = "/var/lib/sss/db/sssd.ldb"
def setUp(self):
self.local = pysss.local()
def _run_and_check(self, runme):
        # subprocess.call() returns only the exit status; getstatusoutput()
        # also captures the command output for the assertion message.
        (status, output) = subprocess.getstatusoutput(runme)
        self.failUnlessEqual(status, 0, output)
def _get_object_info(self, name, subtree, domain):
search_dn = "dn=name=%s,cn=%s,cn=%s,cn=sysdb" % (name, subtree, domain)
try:
cmd = "ldbsearch -H %s %s" % (self.local_path, search_dn)
            # check_output() (not check_call()) is needed to capture the output
            output = subprocess.check_output(cmd, shell=True)
            output = output.decode('utf-8')
except subprocess.CalledProcessError:
return {}
kw = {}
for key, value in \
[l.split(':') for l in output.split('\n') if ":" in l]:
kw[key] = value.strip()
del kw['asq']
return kw
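    # Editorial note (not part of the original test): an ldbsearch record such as
    #
    #     dn: name=foo,cn=users,cn=LOCAL,cn=sysdb
    #     uidNumber: 1234
    #     homeDirectory: /home/foo
    #
    # is folded by the key/value split above into
    # {'dn': 'name=foo,...', 'uidNumber': '1234', 'homeDirectory': '/home/foo'};
    # the values shown are placeholders.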
def get_user_info(self, name, domain="LOCAL"):
return self._get_object_info(name, "users", domain)
def get_group_info(self, name, domain="LOCAL"):
return self._get_object_info(name, "groups", domain)
def _validate_object(self, kw, name, **kwargs):
if kw == {}:
self.fail("Could not get %s info" % name)
for key in kwargs.keys():
self.assert_(str(kwargs[key]) == str(kw[key]),
"%s %s != %s %s" % (key, kwargs[key], key, kw[key]))
def validate_user(self, username, **kwargs):
return self._validate_object(self.get_user_info(username), "user",
**kwargs)
def validate_group(self, groupname, **kwargs):
return self._validate_object(self.get_group_info(groupname), "group",
**kwargs)
def _validate_no_object(self, kw, name):
if kw != {}:
self.fail("Got %s info" % name)
def validate_no_user(self, username):
return self._validate_no_object(self.get_user_info(username), "user")
def validate_no_group(self, groupname):
return self._validate_no_object(self.get_group_info(groupname),
"group")
def _get_object_membership(self, name, subtree, domain):
search_dn = "dn=name=%s,cn=%s,cn=%s,cn=sysdb" % (name, subtree, domain)
try:
cmd = "ldbsearch -H %s %s" % (self.local_path, search_dn)
            # check_output() (not check_call()) is needed to capture the output
            output = subprocess.check_output(cmd, shell=True)
            output = output.decode('utf-8')
except subprocess.CalledProcessError:
return []
members = [value.strip() for key, value in
[l.split(':') for l in output.split('\n') if ":" in l]
if key == "memberof"]
return members
def _assertMembership(self, name, group_list, subtree, domain):
members = self._get_object_membership(name, subtree, domain)
for group in group_list:
group_dn = "name=%s,cn=groups,cn=%s,cn=sysdb" % (group, domain)
if group_dn in members:
members.remove(group_dn)
else:
self.fail("Cannot find required group %s" % group_dn)
if len(members) > 0:
self.fail("More groups than selected")
def assertUserMembership(self, name, group_list, domain="LOCAL"):
return self._assertMembership(name, group_list, "users", domain)
def assertGroupMembership(self, name, group_list, domain="LOCAL"):
return self._assertMembership(name, group_list, "groups", domain)
def get_user_membership(self, name, domain="LOCAL"):
return self._get_object_membership(name, "users", domain)
def get_group_membership(self, name, domain="LOCAL"):
return self._get_object_membership(name, "groups", domain)
def add_group(self, groupname):
self._run_and_check("sss_groupadd %s" % (groupname))
def remove_group(self, groupname):
self._run_and_check("sss_groupdel %s" % (groupname))
def add_user(self, username):
self._run_and_check("sss_useradd %s" % (username))
def add_user_not_home(self, username):
self._run_and_check("sss_useradd -M %s" % (username))
def remove_user(self, username):
self._run_and_check("sss_userdel %s" % (username))
def remove_user_not_home(self, username):
self._run_and_check("sss_userdel -R %s" % (username))
class SanityTest(unittest.TestCase):
def testInstantiate(self):
"Test that the local backed binding can be instantiated"
local = pysss.local()
self.assert_(local.__class__, "<type 'sss.local'>")
class UseraddTest(LocalTest):
def tearDown(self):
if self.username:
self.remove_user(self.username)
def testUseradd(self):
"Test adding a local user"
self.username = "testUseradd"
self.local.useradd(self.username)
self.validate_user(self.username)
# check home directory was created with default name
self.assertEquals(os.access("/home/%s" % self.username, os.F_OK), True)
def testUseraddWithParams(self):
"Test adding a local user with modified parameters"
self.username = "testUseraddWithParams"
self.local.useradd(self.username,
gecos="foo bar",
homedir="/home/foobar",
shell="/bin/zsh")
self.validate_user(self.username,
gecos="foo bar",
homeDirectory="/home/foobar",
loginShell="/bin/zsh")
# check home directory was created with nondefault name
self.assertEquals(os.access("/home/foobar", os.F_OK), True)
def testUseraddNoHomedir(self):
"Test adding a local user without creating his home dir"
self.username = "testUseraddNoHomedir"
self.local.useradd(self.username, create_home=False)
self.validate_user(self.username)
# check home directory was not created
username_path = "/home/%s" % self.username
self.assertEquals(os.access(username_path, os.F_OK), False)
self.local.userdel(self.username, remove=False)
self.username = None # fool tearDown into not removing the user
def testUseraddAlternateSkeldir(self):
"Test adding a local user and init his homedir from a custom location"
self.username = "testUseraddAlternateSkeldir"
skeldir = tempfile.mkdtemp()
fd, path = tempfile.mkstemp(dir=skeldir)
fdo = os.fdopen(fd)
fdo.flush()
        fdo.close()
self.assertEquals(os.access(path, os.F_OK), True)
filename = os.path.basename(path)
try:
self.local.useradd(self.username, skel=skeldir)
self.validate_user(self.username)
path = "/home/%s/%s" % (self.username, filename)
self.assertEquals(os.access(path, os.F_OK), True)
finally:
shutil.rmtree(skeldir)
def testUseraddToGroups(self):
"Test adding a local user with group membership"
self.username = "testUseraddToGroups"
self.add_group("gr1")
self.add_group("gr2")
try:
self.local.useradd(self.username,
groups=["gr1", "gr2"])
self.assertUserMembership(self.username,
["gr1", "gr2"])
finally:
self.remove_group("gr1")
self.remove_group("gr2")
def testUseraddWithUID(self):
"Test adding a local user with a custom UID"
self.username = "testUseraddWithUID"
self.local.useradd(self.username,
uid=1024)
self.validate_user(self.username,
uidNumber=1024)
class UseraddTestNegative(LocalTest):
def testUseraddNoParams(self):
"Test that local.useradd() requires the username parameter"
self.assertRaises(TypeError, self.local.useradd)
def testUseraddUserAlreadyExists(self):
"Test adding a local with a duplicate name"
self.username = "testUseraddUserAlreadyExists"
self.local.useradd(self.username)
try:
self.local.useradd(self.username)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_user(self.username)
def testUseraddUIDAlreadyExists(self):
"Test adding a local with a duplicate user ID"
self.username = "testUseraddUIDAlreadyExists1"
self.local.useradd(self.username, uid=1025)
try:
self.local.useradd("testUseraddUIDAlreadyExists2", uid=1025)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_user(self.username)
class UserdelTest(LocalTest):
def testUserdel(self):
self.add_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), True)
self.validate_user("testUserdel")
self.local.userdel("testUserdel")
self.validate_no_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), False)
def testUserdelNotHomedir(self):
self.add_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), True)
self.validate_user("testUserdel")
self.local.userdel("testUserdel", remove=False)
self.validate_no_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), True)
shutil.rmtree("/home/testUserdel")
os.remove("/var/mail/testUserdel")
def testUserdelNegative(self):
self.validate_no_user("testUserdelNegative")
try:
self.local.userdel("testUserdelNegative")
except IOError as e:
self.assertEquals(e.errno, errno.ENOENT)
else:
fail("Was expecting exception")
class UsermodTest(LocalTest):
def setUp(self):
self.local = pysss.local()
self.username = "UsermodTest"
self.add_user_not_home(self.username)
def tearDown(self):
self.remove_user_not_home(self.username)
def testUsermod(self):
"Test modifying user attributes"
self.local.usermod(self.username,
gecos="foo bar",
homedir="/home/foobar",
shell="/bin/zsh")
self.validate_user(self.username,
gecos="foo bar",
homeDirectory="/home/foobar",
loginShell="/bin/zsh")
def testUsermodUID(self):
"Test modifying UID"
self.local.usermod(self.username,
uid=1024)
self.validate_user(self.username,
uidNumber=1024)
def testUsermodGroupMembership(self):
"Test adding to and removing from groups"
self.add_group("gr1")
self.add_group("gr2")
try:
self.local.usermod(self.username,
addgroups=["gr1", "gr2"])
self.assertUserMembership(self.username,
["gr1", "gr2"])
self.local.usermod(self.username,
rmgroups=["gr2"])
self.assertUserMembership(self.username,
["gr1"])
self.local.usermod(self.username,
rmgroups=["gr1"])
self.assertUserMembership(self.username,
[])
finally:
self.remove_group("gr1")
self.remove_group("gr2")
def testUsermodLockUnlock(self):
"Test locking and unlocking user"
self.local.usermod(self.username,
lock=self.local.lock)
self.validate_user(self.username,
disabled="true")
self.local.usermod(self.username,
lock=self.local.unlock)
self.validate_user(self.username,
disabled="false")
class GroupaddTest(LocalTest):
def tearDown(self):
if self.groupname:
self.remove_group(self.groupname)
def testGroupadd(self):
"Test adding a local group"
self.groupname = "testGroupadd"
self.local.groupadd(self.groupname)
self.validate_group(self.groupname)
def testGroupaddWithGID(self):
"Test adding a local group with a custom GID"
self.groupname = "testUseraddWithGID"
self.local.groupadd(self.groupname,
gid=1024)
self.validate_group(self.groupname,
gidNumber=1024)
class GroupaddTestNegative(LocalTest):
def testGroupaddNoParams(self):
"Test that local.groupadd() requires the groupname parameter"
self.assertRaises(TypeError, self.local.groupadd)
def testGroupaddUserAlreadyExists(self):
"Test adding a local with a duplicate name"
self.groupname = "testGroupaddUserAlreadyExists"
self.local.groupadd(self.groupname)
try:
self.local.groupadd(self.groupname)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_group(self.groupname)
def testGroupaddGIDAlreadyExists(self):
"Test adding a local with a duplicate group ID"
self.groupname = "testGroupaddGIDAlreadyExists1"
self.local.groupadd(self.groupname, gid=1025)
try:
self.local.groupadd("testGroupaddGIDAlreadyExists2", gid=1025)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_group(self.groupname)
class GroupdelTest(LocalTest):
def testGroupdel(self):
self.add_group("testGroupdel")
self.validate_group("testGroupdel")
self.local.groupdel("testGroupdel")
self.validate_no_group("testGroupdel")
def testGroupdelNegative(self):
self.validate_no_group("testGroupdelNegative")
try:
self.local.groupdel("testGroupdelNegative")
except IOError as e:
self.assertEquals(e.errno, errno.ENOENT)
else:
fail("Was expecting exception")
class GroupmodTest(LocalTest):
def setUp(self):
self.local = pysss.local()
self.groupname = "GroupmodTest"
self.add_group(self.groupname)
def tearDown(self):
self.remove_group(self.groupname)
def testGroupmodGID(self):
"Test modifying UID"
self.local.groupmod(self.groupname,
gid=1024)
self.validate_group(self.groupname,
gidNumber=1024)
def testGroupmodGroupMembership(self):
"Test adding to groups"
self.add_group("gr1")
self.add_group("gr2")
try:
self.local.groupmod(self.groupname,
addgroups=["gr1", "gr2"])
self.assertGroupMembership(self.groupname,
["gr1", "gr2"])
self.local.groupmod(self.groupname,
rmgroups=["gr2"])
self.assertGroupMembership(self.groupname,
["gr1"])
self.local.groupmod(self.groupname,
rmgroups=["gr1"])
self.assertGroupMembership(self.groupname,
[])
finally:
self.remove_group("gr1")
self.remove_group("gr2")
# -------------- run the test suite -------------- #
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
ericbaze/continuum_code_2012 | pydata/moin/pythonenv/local/lib/python2.7/site-packages/pip-1.1-py2.7.egg/pip/commands/search.py | 60 | 4523 | import sys
import textwrap
import pkg_resources
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
name = 'search'
usage = '%prog QUERY'
summary = 'Search PyPI'
def __init__(self):
super(SearchCommand, self).__init__()
self.parser.add_option(
'--index',
dest='index',
metavar='URL',
default='http://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url, pip.download.xmlrpclib_transport)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a list sorted by score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
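# Editorial sketch (not part of the original command): an illustration of the
# reshaping transform_hits() performs; package name, versions and scores are
# made up.  Defined but never called here.
def _transform_hits_example():
    hits = [
        {'name': 'foo', 'summary': 'old', 'version': '0.9', '_pypi_ordering': 1},
        {'name': 'foo', 'summary': 'new', 'version': '1.0', '_pypi_ordering': 2},
    ]
    # -> [{'name': 'foo', 'summary': 'new', 'versions': ['0.9', '1.0'], 'score': 2}]
    return transform_hits(hits)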
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
try:
return cmp(StrictVersion(version1), StrictVersion(version2))
# in case of abnormal version number, fall back to LooseVersion
except ValueError:
pass
try:
return cmp(LooseVersion(version1), LooseVersion(version2))
except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
# fallback to string comparison
return cmp([str(v) for v in LooseVersion(version1).version],
[str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
SearchCommand()
| gpl-2.0 |
dagwieers/ansible | lib/ansible/modules/network/cloudengine/ce_mlag_interface.py | 31 | 35442 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_mlag_interface
version_added: "2.4"
short_description: Manages MLAG interfaces on HUAWEI CloudEngine switches.
description:
- Manages MLAG interface attributes on HUAWEI CloudEngine switches.
author:
- Li Yanfeng (@QijunPan)
options:
eth_trunk_id:
description:
- Name of the local M-LAG interface. The value is ranging from 0 to 511.
dfs_group_id:
description:
            - ID of a DFS group. The value is 1.
mlag_id:
description:
- ID of the M-LAG. The value is an integer that ranges from 1 to 2048.
mlag_system_id:
description:
- M-LAG global LACP system MAC address. The value is a string of 0 to 255 characters. The default value
is the MAC address of the Ethernet port of MPU.
mlag_priority_id:
description:
- M-LAG global LACP system priority. The value is an integer ranging from 0 to 65535.
The default value is 32768.
interface:
description:
- Name of the interface that enters the Error-Down state when the peer-link fails.
The value is a string of 1 to 63 characters.
mlag_error_down:
description:
- Configure the interface on the slave device to enter the Error-Down state.
choices: ['enable','disable']
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: mlag interface module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Set interface mlag error down
ce_mlag_interface:
interface: 10GE2/0/1
mlag_error_down: enable
provider: "{{ cli }}"
- name: Create mlag
ce_mlag_interface:
eth_trunk_id: 1
dfs_group_id: 1
mlag_id: 4
provider: "{{ cli }}"
- name: Set mlag global attribute
ce_mlag_interface:
mlag_system_id: 0020-1409-0407
mlag_priority_id: 5
provider: "{{ cli }}"
- name: Set mlag interface attribute
ce_mlag_interface:
eth_trunk_id: 1
mlag_system_id: 0020-1409-0400
mlag_priority_id: 3
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: { "interface": "eth-trunk1",
"mlag_error_down": "disable",
"state": "present"
}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: { "mlagErrorDownInfos": [
{
"dfsgroupId": "1",
"portName": "Eth-Trunk1"
}
]
}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {}
updates:
description: command sent to the device
returned: always
type: list
sample: { "interface eth-trunk1",
"undo m-lag unpaired-port suspend"}
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import load_config
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_MLAG_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance>
</mlagInstance>
</mlagInstances>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="create">
<dfsgroupId>%s</dfsgroupId>
<mlagId>%s</mlagId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="delete">
<dfsgroupId>%s</dfsgroupId>
<mlagId>%s</mlagId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_GET_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</filter>
"""
CE_NC_SET_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf operation="merge">
"""
CE_NC_SET_LACP_MLAG_INFO_TAIL = """
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</config>
"""
CE_NC_GET_GLOBAL_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</filter>
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal operation="merge">
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL = """
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</config>
"""
CE_NC_GET_MLAG_ERROR_DOWN_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown>
<dfsgroupId></dfsgroupId>
<portName></portName>
<portState></portState>
</errordown>
</errordowns>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="create">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="delete">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
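# Example behaviour of get_interface_type (illustrative interface names):
#   get_interface_type('Eth-Trunk20') returns 'eth-trunk'
#   get_interface_type('40GE1/0/1') returns '40ge'
#   get_interface_type('Loopback0') returns None (unsupported prefix)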
class MlagInterface(object):
"""
    Manages MLAG interface information.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.eth_trunk_id = self.module.params['eth_trunk_id']
self.dfs_group_id = self.module.params['dfs_group_id']
self.mlag_id = self.module.params['mlag_id']
self.mlag_system_id = self.module.params['mlag_system_id']
self.mlag_priority_id = self.module.params['mlag_priority_id']
self.interface = self.module.params['interface']
self.mlag_error_down = self.module.params['mlag_error_down']
self.state = self.module.params['state']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.existing = dict()
self.proposed = dict()
self.end_state = dict()
# mlag info
self.commands = list()
self.mlag_info = None
self.mlag_global_info = None
self.mlag_error_down_info = None
self.mlag_trunk_attribute_info = None
def init_module(self):
""" init module """
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_mlag_info(self):
""" get mlag info."""
mlag_info = dict()
conf_str = CE_NC_GET_MLAG_INFO
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
mlag_info["mlagInfos"] = list()
root = ElementTree.fromstring(xml_str)
dfs_mlag_infos = root.findall(
"data/mlag/mlagInstances/mlagInstance")
if dfs_mlag_infos:
for dfs_mlag_info in dfs_mlag_infos:
mlag_dict = dict()
for ele in dfs_mlag_info:
if ele.tag in ["dfsgroupId", "mlagId", "localMlagPort"]:
mlag_dict[ele.tag] = ele.text
mlag_info["mlagInfos"].append(mlag_dict)
return mlag_info
def get_mlag_global_info(self):
""" get mlag global info."""
mlag_global_info = dict()
conf_str = CE_NC_GET_GLOBAL_LACP_MLAG_INFO
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_global_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
global_info = root.findall(
"data/ifmtrunk/lacpSysInfo/lacpMlagGlobal")
if global_info:
for tmp in global_info:
for site in tmp:
if site.tag in ["lacpMlagSysId", "lacpMlagPriority"]:
mlag_global_info[site.tag] = site.text
return mlag_global_info
def get_mlag_trunk_attribute_info(self):
""" get mlag global info."""
mlag_trunk_attribute_info = dict()
eth_trunk = "Eth-Trunk"
eth_trunk += self.eth_trunk_id
conf_str = CE_NC_GET_LACP_MLAG_INFO % eth_trunk
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_trunk_attribute_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
global_info = root.findall(
"data/ifmtrunk/TrunkIfs/TrunkIf/lacpMlagIf")
if global_info:
for tmp in global_info:
for site in tmp:
if site.tag in ["lacpMlagSysId", "lacpMlagPriority"]:
mlag_trunk_attribute_info[site.tag] = site.text
return mlag_trunk_attribute_info
def get_mlag_error_down_info(self):
""" get error down info."""
mlag_error_down_info = dict()
conf_str = CE_NC_GET_MLAG_ERROR_DOWN_INFO
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_error_down_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
mlag_error_down_info["mlagErrorDownInfos"] = list()
root = ElementTree.fromstring(xml_str)
mlag_error_infos = root.findall(
"data/mlag/errordowns/errordown")
if mlag_error_infos:
for mlag_error_info in mlag_error_infos:
mlag_error_dict = dict()
for ele in mlag_error_info:
if ele.tag in ["dfsgroupId", "portName"]:
mlag_error_dict[ele.tag] = ele.text
mlag_error_down_info[
"mlagErrorDownInfos"].append(mlag_error_dict)
return mlag_error_down_info
def check_macaddr(self):
"""check mac-address whether valid"""
valid_char = '0123456789abcdef-'
mac = self.mlag_system_id
if len(mac) > 16:
return False
mac_list = re.findall(r'([0-9a-fA-F]+)', mac)
if len(mac_list) != 3:
return False
if mac.count('-') != 2:
return False
for _, value in enumerate(mac, start=0):
if value.lower() not in valid_char:
return False
return True
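    # Example behaviour of check_macaddr (illustrative values): a system id
    # such as '0025-9e95-d6cd' (three hex groups joined by two '-') passes,
    # while '0025.9e95.d6cd' or '00259e95d6cd' is rejected.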
def check_params(self):
"""Check all input params"""
# eth_trunk_id check
if self.eth_trunk_id:
if not self.eth_trunk_id.isdigit():
self.module.fail_json(
                    msg='Error: The value of eth_trunk_id must be an integer.')
if int(self.eth_trunk_id) < 0 or int(self.eth_trunk_id) > 511:
self.module.fail_json(
msg='Error: The value of eth_trunk_id is not in the range from 0 to 511.')
# dfs_group_id check
if self.dfs_group_id:
if self.dfs_group_id != "1":
self.module.fail_json(
msg='Error: The value of dfs_group_id must be 1.')
# mlag_id check
if self.mlag_id:
if not self.mlag_id.isdigit():
self.module.fail_json(
                    msg='Error: The value of mlag_id must be an integer.')
if int(self.mlag_id) < 1 or int(self.mlag_id) > 2048:
self.module.fail_json(
msg='Error: The value of mlag_id is not in the range from 1 to 2048.')
# mlag_system_id check
if self.mlag_system_id:
if not self.check_macaddr():
self.module.fail_json(
msg="Error: mlag_system_id has invalid value %s." % self.mlag_system_id)
# mlag_priority_id check
if self.mlag_priority_id:
if not self.mlag_priority_id.isdigit():
self.module.fail_json(
                    msg='Error: The value of mlag_priority_id must be an integer.')
if int(self.mlag_priority_id) < 0 or int(self.mlag_priority_id) > 254:
self.module.fail_json(
msg='Error: The value of mlag_priority_id is not in the range from 0 to 254.')
# interface check
if self.interface:
intf_type = get_interface_type(self.interface)
if not intf_type:
self.module.fail_json(
msg='Error: Interface name of %s '
                        'is invalid.' % self.interface)
def is_mlag_info_change(self):
"""whether mlag info change"""
if not self.mlag_info:
return True
eth_trunk = "Eth-Trunk"
eth_trunk += self.eth_trunk_id
for info in self.mlag_info["mlagInfos"]:
if info["mlagId"] == self.mlag_id and info["localMlagPort"] == eth_trunk:
return False
return True
def is_mlag_info_exist(self):
"""whether mlag info exist"""
if not self.mlag_info:
return False
eth_trunk = "Eth-Trunk"
eth_trunk += self.eth_trunk_id
for info in self.mlag_info["mlagInfos"]:
if info["mlagId"] == self.mlag_id and info["localMlagPort"] == eth_trunk:
return True
return False
def is_mlag_error_down_info_change(self):
"""whether mlag error down info change"""
if not self.mlag_error_down_info:
return True
for info in self.mlag_error_down_info["mlagErrorDownInfos"]:
if info["portName"].upper() == self.interface.upper():
return False
return True
def is_mlag_error_down_info_exist(self):
"""whether mlag error down info exist"""
if not self.mlag_error_down_info:
return False
for info in self.mlag_error_down_info["mlagErrorDownInfos"]:
if info["portName"].upper() == self.interface.upper():
return True
return False
def is_mlag_interface_info_change(self):
"""whether mlag interface attribute info change"""
if not self.mlag_trunk_attribute_info:
return True
if self.mlag_system_id:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] != self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_trunk_attribute_info["lacpMlagPriority"] != self.mlag_priority_id:
return True
return False
def is_mlag_interface_info_exist(self):
"""whether mlag interface attribute info exist"""
if not self.mlag_trunk_attribute_info:
return False
if self.mlag_system_id:
if self.mlag_priority_id:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_system_id:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
return False
def is_mlag_global_info_change(self):
"""whether mlag global attribute info change"""
if not self.mlag_global_info:
return True
if self.mlag_system_id:
if self.mlag_global_info["lacpMlagSysId"] != self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_global_info["lacpMlagPriority"] != self.mlag_priority_id:
return True
return False
def is_mlag_global_info_exist(self):
"""whether mlag global attribute info exist"""
if not self.mlag_global_info:
return False
if self.mlag_system_id:
if self.mlag_priority_id:
if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_system_id:
if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
return False
def create_mlag(self):
"""create mlag info"""
if self.is_mlag_info_change():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
conf_str = CE_NC_CREATE_MLAG_INFO % (
self.dfs_group_id, self.mlag_id, mlag_port)
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: create mlag info failed.')
self.updates_cmd.append("interface %s" % mlag_port)
self.updates_cmd.append("dfs-group %s m-lag %s" %
(self.dfs_group_id, self.mlag_id))
self.changed = True
def delete_mlag(self):
"""delete mlag info"""
if self.is_mlag_info_exist():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
conf_str = CE_NC_DELETE_MLAG_INFO % (
self.dfs_group_id, self.mlag_id, mlag_port)
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: delete mlag info failed.')
self.updates_cmd.append("interface %s" % mlag_port)
self.updates_cmd.append(
"undo dfs-group %s m-lag %s" % (self.dfs_group_id, self.mlag_id))
self.changed = True
def create_mlag_error_down(self):
"""create mlag error down info"""
if self.is_mlag_error_down_info_change():
conf_str = CE_NC_CREATE_MLAG_ERROR_DOWN_INFO % self.interface
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: create mlag error down info failed.')
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("m-lag unpaired-port suspend")
self.changed = True
def delete_mlag_error_down(self):
"""delete mlag error down info"""
if self.is_mlag_error_down_info_exist():
conf_str = CE_NC_DELETE_MLAG_ERROR_DOWN_INFO % self.interface
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: delete mlag error down info failed.')
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("undo m-lag unpaired-port suspend")
self.changed = True
def set_mlag_interface(self):
"""set mlag interface atrribute info"""
if self.is_mlag_interface_info_change():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
conf_str = CE_NC_SET_LACP_MLAG_INFO_HEAD % mlag_port
if self.mlag_priority_id:
conf_str += "<lacpMlagPriority>%s</lacpMlagPriority>" % self.mlag_priority_id
if self.mlag_system_id:
conf_str += "<lacpMlagSysId>%s</lacpMlagSysId>" % self.mlag_system_id
conf_str += CE_NC_SET_LACP_MLAG_INFO_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set mlag interface attribute info failed.')
self.updates_cmd.append("interface %s" % mlag_port)
if self.mlag_priority_id:
self.updates_cmd.append(
"lacp m-lag priority %s" % self.mlag_priority_id)
if self.mlag_system_id:
self.updates_cmd.append(
"lacp m-lag system-id %s" % self.mlag_system_id)
self.changed = True
def delete_mlag_interface(self):
"""delete mlag interface attribute info"""
if self.is_mlag_interface_info_exist():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
cmd = "interface %s" % mlag_port
self.cli_add_command(cmd)
if self.mlag_priority_id:
cmd = "lacp m-lag priority %s" % self.mlag_priority_id
self.cli_add_command(cmd, True)
if self.mlag_system_id:
cmd = "lacp m-lag system-id %s" % self.mlag_system_id
self.cli_add_command(cmd, True)
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def set_mlag_global(self):
"""set mlag global attribute info"""
if self.is_mlag_global_info_change():
conf_str = CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD
if self.mlag_priority_id:
conf_str += "<lacpMlagPriority>%s</lacpMlagPriority>" % self.mlag_priority_id
if self.mlag_system_id:
conf_str += "<lacpMlagSysId>%s</lacpMlagSysId>" % self.mlag_system_id
conf_str += CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set mlag global attribute info failed.')
if self.mlag_priority_id:
self.updates_cmd.append(
"lacp m-lag priority %s" % self.mlag_priority_id)
if self.mlag_system_id:
self.updates_cmd.append(
"lacp m-lag system-id %s" % self.mlag_system_id)
self.changed = True
def delete_mlag_global(self):
"""delete mlag global attribute info"""
if self.is_mlag_global_info_exist():
if self.mlag_priority_id:
cmd = "lacp m-lag priority %s" % self.mlag_priority_id
self.cli_add_command(cmd, True)
if self.mlag_system_id:
cmd = "lacp m-lag system-id %s" % self.mlag_system_id
self.cli_add_command(cmd, True)
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def get_proposed(self):
"""get proposed info"""
if self.eth_trunk_id:
self.proposed["eth_trunk_id"] = self.eth_trunk_id
if self.dfs_group_id:
self.proposed["dfs_group_id"] = self.dfs_group_id
if self.mlag_id:
self.proposed["mlag_id"] = self.mlag_id
if self.mlag_system_id:
self.proposed["mlag_system_id"] = self.mlag_system_id
if self.mlag_priority_id:
self.proposed["mlag_priority_id"] = self.mlag_priority_id
if self.interface:
self.proposed["interface"] = self.interface
if self.mlag_error_down:
self.proposed["mlag_error_down"] = self.mlag_error_down
if self.state:
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
self.mlag_info = self.get_mlag_info()
self.mlag_global_info = self.get_mlag_global_info()
self.mlag_error_down_info = self.get_mlag_error_down_info()
if self.eth_trunk_id or self.dfs_group_id or self.mlag_id:
if not self.mlag_system_id and not self.mlag_priority_id:
if self.mlag_info:
self.existing["mlagInfos"] = self.mlag_info["mlagInfos"]
        if self.mlag_system_id or self.mlag_priority_id:
            if self.eth_trunk_id:
                self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info()
                if self.mlag_trunk_attribute_info:
                    if self.mlag_system_id:
                        self.existing["lacpMlagSysId"] = self.mlag_trunk_attribute_info[
                            "lacpMlagSysId"]
                    if self.mlag_priority_id:
                        self.existing["lacpMlagPriority"] = self.mlag_trunk_attribute_info[
                            "lacpMlagPriority"]
            else:
                if self.mlag_global_info:
                    if self.mlag_system_id:
                        self.existing["lacpMlagSysId"] = self.mlag_global_info[
                            "lacpMlagSysId"]
                    if self.mlag_priority_id:
                        self.existing["lacpMlagPriority"] = self.mlag_global_info[
                            "lacpMlagPriority"]
if self.interface or self.mlag_error_down:
if self.mlag_error_down_info:
self.existing["mlagErrorDownInfos"] = self.mlag_error_down_info[
"mlagErrorDownInfos"]
def get_end_state(self):
"""get end state info"""
if self.eth_trunk_id or self.dfs_group_id or self.mlag_id:
self.mlag_info = self.get_mlag_info()
if not self.mlag_system_id and not self.mlag_priority_id:
if self.mlag_info:
self.end_state["mlagInfos"] = self.mlag_info["mlagInfos"]
if self.mlag_system_id or self.mlag_priority_id:
if self.eth_trunk_id:
self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info()
if self.mlag_trunk_attribute_info:
if self.mlag_system_id:
self.end_state["lacpMlagSysId"] = self.mlag_trunk_attribute_info[
"lacpMlagSysId"]
if self.mlag_priority_id:
self.end_state["lacpMlagPriority"] = self.mlag_trunk_attribute_info[
"lacpMlagPriority"]
else:
self.mlag_global_info = self.get_mlag_global_info()
if self.mlag_global_info:
if self.mlag_system_id:
self.end_state["lacpMlagSysId"] = self.mlag_global_info[
"lacpMlagSysId"]
if self.mlag_priority_id:
self.end_state["lacpMlagPriority"] = self.mlag_global_info[
"lacpMlagPriority"]
if self.interface or self.mlag_error_down:
self.mlag_error_down_info = self.get_mlag_error_down_info()
if self.mlag_error_down_info:
self.end_state["mlagErrorDownInfos"] = self.mlag_error_down_info[
"mlagErrorDownInfos"]
def work(self):
"""worker"""
self.check_params()
self.get_proposed()
self.get_existing()
if self.eth_trunk_id or self.dfs_group_id or self.mlag_id:
self.mlag_info = self.get_mlag_info()
if self.eth_trunk_id and self.dfs_group_id and self.mlag_id:
if self.state == "present":
self.create_mlag()
else:
self.delete_mlag()
else:
if not self.mlag_system_id and not self.mlag_priority_id:
self.module.fail_json(
                        msg='Error: eth_trunk_id, dfs_group_id, mlag_id must be configured at the same time.')
if self.mlag_system_id or self.mlag_priority_id:
if self.eth_trunk_id:
self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info()
if self.mlag_system_id or self.mlag_priority_id:
if self.state == "present":
self.set_mlag_interface()
else:
self.delete_mlag_interface()
else:
self.mlag_global_info = self.get_mlag_global_info()
if self.mlag_system_id or self.mlag_priority_id:
if self.state == "present":
self.set_mlag_global()
else:
self.delete_mlag_global()
if self.interface or self.mlag_error_down:
self.mlag_error_down_info = self.get_mlag_error_down_info()
if self.interface and self.mlag_error_down:
if self.mlag_error_down == "enable":
self.create_mlag_error_down()
else:
self.delete_mlag_error_down()
else:
self.module.fail_json(
                    msg='Error: interface, mlag_error_down must be configured at the same time.')
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
eth_trunk_id=dict(type='str'),
dfs_group_id=dict(type='str'),
mlag_id=dict(type='str'),
mlag_system_id=dict(type='str'),
mlag_priority_id=dict(type='str'),
interface=dict(type='str'),
mlag_error_down=dict(type='str', choices=['enable', 'disable']),
state=dict(type='str', default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = MlagInterface(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
sid88in/incubator-airflow | airflow/contrib/operators/winrm_operator.py | 13 | 5811 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from base64 import b64encode
import logging
from winrm.exceptions import WinRMOperationTimeoutError
from airflow import configuration
from airflow.contrib.hooks.winrm_hook import WinRMHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
# Hide the following error message in urllib3 when making WinRM connections:
# requests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(),
# MultipartInvariantViolationDefect()], unparsed data: ''
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.CRITICAL)
class WinRMOperator(BaseOperator):
"""
WinRMOperator to execute commands on given remote host using the winrm_hook.
    :param winrm_hook: predefined winrm_hook to use for remote execution
:type winrm_hook: :class:`WinRMHook`
:param ssh_conn_id: connection id from airflow Connections
:type ssh_conn_id: str
:param remote_host: remote host to connect
:type remote_host: str
:param command: command to execute on remote host. (templated)
:type command: str
:param timeout: timeout for executing the command.
:type timeout: int
    :param do_xcom_push: return the stdout, which also gets set in xcom by the airflow platform
:type do_xcom_push: bool
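    A minimal usage sketch (the connection id and task id below are
    illustrative, not defaults)::

        from airflow.contrib.hooks.winrm_hook import WinRMHook
        hook = WinRMHook(ssh_conn_id='winrm_default')
        run_cmd = WinRMOperator(task_id='run_hostname',
                                winrm_hook=hook,
                                command='hostname')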
"""
template_fields = ('command',)
@apply_defaults
def __init__(self,
winrm_hook=None,
ssh_conn_id=None,
remote_host=None,
command=None,
timeout=10,
do_xcom_push=False,
*args,
**kwargs):
super(WinRMOperator, self).__init__(*args, **kwargs)
self.winrm_hook = winrm_hook
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.command = command
self.timeout = timeout
self.do_xcom_push = do_xcom_push
def execute(self, context):
if self.ssh_conn_id and not self.winrm_hook:
self.log.info("Hook not found, creating...")
self.winrm_hook = WinRMHook(ssh_conn_id=self.ssh_conn_id)
if not self.winrm_hook:
raise AirflowException("Cannot operate without winrm_hook or ssh_conn_id.")
if self.remote_host is not None:
self.winrm_hook.remote_host = self.remote_host
if not self.command:
raise AirflowException("No command specified so nothing to execute here.")
winrm_client = self.winrm_hook.get_conn()
try:
self.log.info("Running command: '{command}'...".format(command=self.command))
command_id = self.winrm_hook.winrm_protocol.run_command(
winrm_client,
self.command
)
# See: https://github.com/diyan/pywinrm/blob/master/winrm/protocol.py
stdout_buffer = []
stderr_buffer = []
command_done = False
while not command_done:
try:
stdout, stderr, return_code, command_done = \
self.winrm_hook.winrm_protocol._raw_get_command_output(
winrm_client,
command_id
)
# Only buffer stdout if we need to so that we minimize memory usage.
if self.do_xcom_push:
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
for line in stdout.decode('utf-8').splitlines():
self.log.info(line)
for line in stderr.decode('utf-8').splitlines():
self.log.warning(line)
                except WinRMOperationTimeoutError:
# this is an expected error when waiting for a
# long-running process, just silently retry
pass
self.winrm_hook.winrm_protocol.cleanup_command(winrm_client, command_id)
self.winrm_hook.winrm_protocol.close_shell(winrm_client)
except Exception as e:
raise AirflowException("WinRM operator error: {0}".format(str(e)))
        if return_code == 0:
# returning output if do_xcom_push is set
if self.do_xcom_push:
enable_pickling = configuration.conf.getboolean(
'core', 'enable_xcom_pickling'
)
if enable_pickling:
return stdout_buffer
else:
return b64encode(b''.join(stdout_buffer)).decode('utf-8')
else:
error_msg = "Error running cmd: {0}, return code: {1}, error: {2}".format(
self.command,
return_code,
b''.join(stderr_buffer).decode('utf-8')
)
raise AirflowException(error_msg)
self.log.info("Finished!")
return True
| apache-2.0 |
google/contentbox | third_party/django/contrib/gis/db/models/sql/query.py | 209 | 5406 | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
ALL_TERMS = set([
'bbcontains', 'bboverlaps', 'contained', 'contains',
'contains_properly', 'coveredby', 'covers', 'crosses', 'disjoint',
'distance_gt', 'distance_gte', 'distance_lt', 'distance_lte',
'dwithin', 'equals', 'exact',
'intersects', 'overlaps', 'relate', 'same_as', 'touches', 'within',
'left', 'right', 'overlaps_left', 'overlaps_right',
'overlaps_above', 'overlaps_below',
'strictly_above', 'strictly_below'
])
ALL_TERMS.update(sql.constants.QUERY_TERMS)
class GeoQuery(sql.Query):
"""
A single spatial SQL query.
"""
    # Overriding the valid query terms.
query_terms = ALL_TERMS
aggregates_module = gis_aggregates
compiler = 'GeoSQLCompiler'
#### Methods overridden from the base Query class ####
def __init__(self, model, where=GeoWhereNode):
super(GeoQuery, self).__init__(model, where)
# The following attributes are customized for the GeoQuerySet.
# The GeoWhereNode and SpatialBackend classes contain backend-specific
# routines and functions.
self.custom_select = {}
self.transformed_srid = None
self.extra_select_fields = {}
def clone(self, *args, **kwargs):
obj = super(GeoQuery, self).clone(*args, **kwargs)
# Customized selection dictionary and transformed srid flag have
# to also be added to obj.
obj.custom_select = self.custom_select.copy()
obj.transformed_srid = self.transformed_srid
obj.extra_select_fields = self.extra_select_fields.copy()
return obj
def convert_values(self, value, field, connection):
"""
Using the same routines that Oracle does we can convert our
extra selection objects into Geometry and Distance objects.
TODO: Make converted objects 'lazy' for less overhead.
"""
if connection.ops.oracle:
# Running through Oracle's first.
value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)
if value is None:
# Output from spatial function is NULL (e.g., called
# function on a geometry field with NULL value).
pass
elif isinstance(field, DistanceField):
# Using the field's distance attribute, can instantiate
# `Distance` with the right context.
value = Distance(**{field.distance_att : value})
elif isinstance(field, AreaField):
value = Area(**{field.area_att : value})
elif isinstance(field, (GeomField, GeometryField)) and value:
value = Geometry(value)
elif field is not None:
return super(GeoQuery, self).convert_values(value, field, connection)
return value
def get_aggregation(self, using):
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
connection = connections[using]
for alias, aggregate in self.aggregate_select.items():
if isinstance(aggregate, gis_aggregates.GeoAggregate):
if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
self.extra_select_fields[alias] = GeomField()
return super(GeoQuery, self).get_aggregation(using)
def resolve_aggregate(self, value, aggregate, connection):
"""
Overridden from GeoQuery's normalize to handle the conversion of
GeoAggregate objects.
"""
if isinstance(aggregate, self.aggregates_module.GeoAggregate):
if aggregate.is_extent:
if aggregate.is_extent == '3D':
return connection.ops.convert_extent3d(value)
else:
return connection.ops.convert_extent(value)
else:
return connection.ops.convert_geom(value, aggregate.source)
else:
return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered; or specified via the
`field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuery's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for fld in self.model._meta.fields:
if isinstance(fld, GeometryField): return fld
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GeoWhereNode._check_geo_field(self.model._meta, field_name)
| apache-2.0 |
italomaia/turtle-linux | games/Dynamite/pgu/test.py | 1 | 1624 | import pygame
from pygame.locals import *
import gui
screen = pygame.display.set_mode(
(640, 480), FULLSCREEN ) # try adding DOUBLEBUF | HWSURFACE
# pygame.mouse.set_visible(0)
app = gui.App()
c = gui.Container(width=640,height=480)
##
## dialog 1
##
t1 = gui.Table()
t1.tr()
t1.add(gui.Label("Gal Test"))
t2 = gui.Table()
t2.tr()
t2.add(gui.Label("Gui Widgets"))
t2.add(gui.Input())
t2.tr()
t2.add(gui.Label("Button"))
t2.add(gui.Button("Click Me!"))
d1 = gui.Dialog(t1, t2)
c.add(d1, 50, 150)
##
## dialog 2
##
t3 = gui.Table()
t3.tr()
t3.add(gui.Label("Another one"))
t4 = gui.Table()
t4.tr()
t4.add(gui.Label("Name"))
t4.add(gui.Input())
t4.tr()
t4.add(gui.Label("Ohh"))
b1 = gui.Button("OK")
t4.add(b1)
d2 = gui.Dialog(t3, t4)
c.add(d2, 50, 300)
##
## some labels
##
l1 = gui.Label("Suppose this is a menu", color=(255, 255, 255) )
c.add(l1, 50, 50)
l2 = gui.Label("Click <SPACE> to hide top dialog", color=(255, 255,
255) )
c.add(l2, 50, 75)
l3 = gui.Label("Opps... Did it happen?", color=(255, 255, 255) )
##
## app begins
##
app.init(widget=c,screen=screen)
FRAME_EVT = USEREVENT + 1
pygame.event.Event(FRAME_EVT)
pygame.time.set_timer(FRAME_EVT, 30)
_quit = 0
while _quit == 0:
event = pygame.event.wait()
if event.type == FRAME_EVT:
pygame.display.flip()
continue
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
_quit = 1
continue
elif event.key == K_SPACE:
d1.close()
c.add(l3, 100, 100)
app._event(event)
screen.fill((0,0,0))
app.paint(screen)
| gpl-3.0 |
Sixshaman/networkx | networkx/utils/random_sequence.py | 10 | 6411 | """
Utilities for generating random numbers, random sequences, and
random selections.
"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import random
import sys
import networkx as nx
__author__ = '\n'.join(['Aric Hagberg ([email protected])',
'Dan Schult([email protected])',
'Ben Edwards([email protected])'])
import warnings as _warnings
def create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):
_warnings.warn("create_degree_sequence() is deprecated",
DeprecationWarning)
""" Attempt to create a valid degree sequence of length n using
specified function sfunction(n,**kwds).
Parameters
----------
n : int
Length of degree sequence = number of nodes
sfunction: function
Function which returns a list of n real or integer values.
Called as "sfunction(n,**kwds)".
max_tries: int
Max number of attempts at creating valid degree sequence.
Notes
-----
Repeatedly create a degree sequence by calling sfunction(n,**kwds)
until achieving a valid degree sequence. If unsuccessful after
max_tries attempts, raise an exception.
For examples of sfunctions that return sequences of random numbers,
see networkx.Utils.
Examples
--------
>>> from networkx.utils import uniform_sequence, create_degree_sequence
>>> seq=create_degree_sequence(10,uniform_sequence)
"""
tries=0
max_deg=n
while tries < max_tries:
trialseq=sfunction(n,**kwds)
# round to integer values in the range [0,max_deg]
seq=[min(max_deg, max( int(round(s)),0 )) for s in trialseq]
# if graphical return, else throw away and try again
if nx.is_valid_degree_sequence(seq):
return seq
tries+=1
raise nx.NetworkXError(\
"Exceeded max (%d) attempts at a valid sequence."%max_tries)
# The same helpers for choosing random sequences from distributions
# uses Python's random module
# http://www.python.org/doc/current/lib/module-random.html
def pareto_sequence(n,exponent=1.0):
"""
Return sample sequence of length n from a Pareto distribution.
"""
return [random.paretovariate(exponent) for i in range(n)]
def powerlaw_sequence(n,exponent=2.0):
"""
Return sample sequence of length n from a power law distribution.
"""
return [random.paretovariate(exponent-1) for i in range(n)]
def zipf_rv(alpha, xmin=1, seed=None):
r"""Return a random value chosen from the Zipf distribution.
The return value is an integer drawn from the probability distribution
    .. math::
p(x)=\frac{x^{-\alpha}}{\zeta(\alpha,x_{min})},
where `\zeta(\alpha,x_{min})` is the Hurwitz zeta function.
Parameters
----------
alpha : float
Exponent value of the distribution
xmin : int
Minimum value
seed : int
Seed value for random number generator
Returns
-------
x : int
Random value from Zipf distribution
Raises
------
ValueError:
If xmin < 1 or
If alpha <= 1
Notes
-----
    The rejection algorithm generates random values for the power-law
distribution in uniformly bounded expected time dependent on
parameters. See [1] for details on its operation.
Examples
--------
>>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP
References
----------
..[1] Luc Devroye, Non-Uniform Random Variate Generation,
Springer-Verlag, New York, 1986.
"""
if xmin < 1:
raise ValueError("xmin < 1")
if alpha <= 1:
raise ValueError("a <= 1.0")
    if seed is not None:
random.seed(seed)
a1 = alpha - 1.0
b = 2**a1
while True:
u = 1.0 - random.random() # u in (0,1]
v = random.random() # v in [0,1)
x = int(xmin*u**-(1.0/a1))
t = (1.0+(1.0/x))**a1
if v*x*(t-1.0)/(b-1.0) <= t/b:
break
return x
def zipf_sequence(n, alpha=2.0, xmin=1):
"""Return a sample sequence of length n from a Zipf distribution with
exponent parameter alpha and minimum value xmin.
See Also
--------
zipf_rv
"""
return [ zipf_rv(alpha,xmin) for _ in range(n)]
def uniform_sequence(n):
"""
Return sample sequence of length n from a uniform distribution.
"""
return [ random.uniform(0,n) for i in range(n)]
def cumulative_distribution(distribution):
"""Return normalized cumulative distribution from discrete distribution."""
cdf= [0.0]
psum=float(sum(distribution))
for i in range(0,len(distribution)):
cdf.append(cdf[i]+distribution[i]/psum)
return cdf
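# Example (illustrative): cumulative_distribution([1, 2, 1]) returns
# [0.0, 0.25, 0.75, 1.0].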
def discrete_sequence(n, distribution=None, cdistribution=None):
"""
Return sample sequence of length n from a given discrete distribution
or discrete cumulative distribution.
One of the following must be specified.
distribution = histogram of values, will be normalized
cdistribution = normalized discrete cumulative distribution
"""
import bisect
if cdistribution is not None:
cdf=cdistribution
elif distribution is not None:
cdf=cumulative_distribution(distribution)
else:
raise nx.NetworkXError(
"discrete_sequence: distribution or cdistribution missing")
# get a uniform random number
inputseq=[random.random() for i in range(n)]
# choose from CDF
seq=[bisect.bisect_left(cdf,s)-1 for s in inputseq]
return seq
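# Example (illustrative): discrete_sequence(4, distribution=[1, 2, 1]) draws
# four values from {0, 1, 2} with probabilities 0.25, 0.5 and 0.25.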
def random_weighted_sample(mapping, k):
"""Return k items without replacement from a weighted sample.
The input is a dictionary of items with weights as values.
"""
if k > len(mapping):
raise ValueError("sample larger than population")
sample = set()
while len(sample) < k:
sample.add(weighted_choice(mapping))
return list(sample)
def weighted_choice(mapping):
"""Return a single element from a weighted sample.
The input is a dictionary of items with weights as values.
"""
# use roulette method
rnd = random.random() * sum(mapping.values())
for k, w in mapping.items():
rnd -= w
if rnd < 0:
return k
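# Example (illustrative): weighted_choice({'a': 1, 'b': 3}) returns 'b' with
# probability 0.75 and 'a' with probability 0.25.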
| bsd-3-clause |
ezbake/ezbake-common-python | thrift/thrift-utils/lib/ezbake/thrift/utils/ezthrifttest.py | 1 | 3296 | # Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EzThriftTest contains classes that will be useful for testing thrift services
"""
from kazoo.testing import KazooTestCase
from ezbake.discovery import ServiceDiscoveryClient
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from thrift.transport import TSocket, TTransport
from thrift.transport.TTransport import TTransportException
from ..transport.EzSSLSocket import TSSLServerSocket
from multiprocessing.process import Process
import time
import logging
logger = logging.getLogger(__name__)
class EzThriftServerTestHarness(KazooTestCase):
"""The EzThriftServerTestHarness extends KazooTestCase to provide service discovery for clients in tests
The thrift server is started using a TSimpleServer and registered with EzBake service discovery
"""
def setUp(self):
super(EzThriftServerTestHarness, self).setUp()
self.sd_client = ServiceDiscoveryClient(self.hosts)
self.server_processes = []
@staticmethod
def __thrift_server(processor, host="localhost", port=8449, use_simple_server=True,
use_ssl=False, ca_certs=None, cert=None, key=None):
if use_ssl:
transport = TSSLServerSocket(host=host, port=port,
ca_certs=ca_certs, cert=cert, key=key)
else:
transport = TSocket.TServerSocket(host=host, port=port)
t_factory = TTransport.TBufferedTransportFactory()
p_factory = TBinaryProtocol.TBinaryProtocolFactory()
if use_simple_server:
server = TServer.TSimpleServer(processor, transport, t_factory, p_factory)
else:
server = TServer.TThreadedServer(processor, transport, t_factory, p_factory)
try:
server.serve()
except (Exception, AttributeError, TTransportException) as e:
print e
logger.error("Server error: %s", e)
def add_server(self, app_name, service_name, host, port, processor, use_simple_server=True, wait=1,
use_ssl=False, ca_certs=None, cert=None, key=None):
self.sd_client.register_endpoint(app_name, service_name, host, port)
server_process = Process(target=self.__thrift_server,
args=(processor, host, port, use_simple_server, use_ssl, ca_certs, cert, key))
server_process.start()
time.sleep(wait)
self.server_processes.append(server_process)
def tearDown(self):
super(EzThriftServerTestHarness, self).tearDown()
for server_process in self.server_processes:
if server_process.is_alive():
server_process.terminate() | apache-2.0 |
softliumin/redis-py | redis/client.py | 22 | 100985 | from __future__ import with_statement
from itertools import chain
import datetime
import sys
import warnings
import time
import threading
import time as mod_time
from redis._compat import (b, basestring, bytes, imap, iteritems, iterkeys,
itervalues, izip, long, nativestr, unicode,
safe_unicode)
from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
SSLConnection, Token)
from redis.lock import Lock, LuaLock
from redis.exceptions import (
ConnectionError,
DataError,
ExecAbortError,
NoScriptError,
PubSubError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
SYM_EMPTY = b('')
def list_or_args(keys, args):
# returns a single list combining keys and args
try:
iter(keys)
# a string or bytes instance can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, (basestring, bytes)):
keys = [keys]
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
if not response:
return None
try:
response = int(response)
except ValueError:
return None
return datetime.datetime.fromtimestamp(response)
def string_keys_to_dict(key_string, callback):
return dict.fromkeys(key_string.split(), callback)
def dict_merge(*dicts):
merged = {}
[merged.update(d) for d in dicts]
return merged
def parse_debug_object(response):
"Parse the results of Redis's DEBUG OBJECT command into a Python dict"
# The 'type' of the object is the first item in the response, but isn't
# prefixed with a name
response = nativestr(response)
response = 'type:' + response
response = dict([kv.split(':') for kv in response.split()])
# parse some expected int values from the string response
# note: this cmd isn't spec'd so these may not appear in all redis versions
int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
for field in int_fields:
if field in response:
response[field] = int(response[field])
return response
def parse_object(response, infotype):
"Parse the results of an OBJECT command"
if infotype in ('idletime', 'refcount'):
return int_or_none(response)
return response
def parse_info(response):
"Parse the result of Redis's INFO command into a Python dict"
info = {}
response = nativestr(response)
def get_value(value):
if ',' not in value or '=' not in value:
try:
if '.' in value:
return float(value)
else:
return int(value)
except ValueError:
return value
else:
sub_dict = {}
for item in value.split(','):
k, v = item.rsplit('=', 1)
sub_dict[k] = get_value(v)
return sub_dict
for line in response.splitlines():
if line and not line.startswith('#'):
if line.find(':') != -1:
key, value = line.split(':', 1)
info[key] = get_value(value)
else:
# if the line isn't splittable, append it to the "__raw__" key
info.setdefault('__raw__', []).append(line)
return info
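# Example (illustrative): an INFO line such as "connected_clients:2" becomes
# info['connected_clients'] == 2, while "db0:keys=10,expires=0" becomes
# info['db0'] == {'keys': 10, 'expires': 0}.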
SENTINEL_STATE_TYPES = {
'can-failover-its-master': int,
'config-epoch': int,
'down-after-milliseconds': int,
'failover-timeout': int,
'info-refresh': int,
'last-hello-message': int,
'last-ok-ping-reply': int,
'last-ping-reply': int,
'last-ping-sent': int,
'master-link-down-time': int,
'master-port': int,
'num-other-sentinels': int,
'num-slaves': int,
'o-down-time': int,
'pending-commands': int,
'parallel-syncs': int,
'port': int,
'quorum': int,
'role-reported-time': int,
's-down-time': int,
'slave-priority': int,
'slave-repl-offset': int,
'voted-leader-epoch': int
}
def parse_sentinel_state(item):
result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
flags = set(result['flags'].split(','))
for name, flag in (('is_master', 'master'), ('is_slave', 'slave'),
('is_sdown', 's_down'), ('is_odown', 'o_down'),
('is_sentinel', 'sentinel'),
('is_disconnected', 'disconnected'),
('is_master_down', 'master_down')):
result[name] = flag in flags
return result
def parse_sentinel_master(response):
return parse_sentinel_state(imap(nativestr, response))
def parse_sentinel_masters(response):
result = {}
for item in response:
state = parse_sentinel_state(imap(nativestr, item))
result[state['name']] = state
return result
def parse_sentinel_slaves_and_sentinels(response):
return [parse_sentinel_state(imap(nativestr, item)) for item in response]
def parse_sentinel_get_master(response):
return response and (response[0], int(response[1])) or None
def pairs_to_dict(response):
"Create a dict given a list of key/value pairs"
it = iter(response)
return dict(izip(it, it))
def pairs_to_dict_typed(response, type_info):
it = iter(response)
result = {}
for key, value in izip(it, it):
if key in type_info:
try:
value = type_info[key](value)
except:
# if for some reason the value can't be coerced, just use
# the string value
pass
result[key] = value
return result
def zset_score_pairs(response, **options):
"""
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
if not response or not options['withscores']:
return response
score_cast_func = options.get('score_cast_func', float)
it = iter(response)
return list(izip(it, imap(score_cast_func, it)))
def sort_return_tuples(response, **options):
"""
If ``groups`` is specified, return the response as a list of
n-element tuples with n being the value found in options['groups']
"""
if not response or not options['groups']:
return response
n = options['groups']
return list(izip(*[response[i::n] for i in range(n)]))
def int_or_none(response):
if response is None:
return None
return int(response)
def float_or_none(response):
if response is None:
return None
return float(response)
def bool_ok(response):
return nativestr(response) == 'OK'
def parse_client_list(response, **options):
clients = []
for c in nativestr(response).splitlines():
clients.append(dict([pair.split('=') for pair in c.split(' ')]))
return clients
def parse_config_get(response, **options):
response = [nativestr(i) if i is not None else None for i in response]
return response and pairs_to_dict(response) or {}
def parse_scan(response, **options):
cursor, r = response
return long(cursor), r
def parse_hscan(response, **options):
cursor, r = response
return long(cursor), r and pairs_to_dict(r) or {}
def parse_zscan(response, **options):
score_cast_func = options.get('score_cast_func', float)
cursor, r = response
it = iter(r)
return long(cursor), list(izip(it, imap(score_cast_func, it)))
def parse_slowlog_get(response, **options):
return [{
'id': item[0],
'start_time': int(item[1]),
'duration': int(item[2]),
'command': b(' ').join(item[3])
} for item in response]
class StrictRedis(object):
"""
Implementation of the Redis protocol.
This abstract class provides a Python interface to all Redis commands
and an implementation of the Redis protocol.
Connection and Pipeline derive from this, implementing how
the commands are sent and received to the Redis server
"""
RESPONSE_CALLBACKS = dict_merge(
string_keys_to_dict(
'AUTH EXISTS EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST '
'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',
bool
),
string_keys_to_dict(
'BITCOUNT BITPOS DECRBY DEL GETBIT HDEL HLEN INCRBY LINSERT LLEN '
'LPUSHX PFADD PFCOUNT RPUSHX SADD SCARD SDIFFSTORE SETBIT '
'SETRANGE SINTERSTORE SREM STRLEN SUNIONSTORE ZADD ZCARD '
'ZLEXCOUNT ZREM ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',
int
),
string_keys_to_dict('INCRBYFLOAT HINCRBYFLOAT', float),
string_keys_to_dict(
# these return OK, or int if redis-server is >=1.3.4
'LPUSH RPUSH',
lambda r: isinstance(r, long) and r or nativestr(r) == 'OK'
),
string_keys_to_dict('SORT', sort_return_tuples),
string_keys_to_dict('ZSCORE ZINCRBY', float_or_none),
string_keys_to_dict(
'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE RENAME '
'SAVE SELECT SHUTDOWN SLAVEOF WATCH UNWATCH',
bool_ok
),
string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
string_keys_to_dict(
'SDIFF SINTER SMEMBERS SUNION',
lambda r: r and set(r) or set()
),
string_keys_to_dict(
'ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
zset_score_pairs
),
string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
{
'CLIENT GETNAME': lambda r: r and nativestr(r),
'CLIENT KILL': bool_ok,
'CLIENT LIST': parse_client_list,
'CLIENT SETNAME': bool_ok,
'CONFIG GET': parse_config_get,
'CONFIG RESETSTAT': bool_ok,
'CONFIG SET': bool_ok,
'DEBUG OBJECT': parse_debug_object,
'HGETALL': lambda r: r and pairs_to_dict(r) or {},
'HSCAN': parse_hscan,
'INFO': parse_info,
'LASTSAVE': timestamp_to_datetime,
'OBJECT': parse_object,
'PING': lambda r: nativestr(r) == 'PONG',
'RANDOMKEY': lambda r: r and r or None,
'SCAN': parse_scan,
'SCRIPT EXISTS': lambda r: list(imap(bool, r)),
'SCRIPT FLUSH': bool_ok,
'SCRIPT KILL': bool_ok,
'SCRIPT LOAD': nativestr,
'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,
'SENTINEL MASTER': parse_sentinel_master,
'SENTINEL MASTERS': parse_sentinel_masters,
'SENTINEL MONITOR': bool_ok,
'SENTINEL REMOVE': bool_ok,
'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,
'SENTINEL SET': bool_ok,
'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,
'SET': lambda r: r and nativestr(r) == 'OK',
'SLOWLOG GET': parse_slowlog_get,
'SLOWLOG LEN': int,
'SLOWLOG RESET': bool_ok,
'SSCAN': parse_scan,
'TIME': lambda x: (int(x[0]), int(x[1])),
'ZSCAN': parse_zscan
}
)
@classmethod
def from_url(cls, url, db=None, **kwargs):
"""
Return a Redis client object configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)
return cls(connection_pool=connection_pool)
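    # Usage sketch (URL values are illustrative):
    #   client = StrictRedis.from_url('redis://:mypassword@localhost:6379/2')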
def __init__(self, host='localhost', port=6379,
db=0, password=None, socket_timeout=None,
socket_connect_timeout=None,
socket_keepalive=None, socket_keepalive_options=None,
connection_pool=None, unix_socket_path=None,
encoding='utf-8', encoding_errors='strict',
charset=None, errors=None,
decode_responses=False, retry_on_timeout=False,
ssl=False, ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs=None, ssl_ca_certs=None):
if not connection_pool:
if charset is not None:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
encoding = charset
if errors is not None:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
encoding_errors = errors
kwargs = {
'db': db,
'password': password,
'socket_timeout': socket_timeout,
'encoding': encoding,
'encoding_errors': encoding_errors,
'decode_responses': decode_responses,
'retry_on_timeout': retry_on_timeout
}
# based on input, setup appropriate connection args
if unix_socket_path is not None:
kwargs.update({
'path': unix_socket_path,
'connection_class': UnixDomainSocketConnection
})
else:
# TCP specific options
kwargs.update({
'host': host,
'port': port,
'socket_connect_timeout': socket_connect_timeout,
'socket_keepalive': socket_keepalive,
'socket_keepalive_options': socket_keepalive_options,
})
if ssl:
kwargs.update({
'connection_class': SSLConnection,
'ssl_keyfile': ssl_keyfile,
'ssl_certfile': ssl_certfile,
'ssl_cert_reqs': ssl_cert_reqs,
'ssl_ca_certs': ssl_ca_certs,
})
connection_pool = ConnectionPool(**kwargs)
self.connection_pool = connection_pool
self._use_lua_lock = None
self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()
def __repr__(self):
return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))
def set_response_callback(self, command, callback):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return StrictPipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop('shard_hint', None)
value_from_callable = kwargs.pop('value_from_callable', False)
watch_delay = kwargs.pop('watch_delay', None)
with self.pipeline(True, shard_hint) as pipe:
while 1:
try:
if watches:
pipe.watch(*watches)
func_value = func(pipe)
exec_value = pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
time.sleep(watch_delay)
continue
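    # Usage sketch (the key name 'counter' is illustrative; `r` is assumed to
    # be a StrictRedis instance):
    #   def double_it(pipe):
    #       current = int(pipe.get('counter') or 0)
    #       pipe.multi()
    #       pipe.set('counter', current * 2)
    #   r.transaction(double_it, 'counter')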
def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
lock_class=None, thread_local=True):
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release the thread-2's lock.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage. """
if lock_class is None:
if self._use_lua_lock is None:
# the first time .lock() is called, determine if we can use
# Lua by attempting to register the necessary scripts
try:
LuaLock.register_scripts(self)
self._use_lua_lock = True
except ResponseError:
self._use_lua_lock = False
lock_class = self._use_lua_lock and LuaLock or Lock
return lock_class(self, name, timeout=timeout, sleep=sleep,
blocking_timeout=blocking_timeout,
thread_local=thread_local)
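    # Usage sketch (the lock name is illustrative; `r` is assumed to be a
    # StrictRedis instance). Lock objects support the context manager protocol:
    #   with r.lock('resource-lock', timeout=10, blocking_timeout=5):
    #       ...  # critical section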
def pubsub(self, **kwargs):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(self.connection_pool, **kwargs)
# COMMAND EXECUTION AND PROTOCOL PARSING
def execute_command(self, *args, **options):
"Execute a command and return a parsed response"
pool = self.connection_pool
command_name = args[0]
connection = pool.get_connection(command_name, **options)
try:
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not connection.retry_on_timeout and isinstance(e, TimeoutError):
raise
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
finally:
pool.release(connection)
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
response = connection.read_response()
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response
# SERVER INFORMATION
def bgrewriteaof(self):
"Tell the Redis server to rewrite the AOF file from data in memory."
return self.execute_command('BGREWRITEAOF')
def bgsave(self):
"""
Tell the Redis server to save its data to disk. Unlike save(),
this method is asynchronous and returns immediately.
"""
return self.execute_command('BGSAVE')
def client_kill(self, address):
"Disconnects the client at ``address`` (ip:port)"
return self.execute_command('CLIENT KILL', address)
def client_list(self):
"Returns a list of currently connected clients"
return self.execute_command('CLIENT LIST')
def client_getname(self):
"Returns the current connection name"
return self.execute_command('CLIENT GETNAME')
def client_setname(self, name):
"Sets the current connection name"
return self.execute_command('CLIENT SETNAME', name)
def config_get(self, pattern="*"):
"Return a dictionary of configuration based on the ``pattern``"
return self.execute_command('CONFIG GET', pattern)
def config_set(self, name, value):
"Set config item ``name`` with ``value``"
return self.execute_command('CONFIG SET', name, value)
def config_resetstat(self):
"Reset runtime statistics"
return self.execute_command('CONFIG RESETSTAT')
def config_rewrite(self):
"Rewrite config file with the minimal change to reflect running config"
return self.execute_command('CONFIG REWRITE')
def dbsize(self):
"Returns the number of keys in the current database"
return self.execute_command('DBSIZE')
def debug_object(self, key):
"Returns version specific meta information about a given key"
return self.execute_command('DEBUG OBJECT', key)
def echo(self, value):
"Echo the string back from the server"
return self.execute_command('ECHO', value)
def flushall(self):
"Delete all keys in all databases on the current host"
return self.execute_command('FLUSHALL')
def flushdb(self):
"Delete all keys in the current database"
return self.execute_command('FLUSHDB')
def info(self, section=None):
"""
Returns a dictionary containing information about the Redis server
The ``section`` option can be used to select a specific section
of information
        The ``section`` option is not supported by older versions of Redis
        Server, and will generate a ResponseError
"""
if section is None:
return self.execute_command('INFO')
else:
return self.execute_command('INFO', section)
def lastsave(self):
"""
Return a Python datetime object representing the last time the
Redis database was saved to disk
"""
return self.execute_command('LASTSAVE')
def object(self, infotype, key):
"Return the encoding, idletime, or refcount about the key"
return self.execute_command('OBJECT', infotype, key, infotype=infotype)
def ping(self):
"Ping the Redis server"
return self.execute_command('PING')
def save(self):
"""
Tell the Redis server to save its data to disk,
blocking until the save is complete
"""
return self.execute_command('SAVE')
def sentinel(self, *args):
"Redis Sentinel's SENTINEL command."
warnings.warn(
DeprecationWarning('Use the individual sentinel_* methods'))
def sentinel_get_master_addr_by_name(self, service_name):
"Returns a (host, port) pair for the given ``service_name``"
return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
service_name)
def sentinel_master(self, service_name):
"Returns a dictionary containing the specified masters state."
return self.execute_command('SENTINEL MASTER', service_name)
def sentinel_masters(self):
"Returns a list of dictionaries containing each master's state."
return self.execute_command('SENTINEL MASTERS')
def sentinel_monitor(self, name, ip, port, quorum):
"Add a new master to Sentinel to be monitored"
return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
def sentinel_remove(self, name):
"Remove a master from Sentinel's monitoring"
return self.execute_command('SENTINEL REMOVE', name)
def sentinel_sentinels(self, service_name):
"Returns a list of sentinels for ``service_name``"
return self.execute_command('SENTINEL SENTINELS', service_name)
def sentinel_set(self, name, option, value):
"Set Sentinel monitoring parameters for a given master"
return self.execute_command('SENTINEL SET', name, option, value)
def sentinel_slaves(self, service_name):
"Returns a list of slaves for ``service_name``"
return self.execute_command('SENTINEL SLAVES', service_name)
def shutdown(self):
"Shutdown the server"
try:
self.execute_command('SHUTDOWN')
except ConnectionError:
# a ConnectionError here is expected
return
raise RedisError("SHUTDOWN seems to have failed.")
def slaveof(self, host=None, port=None):
"""
Set the server to be a replicated slave of the instance identified
by the ``host`` and ``port``. If called without arguments, the
instance is promoted to a master instead.
"""
if host is None and port is None:
return self.execute_command('SLAVEOF', Token('NO'), Token('ONE'))
return self.execute_command('SLAVEOF', host, port)
def slowlog_get(self, num=None):
"""
Get the entries from the slowlog. If ``num`` is specified, get the
most recent ``num`` items.
"""
args = ['SLOWLOG GET']
if num is not None:
args.append(num)
return self.execute_command(*args)
def slowlog_len(self):
"Get the number of items in the slowlog"
return self.execute_command('SLOWLOG LEN')
def slowlog_reset(self):
"Remove all items in the slowlog"
return self.execute_command('SLOWLOG RESET')
def time(self):
"""
Returns the server time as a 2-item tuple of ints:
(seconds since epoch, microseconds into this second).
"""
return self.execute_command('TIME')
def wait(self, num_replicas, timeout):
"""
        Redis synchronous replication.
        Blocks until at least ``num_replicas`` have acknowledged the write
        commands sent before this call, or until ``timeout`` expires, and
        returns the number of replicas that processed them.
"""
return self.execute_command('WAIT', num_replicas, timeout)
# BASIC KEY COMMANDS
def append(self, key, value):
"""
Appends the string ``value`` to the value at ``key``. If ``key``
doesn't already exist, create it with a value of ``value``.
Returns the new length of the value at ``key``.
"""
return self.execute_command('APPEND', key, value)
def bitcount(self, key, start=None, end=None):
"""
Returns the count of set bits in the value of ``key``. Optional
        ``start`` and ``end`` parameters indicate which bytes to consider
"""
params = [key]
if start is not None and end is not None:
params.append(start)
params.append(end)
elif (start is not None and end is None) or \
(end is not None and start is None):
raise RedisError("Both start and end must be specified")
return self.execute_command('BITCOUNT', *params)
def bitop(self, operation, dest, *keys):
"""
Perform a bitwise operation using ``operation`` between ``keys`` and
store the result in ``dest``.
"""
return self.execute_command('BITOP', operation, dest, *keys)
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
        ``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise RedisError('bit must be 0 or 1')
params = [key, bit]
        if start is not None:
            params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise RedisError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params)
def decr(self, name, amount=1):
"""
        Decrements the value of ``name`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``
"""
return self.execute_command('DECRBY', name, amount)
def delete(self, *names):
"Delete one or more keys specified by ``names``"
return self.execute_command('DEL', *names)
def __delitem__(self, name):
self.delete(name)
def dump(self, name):
"""
Return a serialized version of the value stored at the specified key.
If key does not exist a nil bulk reply is returned.
"""
return self.execute_command('DUMP', name)
def exists(self, name):
"Returns a boolean indicating whether key ``name`` exists"
return self.execute_command('EXISTS', name)
__contains__ = exists
def expire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` seconds. ``time``
can be represented by an integer or a Python timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return self.execute_command('EXPIRE', name, time)
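    # Illustrative sketch (assumes ``r = StrictRedis()``): ``time`` may be an
    # integer number of seconds or a ``datetime.timedelta``, which is
    # converted to seconds as shown above.
    #
    #     r.set('session:42', 'payload')
    #     r.expire('session:42', datetime.timedelta(minutes=30))  # 1800 seconds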
def expireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer indicating unix time or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
when = int(mod_time.mktime(when.timetuple()))
return self.execute_command('EXPIREAT', name, when)
def get(self, name):
"""
Return the value at key ``name``, or None if the key doesn't exist
"""
return self.execute_command('GET', name)
def __getitem__(self, name):
"""
Return the value at key ``name``, raises a KeyError if the key
doesn't exist.
"""
value = self.get(name)
if value:
return value
raise KeyError(name)
def getbit(self, name, offset):
"Returns a boolean indicating the value of ``offset`` in ``name``"
return self.execute_command('GETBIT', name, offset)
def getrange(self, key, start, end):
"""
Returns the substring of the string value stored at ``key``,
determined by the offsets ``start`` and ``end`` (both are inclusive)
"""
return self.execute_command('GETRANGE', key, start, end)
def getset(self, name, value):
"""
Sets the value at key ``name`` to ``value``
and returns the old value at key ``name`` atomically.
"""
return self.execute_command('GETSET', name, value)
def incr(self, name, amount=1):
"""
        Increments the value of ``name`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
return self.execute_command('INCRBY', name, amount)
def incrby(self, name, amount=1):
"""
        Increments the value of ``name`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
# An alias for ``incr()``, because it is already implemented
# as INCRBY redis command.
return self.incr(name, amount)
def incrbyfloat(self, name, amount=1.0):
"""
Increments the value at key ``name`` by floating ``amount``.
If no key exists, the value will be initialized as ``amount``
"""
return self.execute_command('INCRBYFLOAT', name, amount)
def keys(self, pattern='*'):
"Returns a list of keys matching ``pattern``"
return self.execute_command('KEYS', pattern)
def mget(self, keys, *args):
"""
Returns a list of values ordered identically to ``keys``
"""
args = list_or_args(keys, args)
return self.execute_command('MGET', *args)
def mset(self, *args, **kwargs):
"""
Sets key/values based on a mapping. Mapping can be supplied as a single
dictionary argument or as kwargs.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSET requires **kwargs or a single dict arg')
kwargs.update(args[0])
items = []
for pair in iteritems(kwargs):
items.extend(pair)
return self.execute_command('MSET', *items)
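    # Illustrative sketch (assumes ``r = StrictRedis()``): the mapping may be
    # given as one dict positional argument or as keyword arguments.
    #
    #     r.mset({'k1': 'v1', 'k2': 'v2'})
    #     r.mset(k3='v3', k4='v4')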
def msetnx(self, *args, **kwargs):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSETNX requires **kwargs or a single '
'dict arg')
kwargs.update(args[0])
items = []
for pair in iteritems(kwargs):
items.extend(pair)
return self.execute_command('MSETNX', *items)
def move(self, name, db):
"Moves the key ``name`` to a different Redis database ``db``"
return self.execute_command('MOVE', name, db)
def persist(self, name):
"Removes an expiration on ``name``"
return self.execute_command('PERSIST', name)
def pexpire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` milliseconds.
``time`` can be represented by an integer or a Python timedelta
object.
"""
if isinstance(time, datetime.timedelta):
ms = int(time.microseconds / 1000)
time = (time.seconds + time.days * 24 * 3600) * 1000 + ms
return self.execute_command('PEXPIRE', name, time)
def pexpireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
ms = int(when.microsecond / 1000)
when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
return self.execute_command('PEXPIREAT', name, when)
def psetex(self, name, time_ms, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
"""
if isinstance(time_ms, datetime.timedelta):
ms = int(time_ms.microseconds / 1000)
time_ms = (time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms
return self.execute_command('PSETEX', name, time_ms, value)
def pttl(self, name):
"Returns the number of milliseconds until the key ``name`` will expire"
return self.execute_command('PTTL', name)
def randomkey(self):
"Returns the name of a random key"
return self.execute_command('RANDOMKEY')
def rename(self, src, dst):
"""
Rename key ``src`` to ``dst``
"""
return self.execute_command('RENAME', src, dst)
def renamenx(self, src, dst):
"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
return self.execute_command('RENAMENX', src, dst)
def restore(self, name, ttl, value):
"""
Create a key using the provided serialized value, previously obtained
using DUMP.
"""
return self.execute_command('RESTORE', name, ttl, value)
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
"""
Set the value at key ``name`` to ``value``
``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
``nx`` if set to True, set the value at key ``name`` to ``value`` if it
does not already exist.
``xx`` if set to True, set the value at key ``name`` to ``value`` if it
already exists.
"""
pieces = [name, value]
if ex:
pieces.append('EX')
if isinstance(ex, datetime.timedelta):
ex = ex.seconds + ex.days * 24 * 3600
pieces.append(ex)
if px:
pieces.append('PX')
if isinstance(px, datetime.timedelta):
ms = int(px.microseconds / 1000)
px = (px.seconds + px.days * 24 * 3600) * 1000 + ms
pieces.append(px)
if nx:
pieces.append('NX')
if xx:
pieces.append('XX')
return self.execute_command('SET', *pieces)
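    # Illustrative sketch (assumes ``r = StrictRedis()``; key names are
    # hypothetical): combining an expiry with the NX flag sets the key only
    # when it does not already exist.
    #
    #     r.set('cache:item', 'value', ex=60)             # expire in 60 seconds
    #     r.set('job:lock', 'worker-1', nx=True, px=5000) # only if absent, 5s TTL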
def __setitem__(self, name, value):
self.set(name, value)
def setbit(self, name, offset, value):
"""
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
"""
        value = 1 if value else 0
return self.execute_command('SETBIT', name, offset, value)
def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return self.execute_command('SETEX', name, time, value)
def setnx(self, name, value):
"Set the value of key ``name`` to ``value`` if key doesn't exist"
return self.execute_command('SETNX', name, value)
def setrange(self, name, offset, value):
"""
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
"""
return self.execute_command('SETRANGE', name, offset, value)
def strlen(self, name):
"Return the number of bytes stored in the value of ``name``"
return self.execute_command('STRLEN', name)
def substr(self, name, start, end=-1):
"""
Return a substring of the string at key ``name``. ``start`` and ``end``
are 0-based integers specifying the portion of the string to return.
"""
return self.execute_command('SUBSTR', name, start, end)
def ttl(self, name):
"Returns the number of seconds until the key ``name`` will expire"
return self.execute_command('TTL', name)
def type(self, name):
"Returns the type of key ``name``"
return self.execute_command('TYPE', name)
def watch(self, *names):
"""
        Watches the values at keys ``names``
"""
warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
def unwatch(self):
"""
        Unwatches all previously watched keys
"""
warnings.warn(
DeprecationWarning('Call UNWATCH from a Pipeline object'))
# LIST COMMANDS
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to LPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
keys.append(timeout)
return self.execute_command('BLPOP', *keys)
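    # Illustrative sketch (assumes ``r = StrictRedis()``; queue names are
    # hypothetical): wait up to five seconds for a value on either queue; the
    # reply is a (key, value) pair, or None if the timeout expires.
    #
    #     item = r.blpop(['queue:high', 'queue:low'], timeout=5)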
def brpop(self, keys, timeout=0):
"""
RPOP a value off of the first non-empty list
named in the ``keys`` list.
        If none of the lists in ``keys`` has a value to RPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
keys.append(timeout)
return self.execute_command('BRPOP', *keys)
def brpoplpush(self, src, dst, timeout=0):
"""
Pop a value off the tail of ``src``, push it on the head of ``dst``
and then return it.
This command blocks until a value is in ``src`` or until ``timeout``
seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
forever.
"""
if timeout is None:
timeout = 0
return self.execute_command('BRPOPLPUSH', src, dst, timeout)
def lindex(self, name, index):
"""
Return the item from list ``name`` at position ``index``
Negative indexes are supported and will return an item at the
end of the list
"""
return self.execute_command('LINDEX', name, index)
def linsert(self, name, where, refvalue, value):
"""
Insert ``value`` in list ``name`` either immediately before or after
[``where``] ``refvalue``
Returns the new length of the list on success or -1 if ``refvalue``
is not in the list.
"""
return self.execute_command('LINSERT', name, where, refvalue, value)
def llen(self, name):
"Return the length of the list ``name``"
return self.execute_command('LLEN', name)
def lpop(self, name):
"Remove and return the first item of the list ``name``"
return self.execute_command('LPOP', name)
def lpush(self, name, *values):
"Push ``values`` onto the head of the list ``name``"
return self.execute_command('LPUSH', name, *values)
def lpushx(self, name, value):
"Push ``value`` onto the head of the list ``name`` if ``name`` exists"
return self.execute_command('LPUSHX', name, value)
def lrange(self, name, start, end):
"""
Return a slice of the list ``name`` between
position ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LRANGE', name, start, end)
def lrem(self, name, count, value):
"""
Remove the first ``count`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The count argument influences the operation in the following ways:
count > 0: Remove elements equal to value moving from head to tail.
count < 0: Remove elements equal to value moving from tail to head.
count = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, count, value)
    def lset(self, name, index, value):
        "Set element at ``index`` of list ``name`` to ``value``"
return self.execute_command('LSET', name, index, value)
def ltrim(self, name, start, end):
"""
Trim the list ``name``, removing all values not within the slice
between ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LTRIM', name, start, end)
def rpop(self, name):
"Remove and return the last item of the list ``name``"
return self.execute_command('RPOP', name)
def rpoplpush(self, src, dst):
"""
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
"""
return self.execute_command('RPOPLPUSH', src, dst)
def rpush(self, name, *values):
"Push ``values`` onto the tail of the list ``name``"
return self.execute_command('RPUSH', name, *values)
def rpushx(self, name, value):
"Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
return self.execute_command('RPUSHX', name, value)
def sort(self, name, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None, groups=False):
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
            sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
pieces.append(Token('BY'))
pieces.append(by)
if start is not None and num is not None:
pieces.append(Token('LIMIT'))
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
            # Otherwise assume it's an iterable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, basestring):
pieces.append(Token('GET'))
pieces.append(get)
else:
for g in get:
pieces.append(Token('GET'))
pieces.append(g)
if desc:
pieces.append(Token('DESC'))
if alpha:
pieces.append(Token('ALPHA'))
if store is not None:
pieces.append(Token('STORE'))
pieces.append(store)
if groups:
if not get or isinstance(get, basestring) or len(get) < 2:
raise DataError('when using "groups" the "get" argument '
'must be specified and contain at least '
'two keys')
options = {'groups': len(get) if groups else None}
return self.execute_command('SORT', *pieces, **options)
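    # Illustrative sketch (assumes ``r = StrictRedis()``; key names are
    # hypothetical): sort a list of ids by an external weight key and fetch a
    # related value plus the id itself ('#') for each element, returned as
    # tuples because ``groups`` is True.
    #
    #     r.sort('user:ids', by='user:*:score', get=['user:*:name', '#'],
    #            desc=True, groups=True)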
# SCAN COMMANDS
def scan(self, cursor=0, match=None, count=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
"""
pieces = [cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
return self.execute_command('SCAN', *pieces)
def scan_iter(self, match=None, count=None):
"""
Make an iterator using the SCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
"""
cursor = '0'
while cursor != 0:
cursor, data = self.scan(cursor=cursor, match=match, count=count)
for item in data:
yield item
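    # Illustrative sketch (assumes ``r = StrictRedis()``): iterate over all
    # keys matching a pattern without the blocking behaviour of KEYS.
    #
    #     for key in r.scan_iter(match='session:*', count=100):
    #         r.expire(key, 3600)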
def sscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return lists of elements in a set. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
return self.execute_command('SSCAN', *pieces)
def sscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the SSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
"""
cursor = '0'
while cursor != 0:
cursor, data = self.sscan(name, cursor=cursor,
match=match, count=count)
for item in data:
yield item
def hscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return key/value slices in a hash. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
return self.execute_command('HSCAN', *pieces)
def hscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the HSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
"""
cursor = '0'
while cursor != 0:
cursor, data = self.hscan(name, cursor=cursor,
match=match, count=count)
for item in data.items():
yield item
def zscan(self, name, cursor=0, match=None, count=None,
score_cast_func=float):
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
``score_cast_func`` a callable used to cast the score return value
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
options = {'score_cast_func': score_cast_func}
return self.execute_command('ZSCAN', *pieces, **options)
def zscan_iter(self, name, match=None, count=None,
score_cast_func=float):
"""
Make an iterator using the ZSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
        ``count`` hints to Redis how many items to return per call
``score_cast_func`` a callable used to cast the score return value
"""
cursor = '0'
while cursor != 0:
cursor, data = self.zscan(name, cursor=cursor, match=match,
count=count,
score_cast_func=score_cast_func)
for item in data:
yield item
# SET COMMANDS
def sadd(self, name, *values):
"Add ``value(s)`` to set ``name``"
return self.execute_command('SADD', name, *values)
def scard(self, name):
"Return the number of elements in set ``name``"
return self.execute_command('SCARD', name)
def sdiff(self, keys, *args):
"Return the difference of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SDIFF', *args)
def sdiffstore(self, dest, keys, *args):
"""
Store the difference of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SDIFFSTORE', dest, *args)
def sinter(self, keys, *args):
"Return the intersection of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SINTER', *args)
def sinterstore(self, dest, keys, *args):
"""
Store the intersection of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SINTERSTORE', dest, *args)
def sismember(self, name, value):
"Return a boolean indicating if ``value`` is a member of set ``name``"
return self.execute_command('SISMEMBER', name, value)
def smembers(self, name):
"Return all members of the set ``name``"
return self.execute_command('SMEMBERS', name)
def smove(self, src, dst, value):
"Move ``value`` from set ``src`` to set ``dst`` atomically"
return self.execute_command('SMOVE', src, dst, value)
def spop(self, name):
"Remove and return a random member of set ``name``"
return self.execute_command('SPOP', name)
def srandmember(self, name, number=None):
"""
If ``number`` is None, returns a random member of set ``name``.
If ``number`` is supplied, returns a list of ``number`` random
        members of set ``name``. Note this is only available when running
Redis 2.6+.
"""
args = number and [number] or []
return self.execute_command('SRANDMEMBER', name, *args)
def srem(self, name, *values):
"Remove ``values`` from set ``name``"
return self.execute_command('SREM', name, *values)
def sunion(self, keys, *args):
"Return the union of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SUNION', *args)
def sunionstore(self, dest, keys, *args):
"""
Store the union of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SUNIONSTORE', dest, *args)
# SORTED SET COMMANDS
def zadd(self, name, *args, **kwargs):
"""
Set any number of score, element-name pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: score1, name1, score2, name2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(args)
for pair in iteritems(kwargs):
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
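    # Illustrative sketch (assumes ``r = StrictRedis()``): note the
    # score-then-member ordering of the positional arguments.
    #
    #     r.zadd('leaderboard', 100, 'alice', 250, 'bob')
    #     r.zadd('leaderboard', carol=175)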
def zcard(self, name):
"Return the number of elements in the sorted set ``name``"
return self.execute_command('ZCARD', name)
def zcount(self, name, min, max):
"""
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
"""
return self.execute_command('ZCOUNT', name, min, max)
def zincrby(self, name, value, amount=1):
"Increment the score of ``value`` in sorted set ``name`` by ``amount``"
return self.execute_command('ZINCRBY', name, amount, value)
def zinterstore(self, dest, keys, aggregate=None):
"""
Intersect multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
def zlexcount(self, name, min, max):
"""
Return the number of items in the sorted set ``name`` between the
lexicographical range ``min`` and ``max``.
"""
return self.execute_command('ZLEXCOUNT', name, min, max)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``desc`` a boolean indicating whether to sort the results descendingly
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if desc:
return self.zrevrange(name, start, end, withscores,
score_cast_func)
pieces = ['ZRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrangebylex(self, name, min, max, start=None, num=None):
"""
Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYLEX', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
return self.execute_command(*pieces)
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
        ``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYSCORE', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrank(self, name, value):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
return self.execute_command('ZRANK', name, value)
def zrem(self, name, *values):
"Remove member ``values`` from sorted set ``name``"
return self.execute_command('ZREM', name, *values)
def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYLEX', name, min, max)
def zremrangebyrank(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
        to largest. Values can be negative, indicating the highest scores.
        Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYRANK', name, min, max)
def zremrangebyscore(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYSCORE', name, max, min]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrank(self, name, value):
"""
Returns a 0-based value indicating the descending rank of
``value`` in sorted set ``name``
"""
return self.execute_command('ZREVRANK', name, value)
def zscore(self, name, value):
"Return the score of element ``value`` in sorted set ``name``"
return self.execute_command('ZSCORE', name, value)
def zunionstore(self, dest, keys, aggregate=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
def _zaggregate(self, command, dest, keys, aggregate=None):
pieces = [command, dest, len(keys)]
if isinstance(keys, dict):
keys, weights = iterkeys(keys), itervalues(keys)
else:
weights = None
pieces.extend(keys)
if weights:
pieces.append(Token('WEIGHTS'))
pieces.extend(weights)
if aggregate:
pieces.append(Token('AGGREGATE'))
pieces.append(aggregate)
return self.execute_command(*pieces)
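    # Illustrative sketch (assumes ``r = StrictRedis()``; key names are
    # hypothetical): ``keys`` may be a plain sequence, or a dict mapping key
    # names to WEIGHTS as built above.
    #
    #     r.zunionstore('scores:total', ['scores:week1', 'scores:week2'])
    #     r.zinterstore('scores:both', {'scores:week1': 1, 'scores:week2': 2},
    #                   aggregate='MAX')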
# HYPERLOGLOG COMMANDS
def pfadd(self, name, *values):
"Adds the specified elements to the specified HyperLogLog."
return self.execute_command('PFADD', name, *values)
def pfcount(self, *sources):
"""
Return the approximated cardinality of
the set observed by the HyperLogLog at key(s).
"""
return self.execute_command('PFCOUNT', *sources)
def pfmerge(self, dest, *sources):
"Merge N different HyperLogLogs into a single one."
return self.execute_command('PFMERGE', dest, *sources)
# HASH COMMANDS
def hdel(self, name, *keys):
"Delete ``keys`` from hash ``name``"
return self.execute_command('HDEL', name, *keys)
def hexists(self, name, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return self.execute_command('HEXISTS', name, key)
def hget(self, name, key):
"Return the value of ``key`` within the hash ``name``"
return self.execute_command('HGET', name, key)
def hgetall(self, name):
"Return a Python dict of the hash's name/value pairs"
return self.execute_command('HGETALL', name)
def hincrby(self, name, key, amount=1):
"Increment the value of ``key`` in hash ``name`` by ``amount``"
return self.execute_command('HINCRBY', name, key, amount)
def hincrbyfloat(self, name, key, amount=1.0):
"""
Increment the value of ``key`` in hash ``name`` by floating ``amount``
"""
return self.execute_command('HINCRBYFLOAT', name, key, amount)
def hkeys(self, name):
"Return the list of keys within hash ``name``"
return self.execute_command('HKEYS', name)
def hlen(self, name):
"Return the number of elements in hash ``name``"
return self.execute_command('HLEN', name)
def hset(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
return self.execute_command('HSET', name, key, value)
def hsetnx(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
return self.execute_command('HSETNX', name, key, value)
def hmset(self, name, mapping):
"""
Set key to value within hash ``name`` for each corresponding
key and value from the ``mapping`` dict.
"""
if not mapping:
raise DataError("'hmset' with 'mapping' of length 0")
items = []
for pair in iteritems(mapping):
items.extend(pair)
return self.execute_command('HMSET', name, *items)
def hmget(self, name, keys, *args):
"Returns a list of values ordered identically to ``keys``"
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args)
def hvals(self, name):
"Return the list of values within hash ``name``"
return self.execute_command('HVALS', name)
def publish(self, channel, message):
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
"""
return self.execute_command('PUBLISH', channel, message)
def eval(self, script, numkeys, *keys_and_args):
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVAL', script, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""
Use the ``sha`` to execute a Lua script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)
def script_exists(self, *args):
"""
Check if a script exists in the script cache by specifying the SHAs of
        each script as ``args``. Returns a list of boolean values indicating
        if each script already exists in the cache.
"""
return self.execute_command('SCRIPT EXISTS', *args)
def script_flush(self):
"Flush all scripts from the script cache"
return self.execute_command('SCRIPT FLUSH')
def script_kill(self):
"Kill the currently executing Lua script"
return self.execute_command('SCRIPT KILL')
def script_load(self, script):
"Load a Lua ``script`` into the script cache. Returns the SHA."
return self.execute_command('SCRIPT LOAD', script)
def register_script(self, script):
"""
Register a Lua ``script`` specifying the ``keys`` it will touch.
Returns a Script object that is callable and hides the complexity of
        dealing with scripts, keys, and shas. This is the preferred way to work
with Lua scripts.
"""
return Script(self, script)
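    # Illustrative sketch (assumes ``r = StrictRedis()``): the returned Script
    # object is callable with ``keys`` and ``args`` and takes care of loading
    # the script and invoking it by SHA.
    #
    #     incr_by = r.register_script(
    #         "return redis.call('INCRBY', KEYS[1], ARGV[1])")
    #     incr_by(keys=['counter'], args=[5])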
class Redis(StrictRedis):
"""
Provides backwards compatibility with older versions of redis-py that
changed arguments to some commands to be more Pythonic, sane, or by
accident.
"""
# Overridden callbacks
RESPONSE_CALLBACKS = dict_merge(
StrictRedis.RESPONSE_CALLBACKS,
{
'TTL': lambda r: r >= 0 and r or None,
'PTTL': lambda r: r >= 0 and r or None,
}
)
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return Pipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def setex(self, name, value, time):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return self.execute_command('SETEX', name, time, value)
def lrem(self, name, value, num=0):
"""
Remove the first ``num`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The ``num`` argument influences the operation in the following ways:
num > 0: Remove elements equal to value moving from head to tail.
num < 0: Remove elements equal to value moving from tail to head.
num = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, num, value)
def zadd(self, name, *args, **kwargs):
"""
        NOTE: The order of arguments differs from that of the official ZADD
        command. For backwards compatibility, this method accepts arguments
        in the form of name1, score1, name2, score2, while the official Redis
        documentation expects score1, name1, score2, name2.
If you're looking to use the standard syntax, consider using the
StrictRedis class. See the API Reference section of the docs for more
information.
Set any number of element-name, score pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: name1, score1, name2, score2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(reversed(args))
for pair in iteritems(kwargs):
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
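    # Illustrative sketch (assumes ``r = Redis()``): the legacy class expects
    # member-then-score ordering, the reverse of StrictRedis.zadd above.
    #
    #     r.zadd('leaderboard', 'alice', 100, 'bob', 250)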
class PubSub(object):
"""
PubSub provides publish, subscribe and listen support to Redis channels.
After subscribing to one or more channels, the listen() method will block
until a message arrives on one of the subscribed channels. That message
will be returned and it's safe to start listening again.
"""
PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')
def __init__(self, connection_pool, shard_hint=None,
ignore_subscribe_messages=False):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
self.ignore_subscribe_messages = ignore_subscribe_messages
self.connection = None
# we need to know the encoding options for this connection in order
# to lookup channel and pattern names for callback handlers.
conn = connection_pool.get_connection('pubsub', shard_hint)
try:
self.encoding = conn.encoding
self.encoding_errors = conn.encoding_errors
self.decode_responses = conn.decode_responses
finally:
connection_pool.release(conn)
self.reset()
def __del__(self):
try:
# if this object went out of scope prior to shutting down
# subscriptions, close the connection manually before
# returning it to the connection pool
self.reset()
except Exception:
pass
def reset(self):
if self.connection:
self.connection.disconnect()
self.connection.clear_connect_callbacks()
self.connection_pool.release(self.connection)
self.connection = None
self.channels = {}
self.patterns = {}
def close(self):
self.reset()
def on_connect(self, connection):
"Re-subscribe to any channels and patterns previously subscribed to"
# NOTE: for python3, we can't pass bytestrings as keyword arguments
# so we need to decode channel/pattern names back to unicode strings
# before passing them to [p]subscribe.
if self.channels:
channels = {}
for k, v in iteritems(self.channels):
if not self.decode_responses:
k = k.decode(self.encoding, self.encoding_errors)
channels[k] = v
self.subscribe(**channels)
if self.patterns:
patterns = {}
for k, v in iteritems(self.patterns):
if not self.decode_responses:
k = k.decode(self.encoding, self.encoding_errors)
patterns[k] = v
self.psubscribe(**patterns)
def encode(self, value):
"""
Encode the value so that it's identical to what we'll
read off the connection
"""
if self.decode_responses and isinstance(value, bytes):
value = value.decode(self.encoding, self.encoding_errors)
elif not self.decode_responses and isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
@property
def subscribed(self):
"Indicates if there are subscriptions to any channels or patterns"
return bool(self.channels or self.patterns)
def execute_command(self, *args, **kwargs):
"Execute a publish/subscribe command"
# NOTE: don't parse the response in this function. it could pull a
        # legitimate message off the stack if the connection is already
# subscribed to one or more channels
if self.connection is None:
self.connection = self.connection_pool.get_connection(
'pubsub',
self.shard_hint
)
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
self.connection.register_connect_callback(self.on_connect)
connection = self.connection
self._execute(connection, connection.send_command, *args)
def _execute(self, connection, command, *args):
try:
return command(*args)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not connection.retry_on_timeout and isinstance(e, TimeoutError):
raise
# Connect manually here. If the Redis server is down, this will
# fail and raise a ConnectionError as desired.
connection.connect()
            # the ``on_connect`` callback should have been called by the
# connection to resubscribe us to any channels and patterns we were
# previously listening to
return command(*args)
def parse_response(self, block=True, timeout=0):
"Parse the response from a publish/subscribe command"
connection = self.connection
if not block and not connection.can_read(timeout=timeout):
return None
return self._execute(connection, connection.read_response)
def psubscribe(self, *args, **kwargs):
"""
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_patterns = {}
new_patterns.update(dict.fromkeys(imap(self.encode, args)))
for pattern, handler in iteritems(kwargs):
new_patterns[self.encode(pattern)] = handler
ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns))
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
self.patterns.update(new_patterns)
return ret_val
def punsubscribe(self, *args):
"""
        Unsubscribe from the supplied patterns. If empty, unsubscribe from
all patterns.
"""
if args:
args = list_or_args(args[0], args[1:])
return self.execute_command('PUNSUBSCRIBE', *args)
def subscribe(self, *args, **kwargs):
"""
Subscribe to channels. Channels supplied as keyword arguments expect
a channel name as the key and a callable as the value. A channel's
callable will be invoked automatically when a message is received on
that channel rather than producing a message via ``listen()`` or
``get_message()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_channels = {}
new_channels.update(dict.fromkeys(imap(self.encode, args)))
for channel, handler in iteritems(kwargs):
new_channels[self.encode(channel)] = handler
ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels))
# update the channels dict AFTER we send the command. we don't want to
# subscribe twice to these channels, once for the command and again
# for the reconnection.
self.channels.update(new_channels)
return ret_val
def unsubscribe(self, *args):
"""
Unsubscribe from the supplied channels. If empty, unsubscribe from
all channels
"""
if args:
args = list_or_args(args[0], args[1:])
return self.execute_command('UNSUBSCRIBE', *args)
def listen(self):
"Listen for messages on channels this client has been subscribed to"
while self.subscribed:
response = self.handle_message(self.parse_response(block=True))
if response is not None:
yield response
def get_message(self, ignore_subscribe_messages=False, timeout=0):
"""
Get the next message if one is available, otherwise None.
If timeout is specified, the system will wait for `timeout` seconds
before returning. Timeout should be specified as a floating point
number.
"""
response = self.parse_response(block=False, timeout=timeout)
if response:
return self.handle_message(response, ignore_subscribe_messages)
return None
def handle_message(self, response, ignore_subscribe_messages=False):
"""
Parses a pub/sub message. If the channel or pattern was subscribed to
with a message handler, the handler is invoked instead of a parsed
message being returned.
"""
message_type = nativestr(response[0])
if message_type == 'pmessage':
message = {
'type': message_type,
'pattern': response[1],
'channel': response[2],
'data': response[3]
}
else:
message = {
'type': message_type,
'pattern': None,
'channel': response[1],
'data': response[2]
}
# if this is an unsubscribe message, remove it from memory
if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
subscribed_dict = None
if message_type == 'punsubscribe':
subscribed_dict = self.patterns
else:
subscribed_dict = self.channels
try:
del subscribed_dict[message['channel']]
except KeyError:
pass
if message_type in self.PUBLISH_MESSAGE_TYPES:
# if there's a message handler, invoke it
handler = None
if message_type == 'pmessage':
handler = self.patterns.get(message['pattern'], None)
else:
handler = self.channels.get(message['channel'], None)
if handler:
handler(message)
return None
else:
# this is a subscribe/unsubscribe message. ignore if we don't
# want them
if ignore_subscribe_messages or self.ignore_subscribe_messages:
return None
return message
def run_in_thread(self, sleep_time=0):
for channel, handler in iteritems(self.channels):
if handler is None:
                raise PubSubError("Channel: '%s' has no handler registered"
                                  % channel)
for pattern, handler in iteritems(self.patterns):
if handler is None:
                raise PubSubError("Pattern: '%s' has no handler registered"
                                  % pattern)
thread = PubSubWorkerThread(self, sleep_time)
thread.start()
return thread
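    # Illustrative sketch (assumes ``r = StrictRedis()``): every subscription
    # needs a handler before run_in_thread() is called; the returned worker
    # thread can later be shut down with its stop() method.
    #
    #     def handler(message):
    #         print(message['data'])
    #
    #     p = r.pubsub(ignore_subscribe_messages=True)
    #     p.subscribe(events=handler)
    #     worker = p.run_in_thread(sleep_time=0.01)
    #     # ... later ...
    #     worker.stop()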
class PubSubWorkerThread(threading.Thread):
def __init__(self, pubsub, sleep_time):
super(PubSubWorkerThread, self).__init__()
self.pubsub = pubsub
self.sleep_time = sleep_time
self._running = False
def run(self):
if self._running:
return
self._running = True
pubsub = self.pubsub
sleep_time = self.sleep_time
while pubsub.subscribed:
pubsub.get_message(ignore_subscribe_messages=True,
timeout=sleep_time)
pubsub.close()
self._running = False
def stop(self):
# stopping simply unsubscribes from all channels and patterns.
# the unsubscribe responses that are generated will short circuit
# the loop in run(), calling pubsub.close() to clean up the connection
self.pubsub.unsubscribe()
self.pubsub.punsubscribe()
class BasePipeline(object):
"""
Pipelines provide a way to transmit multiple commands to the Redis server
in one transmission. This is convenient for batch processing, such as
saving all the values in a list to Redis.
All commands executed within a pipeline are wrapped with MULTI and EXEC
calls. This guarantees all commands executed in the pipeline will be
executed atomically.
Any command raising an exception does *not* halt the execution of
subsequent commands in the pipeline. Instead, the exception is caught
and its instance is placed into the response list returned by execute().
Code iterating over the response list should be able to deal with an
instance of an exception as a potential value. In general, these will be
ResponseError exceptions, such as those raised when issuing a command
on a key of a different datatype.
"""
UNWATCH_COMMANDS = set(('DISCARD', 'EXEC', 'UNWATCH'))
def __init__(self, connection_pool, response_callbacks, transaction,
shard_hint):
self.connection_pool = connection_pool
self.connection = None
self.response_callbacks = response_callbacks
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.reset()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.reset()
def __del__(self):
try:
self.reset()
except Exception:
pass
def __len__(self):
return len(self.command_stack)
def reset(self):
self.command_stack = []
self.scripts = set()
# make sure to reset the connection state in the event that we were
# watching something
if self.watching and self.connection:
try:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
self.connection.send_command('UNWATCH')
self.connection.read_response()
except ConnectionError:
# disconnect will also remove any previous WATCHes
self.connection.disconnect()
# clean up the other instance attributes
self.watching = False
self.explicit_transaction = False
# we can safely return the connection to the pool here since we're
# sure we're no longer WATCHing anything
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def multi(self):
"""
Start a transactional block of the pipeline after WATCH commands
are issued. End the transactional block with `execute`.
"""
if self.explicit_transaction:
raise RedisError('Cannot issue nested calls to MULTI')
if self.command_stack:
raise RedisError('Commands without an initial WATCH have already '
'been issued')
self.explicit_transaction = True
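    # Illustrative sketch (assumes ``r = StrictRedis()`` and the pipeline's
    # watch() helper; WatchError comes from redis.exceptions): the usual
    # WATCH/MULTI/EXEC pattern, retrying whenever the watched key changes
    # between the read and the transaction.
    #
    #     with r.pipeline() as pipe:
    #         while True:
    #             try:
    #                 pipe.watch('counter')
    #                 current = int(pipe.get('counter') or 0)
    #                 pipe.multi()
    #                 pipe.set('counter', current + 1)
    #                 pipe.execute()
    #                 break
    #             except WatchError:
    #                 continue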
def execute_command(self, *args, **kwargs):
if (self.watching or args[0] == 'WATCH') and \
not self.explicit_transaction:
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
def immediate_execute_command(self, *args, **options):
"""
Execute a command immediately, but don't auto-retry on a
        ConnectionError if we're already WATCHing a variable. Used when
        issuing WATCH or subsequent commands that retrieve the values of
        watched keys, but before MULTI is called.
"""
command_name = args[0]
conn = self.connection
# if this is the first call, we need a connection
if not conn:
conn = self.connection_pool.get_connection(command_name,
self.shard_hint)
self.connection = conn
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
if not conn.retry_on_timeout and isinstance(e, TimeoutError):
raise
# if we're not already watching, we can safely retry the command
try:
if not self.watching:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except ConnectionError:
# the retry failed so cleanup.
conn.disconnect()
self.reset()
raise
def pipeline_execute_command(self, *args, **options):
"""
Stage a command to be executed when execute() is next called
Returns the current Pipeline object back so commands can be
chained together, such as:
pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
At some other point, you can then run: pipe.execute(),
which will execute all commands queued in the pipe.
"""
self.command_stack.append((args, options))
return self
def _execute_transaction(self, connection, commands, raise_on_error):
cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
all_cmds = connection.pack_commands([args for args, _ in cmds])
connection.send_packed_command(all_cmds)
errors = []
# parse off the response for MULTI
# NOTE: we need to handle ResponseErrors here and continue
# so that we read all the additional command messages from
# the socket
try:
self.parse_response(connection, '_')
except ResponseError:
errors.append((0, sys.exc_info()[1]))
# and all the other commands
for i, command in enumerate(commands):
try:
self.parse_response(connection, '_')
except ResponseError:
ex = sys.exc_info()[1]
self.annotate_exception(ex, i + 1, command[0])
errors.append((i, ex))
# parse the EXEC.
try:
response = self.parse_response(connection, '_')
except ExecAbortError:
if self.explicit_transaction:
self.immediate_execute_command('DISCARD')
if errors:
raise errors[0][1]
raise sys.exc_info()[1]
if response is None:
raise WatchError("Watched variable changed.")
# put any parse errors into the response
for i, e in errors:
response.insert(i, e)
if len(response) != len(commands):
self.connection.disconnect()
raise ResponseError("Wrong number of response items from "
"pipeline execution")
# find any errors in the response and raise if necessary
if raise_on_error:
self.raise_first_error(commands, response)
# We have to run response callbacks manually
data = []
for r, cmd in izip(response, commands):
if not isinstance(r, Exception):
args, options = cmd
command_name = args[0]
if command_name in self.response_callbacks:
r = self.response_callbacks[command_name](r, **options)
data.append(r)
return data
def _execute_pipeline(self, connection, commands, raise_on_error):
# build up all commands into a single request to increase network perf
all_cmds = connection.pack_commands([args for args, _ in commands])
connection.send_packed_command(all_cmds)
response = []
for args, options in commands:
try:
response.append(
self.parse_response(connection, args[0], **options))
except ResponseError:
response.append(sys.exc_info()[1])
if raise_on_error:
self.raise_first_error(commands, response)
return response
def raise_first_error(self, commands, response):
for i, r in enumerate(response):
if isinstance(r, ResponseError):
self.annotate_exception(r, i + 1, commands[i][0])
raise r
def annotate_exception(self, exception, number, command):
cmd = safe_unicode(' ').join(imap(safe_unicode, command))
msg = unicode('Command # %d (%s) of pipeline caused error: %s') % (
number, cmd, safe_unicode(exception.args[0]))
exception.args = (msg,) + exception.args[1:]
def parse_response(self, connection, command_name, **options):
result = StrictRedis.parse_response(
self, connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self.watching = False
elif command_name == 'WATCH':
self.watching = True
return result
def load_scripts(self):
# make sure all scripts that are about to be run on this pipeline exist
scripts = list(self.scripts)
immediate = self.immediate_execute_command
shas = [s.sha for s in scripts]
# we can't use the normal script_* methods because they would just
# get buffered in the pipeline.
exists = immediate('SCRIPT', 'EXISTS', *shas, **{'parse': 'EXISTS'})
if not all(exists):
for s, exist in izip(scripts, exists):
if not exist:
s.sha = immediate('SCRIPT', 'LOAD', s.script,
**{'parse': 'LOAD'})
def execute(self, raise_on_error=True):
"Execute all the commands in the current pipeline"
stack = self.command_stack
if not stack:
return []
if self.scripts:
self.load_scripts()
if self.transaction or self.explicit_transaction:
execute = self._execute_transaction
else:
execute = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection('MULTI',
self.shard_hint)
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
try:
return execute(conn, stack, raise_on_error)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
if not conn.retry_on_timeout and isinstance(e, TimeoutError):
raise
# if we were watching a variable, the watch is no longer valid
# since this connection has died. raise a WatchError, which
# indicates the user should retry his transaction. If this is more
# than a temporary failure, the WATCH that the user next issues
            # will fail, propagating the real ConnectionError
if self.watching:
                raise WatchError("A ConnectionError occurred while watching "
"one or more keys")
# otherwise, it's safe to retry since the transaction isn't
# predicated on any state
return execute(conn, stack, raise_on_error)
finally:
self.reset()
def watch(self, *names):
"Watches the values at keys ``names``"
if self.explicit_transaction:
raise RedisError('Cannot issue a WATCH after a MULTI')
return self.execute_command('WATCH', *names)
def unwatch(self):
"Unwatches all previously specified keys"
return self.watching and self.execute_command('UNWATCH') or True
def script_load_for_pipeline(self, script):
"Make sure scripts are loaded prior to pipeline execution"
# we need the sha now so that Script.__call__ can use it to run
# evalsha.
if not script.sha:
script.sha = self.immediate_execute_command('SCRIPT', 'LOAD',
script.script,
**{'parse': 'LOAD'})
self.scripts.add(script)
class StrictPipeline(BasePipeline, StrictRedis):
"Pipeline for the StrictRedis class"
pass
class Pipeline(BasePipeline, Redis):
"Pipeline for the Redis class"
pass
class Script(object):
"An executable Lua script object returned by ``register_script``"
def __init__(self, registered_client, script):
self.registered_client = registered_client
self.script = script
self.sha = ''
def __call__(self, keys=[], args=[], client=None):
"Execute the script, passing any required ``args``"
if client is None:
client = self.registered_client
args = tuple(keys) + tuple(args)
# make sure the Redis server knows about the script
if isinstance(client, BasePipeline):
# make sure this script is good to go on pipeline
client.script_load_for_pipeline(self)
try:
return client.evalsha(self.sha, len(keys), *args)
except NoScriptError:
            # Maybe the client is pointed to a different server than the client
# that created this instance?
self.sha = client.script_load(self.script)
return client.evalsha(self.sha, len(keys), *args)
| mit |
slagle/ansible-modules-extras | packaging/os/yum_repository.py | 8 | 24683 | #!/usr/bin/python
# encoding: utf-8
# (c) 2015-2016, Jiri Tyr <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import os
DOCUMENTATION = '''
---
module: yum_repository
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add and remove YUM repositories
description:
- Add or remove YUM repositories in RPM-based Linux distributions.
options:
async:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- If set to C(yes) Yum will download packages and metadata from this
repo in parallel, if possible.
bandwidth:
required: false
default: 0
description:
- Maximum available network bandwidth in bytes/second. Used with the
I(throttle) option.
- If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
throttling will be disabled. If I(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
throttling).
baseurl:
required: false
default: null
description:
- URL to the directory where the yum repository's 'repodata' directory
lives.
- This or the I(mirrorlist) parameter is required if I(state) is set to
C(present).
cost:
required: false
default: 1000
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
deltarpm_metadata_percentage:
required: false
default: 100
description:
- When the relative size of deltarpm metadata vs pkgs is larger than
this, deltarpm metadata is not downloaded from the repo. Note that you
can give values over C(100), so C(200) means that the metadata is
required to be half the size of the packages. Use C(0) to turn off
this check, and always download metadata.
deltarpm_percentage:
required: false
default: 75
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use C(0) to turn off delta rpm processing. Local repositories
(with file:// I(baseurl)) have delta rpms turned off by default.
description:
required: false
default: null
description:
- A human readable string describing the repository.
- This parameter is only required if I(state) is set to C(present).
enabled:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
      - This tells yum whether or not to use this repository.
enablegroups:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Determines whether yum will allow the use of package groups for this
repository.
exclude:
required: false
default: null
description:
- List of packages to exclude from updates or installs. This should be a
space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed.
- The list can also be a regular YAML array.
failovermethod:
required: false
choices: [roundrobin, priority]
default: roundrobin
description:
- C(roundrobin) randomly selects a URL out of the list of URLs to start
with and proceeds through each of them as it encounters a failure
contacting the host.
- C(priority) starts from the first I(baseurl) listed and reads through
them sequentially.
file:
required: false
default: null
description:
- File to use to save the repo in. Defaults to the value of I(name).
gpgcakey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Tells yum whether or not it should perform a GPG signature check on
packages.
gpgkey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
http_caching:
required: false
choices: [all, packages, none]
default: all
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
downloads that Yum does.
- C(all) means that all HTTP downloads should be cached.
- C(packages) means that only RPM package downloads should be cached (but
not repository metadata downloads).
- C(none) means that no HTTP downloads should be cached.
include:
required: false
default: null
description:
      - Include an external configuration file. Both a local path and a URL are
        supported. The configuration file will be inserted at the position of the
I(include=) line. Included files may contain further include lines.
Yum will abort with an error if an inclusion loop is detected.
includepkgs:
required: false
default: null
description:
- List of packages you want to only use from a repository. This should be
a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed. Substitution variables (e.g. C($releasever)) are honored
here.
- The list can also be a regular YAML array.
ip_resolve:
required: false
choices: [4, 6, IPv4, IPv6, whatever]
default: whatever
description:
- Determines how yum resolves host names.
- C(4) or C(IPv4) - resolve to IPv4 addresses only.
- C(6) or C(IPv6) - resolve to IPv6 addresses only.
keepalive:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not HTTP/1.1 keepalive should be used with
this repository. This can improve transfer speeds by using one
connection when downloading multiple files from a repository.
keepcache:
required: false
choices: ['0', '1']
default: '1'
description:
- Either C(1) or C(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
metadata_expire:
required: false
default: 21600
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
metadata_expire_filter:
required: false
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
default: 'read-only:present'
description:
- Filter the I(metadata_expire) time, allowing a trade of speed for
accuracy if a command doesn't require it. Each yum command can specify
that it requires a certain level of timeliness quality from the remote
        repos, from "I'm about to install/upgrade, so this better be current"
to "Anything that's available is good enough".
- C(never) - Nothing is filtered, always obey I(metadata_expire).
- C(read-only:past) - Commands that only care about past information are
filtered from metadata expiring. Eg. I(yum history) info (if history
needs to lookup anything about a previous transaction, then by
definition the remote package was available in the past).
- C(read-only:present) - Commands that are balanced between past and
future. Eg. I(yum list yum).
- C(read-only:future) - Commands that are likely to result in running
other commands which will require the latest metadata. Eg.
I(yum check-update).
- Note that this option does not override "yum clean expire-cache".
metalink:
required: false
default: null
description:
- Specifies a URL to a metalink file for the repomd.xml, a list of
mirrors for the entire repository are generated by converting the
mirrors for the repomd.xml file to a I(baseurl).
mirrorlist:
required: false
default: null
description:
- Specifies a URL to a file containing a list of baseurls.
- This or the I(baseurl) parameter is required if I(state) is set to
C(present).
mirrorlist_expire:
required: false
default: 21600
description:
- Time (in seconds) after which the mirrorlist locally cached will
expire.
- Default value is 6 hours.
name:
required: true
description:
- Unique repository ID.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options.
To remove an option, set the value of the option to C(null).
password:
required: false
default: null
description:
- Password to use with the username for basic authentication.
priority:
required: false
default: 99
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
protect:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Protect packages from updates from other repositories.
proxy:
required: false
default: null
description:
- URL to the proxy server that yum should use.
proxy_password:
required: false
default: null
description:
      - Password for this proxy.
proxy_username:
required: false
default: null
description:
      - Username to use for proxy.
repo_gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not it should perform a GPG signature check
on the repodata from this repository.
reposdir:
required: false
default: /etc/yum.repos.d
description:
- Directory where the C(.repo) files will be stored.
retries:
required: false
default: 10
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to C(0) makes yum try forever.
s3_enabled:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Enables support for S3 repositories.
- This option only works if the YUM S3 plugin is installed.
skip_if_unavailable:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If set to C(yes) yum will continue running if this repository cannot be
contacted for any reason. This should be set carefully as all repos are
consulted for any given command.
ssl_check_cert_permissions:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Whether yum should check the permissions on the paths for the
certificates on the repository (both remote and local).
- If we can't read any of the files then yum will force
I(skip_if_unavailable) to be C(yes). This is most useful for non-root
processes which use yum on repos that have client cert files which are
readable only by root.
sslcacert:
required: false
default: null
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
sslclientcert:
required: false
default: null
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
sslclientkey:
required: false
default: null
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
sslverify:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether yum should verify SSL certificates/hosts at all.
state:
required: false
choices: [absent, present]
default: present
description:
- State of the repo file.
throttle:
required: false
default: null
description:
- Enable bandwidth throttling for downloads.
      - This option can be expressed as an absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
timeout:
required: false
default: 30
description:
- Number of seconds to wait for a connection before timing out.
ui_repoid_vars:
required: false
default: releasever basearch
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the I(baseurl)/etc. Variables are appended
in the order listed (and found).
username:
required: false
default: null
description:
- Username to use for basic authentication to a repo or really any url.
extends_documentation_fragment:
- files
notes:
- All comments will be removed if modifying an existing repo file.
- Section order is preserved in an existing repo file.
- Parameters in a section are ordered alphabetically in an existing repo
file.
- The repo file will be automatically deleted if it contains no repository.
'''
EXAMPLES = '''
- name: Add repository
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add multiple repositories into the same file (1/2)
yum_repository:
name: epel
description: EPEL YUM repo
file: external_repos
baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
- name: Add multiple repositories into the same file (2/2)
yum_repository:
name: rpmforge
description: RPMforge YUM repo
file: external_repos
baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
enabled: no
- name: Remove repository
yum_repository:
name: epel
state: absent
- name: Remove repository from a specific repo file
yum_repository:
name: epel
file: external_repos
state: absent
#
# Allow to overwrite the yum_repository parameters by defining the parameters
# as a variable in the defaults or vars file:
#
# my_role_somerepo_params:
# # Disable GPG checking
# gpgcheck: no
# # Remove the gpgkey option
# gpgkey: null
#
- name: Add Some repo
yum_repository:
name: somerepo
description: Some YUM repo
baseurl: http://server.com/path/to/the/repo
gpgkey: http://server.com/keys/somerepo.pub
gpgcheck: yes
params: "{{ my_role_somerepo_params }}"
'''
RETURN = '''
repo:
description: repository name
returned: success
type: string
sample: "epel"
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
class YumRepo(object):
# Class global variables
module = None
params = None
section = None
repofile = ConfigParser.RawConfigParser()
# List of parameters which will be allowed in the repo file output
allowed_params = [
'async',
'bandwidth',
'baseurl',
'cost',
'deltarpm_metadata_percentage',
'deltarpm_percentage',
'enabled',
'enablegroups',
'exclude',
'failovermethod',
'gpgcakey',
'gpgcheck',
'gpgkey',
'http_caching',
'include',
'includepkgs',
'ip_resolve',
'keepalive',
'keepcache',
'metadata_expire',
'metadata_expire_filter',
'metalink',
'mirrorlist',
'mirrorlist_expire',
'name',
'password',
'priority',
'protect',
'proxy',
'proxy_password',
'proxy_username',
'repo_gpgcheck',
'retries',
's3_enabled',
'skip_if_unavailable',
'sslcacert',
'ssl_check_cert_permissions',
'sslclientcert',
'sslclientkey',
'sslverify',
'throttle',
'timeout',
'ui_repoid_vars',
'username']
# List of parameters which can be a list
list_params = ['exclude', 'includepkgs']
def __init__(self, module):
# To be able to use fail_json
self.module = module
# Shortcut for the params
self.params = self.module.params
# Section is always the repoid
self.section = self.params['repoid']
# Check if repo directory exists
repos_dir = self.params['reposdir']
if not os.path.isdir(repos_dir):
self.module.fail_json(
msg="Repo directory '%s' does not exist." % repos_dir)
# Set dest; also used to set dest parameter for the FS attributes
self.params['dest'] = os.path.join(
repos_dir, "%s.repo" % self.params['file'])
# Read the repo file if it exists
if os.path.isfile(self.params['dest']):
self.repofile.read(self.params['dest'])
def add(self):
# Remove already existing repo and create a new one
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
# Add section
self.repofile.add_section(self.section)
# Baseurl/mirrorlist is not required because for removal we need only
# the repo name. This is why we check if the baseurl/mirrorlist is
# defined.
if (self.params['baseurl'], self.params['mirrorlist']) == (None, None):
self.module.fail_json(
                msg='Parameter "baseurl" or "mirrorlist" is required for '
'adding a new repo.')
# Set options
for key, value in sorted(self.params.items()):
if key in self.list_params and isinstance(value, list):
# Join items into one string for specific parameters
value = ' '.join(value)
elif isinstance(value, bool):
# Convert boolean value to integer
value = int(value)
# Set the value only if it was defined (default is None)
if value is not None and key in self.allowed_params:
self.repofile.set(self.section, key, value)
def save(self):
if len(self.repofile.sections()):
# Write data into the file
try:
fd = open(self.params['dest'], 'wb')
except IOError, e:
self.module.fail_json(
msg="Cannot open repo file %s." % self.params['dest'],
details=str(e))
self.repofile.write(fd)
try:
fd.close()
except IOError, e:
self.module.fail_json(
msg="Cannot write repo file %s." % self.params['dest'],
details=str(e))
else:
            # Remove the file if there are no repos
try:
os.remove(self.params['dest'])
except OSError, e:
self.module.fail_json(
msg=(
"Cannot remove empty repo file %s." %
self.params['dest']),
details=str(e))
def remove(self):
# Remove section if exists
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
def dump(self):
repo_string = ""
# Compose the repo file
for section in sorted(self.repofile.sections()):
repo_string += "[%s]\n" % section
for key, value in sorted(self.repofile.items(section)):
repo_string += "%s = %s\n" % (key, value)
repo_string += "\n"
return repo_string
def main():
# Module settings
module = AnsibleModule(
argument_spec=dict(
async=dict(type='bool'),
bandwidth=dict(),
baseurl=dict(),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(),
gpgcheck=dict(type='bool'),
gpgkey=dict(),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(required=True),
params=dict(),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(),
sslclientkey=dict(),
sslverify=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
),
add_file_common_args=True,
supports_check_mode=True,
)
# Update module parameters by user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
name = module.params['name']
state = module.params['state']
# Check if required parameters are present
if state == 'present':
if (
module.params['baseurl'] is None and
module.params['mirrorlist'] is None):
module.fail_json(
msg="Parameter 'baseurl' or 'mirrorlist' is required.")
if module.params['description'] is None:
module.fail_json(
msg="Parameter 'description' is required.")
# Rename "name" and "description" to ensure correct key sorting
module.params['repoid'] = module.params['name']
module.params['name'] = module.params['description']
del module.params['description']
# Define repo file name if it doesn't exist
if module.params['file'] is None:
module.params['file'] = module.params['repoid']
# Instantiate the YumRepo object
yumrepo = YumRepo(module)
# Get repo status before change
yumrepo_before = yumrepo.dump()
# Perform action depending on the state
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
# Get repo status after change
yumrepo_after = yumrepo.dump()
# Compare repo states
changed = yumrepo_before != yumrepo_after
# Save the file only if not in check mode and if there was a change
if not module.check_mode and changed:
yumrepo.save()
# Change file attributes if needed
if os.path.isfile(module.params['dest']):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Print status of the change
module.exit_json(changed=changed, repo=name, state=state)
# Import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
wfx/epack | epack/libarchive/ffi.py | 1 | 7623 | # This file is part of a program licensed under the terms of the GNU Lesser
# General Public License version 2 (or at your option any later version)
# as published by the Free Software Foundation: http://www.gnu.org/licenses/
from __future__ import division, print_function, unicode_literals
from ctypes import (
c_char_p, c_int, c_uint, c_longlong, c_size_t, c_void_p,
c_wchar_p, CFUNCTYPE, POINTER,
)
try:
from ctypes import c_ssize_t
except ImportError:
from ctypes import c_longlong as c_ssize_t
import ctypes
from ctypes.util import find_library
import logging
import mmap
import os
from .exception import ArchiveError
logger = logging.getLogger('libarchive')
page_size = mmap.PAGESIZE
libarchive_path = os.environ.get('LIBARCHIVE') or \
find_library('archive') or \
find_library('libarchive') or \
'libarchive.so'
libarchive = ctypes.cdll.LoadLibrary(libarchive_path)
# Constants
ARCHIVE_EOF = 1 # Found end of archive.
ARCHIVE_OK = 0 # Operation was successful.
ARCHIVE_RETRY = -10 # Retry might succeed.
ARCHIVE_WARN = -20 # Partial success.
ARCHIVE_FAILED = -25 # Current operation cannot complete.
ARCHIVE_FATAL = -30 # No more operations are possible.
AE_IFMT = 0o170000
AE_IFREG = 0o100000
AE_IFLNK = 0o120000
AE_IFSOCK = 0o140000
AE_IFCHR = 0o020000
AE_IFBLK = 0o060000
AE_IFDIR = 0o040000
AE_IFIFO = 0o010000
# Callback types
WRITE_CALLBACK = CFUNCTYPE(
c_ssize_t, c_void_p, c_void_p, POINTER(c_void_p), c_size_t
)
OPEN_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
CLOSE_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
VOID_CB = lambda *_: ARCHIVE_OK
# Type aliases, for readability
c_archive_p = c_void_p
c_archive_entry_p = c_void_p
# Helper functions
def _error_string(archive_p):
msg = error_string(archive_p)
if msg is None:
return
try:
return msg.decode('ascii')
except UnicodeDecodeError:
return msg
def archive_error(archive_p, retcode):
msg = _error_string(archive_p)
raise ArchiveError(msg, errno(archive_p), retcode, archive_p)
def check_null(ret, func, args):
if ret is None:
raise ArchiveError(func.__name__+' returned NULL')
return ret
def check_int(retcode, func, args):
if retcode >= 0:
return retcode
elif retcode == ARCHIVE_WARN:
logger.warning(_error_string(args[0]))
return retcode
else:
raise archive_error(args[0], retcode)
def ffi(name, argtypes, restype, errcheck=None):
f = getattr(libarchive, 'archive_'+name)
f.argtypes = argtypes
f.restype = restype
if errcheck:
f.errcheck = errcheck
globals()[name] = f
return f
# FFI declarations
# archive_util
errno = ffi('errno', [c_archive_p], c_int)
error_string = ffi('error_string', [c_archive_p], c_char_p)
# archive_entry
ffi('entry_new', [], c_archive_entry_p, check_null)
ffi('entry_filetype', [c_archive_entry_p], c_int)
ffi('entry_mtime', [c_archive_entry_p], c_int)
ffi('entry_perm', [c_archive_entry_p], c_int)
ffi('entry_pathname_w', [c_archive_entry_p], c_wchar_p)
ffi('entry_sourcepath', [c_archive_entry_p], c_char_p)
ffi('entry_size', [c_archive_entry_p], c_longlong)
ffi('entry_size_is_set', [c_archive_entry_p], c_int)
ffi('entry_update_pathname_utf8', [c_archive_entry_p, c_char_p], None)
ffi('entry_clear', [c_archive_entry_p], c_archive_entry_p)
ffi('entry_free', [c_archive_entry_p], None)
# archive_read
ffi('read_new', [], c_archive_p, check_null)
READ_FORMATS = set((
'7zip', 'all', 'ar', 'cab', 'cpio', 'empty', 'iso9660', 'lha', 'mtree',
'rar', 'raw', 'tar', 'xar', 'zip'
))
for f_name in list(READ_FORMATS):
try:
ffi('read_support_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read format "%s" is not supported' % f_name)
READ_FORMATS.remove(f_name)
READ_FILTERS = set((
'all', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'none', 'rpm', 'uu', 'xz'
))
for f_name in list(READ_FILTERS):
try:
ffi('read_support_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read filter "%s" is not supported' % f_name)
READ_FILTERS.remove(f_name)
ffi('read_open_fd', [c_archive_p, c_int, c_size_t], c_int, check_int)
ffi('read_open_filename_w', [c_archive_p, c_wchar_p, c_size_t],
c_int, check_int)
ffi('read_open_memory', [c_archive_p, c_void_p, c_size_t], c_int, check_int)
ffi('read_next_header', [c_archive_p, POINTER(c_void_p)], c_int, check_int)
ffi('read_next_header2', [c_archive_p, c_void_p], c_int, check_int)
ffi('read_close', [c_archive_p], c_int, check_int)
ffi('read_free', [c_archive_p], c_int, check_int)
# archive_read_disk
ffi('read_disk_new', [], c_archive_p, check_null)
ffi('read_disk_set_standard_lookup', [c_archive_p], c_int, check_int)
ffi('read_disk_open', [c_archive_p, c_char_p], c_int, check_int)
ffi('read_disk_open_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('read_disk_descend', [c_archive_p], c_int, check_int)
# archive_read_data
ffi('read_data_block',
[c_archive_p, POINTER(c_void_p), POINTER(c_size_t), POINTER(c_longlong)],
c_int, check_int)
ffi('read_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('read_data_skip', [c_archive_p], c_int, check_int)
# archive_write
ffi('write_new', [], c_archive_p, check_null)
ffi('write_disk_new', [], c_archive_p, check_null)
ffi('write_disk_set_options', [c_archive_p, c_int], c_int, check_int)
WRITE_FORMATS = set((
'7zip', 'ar_bsd', 'ar_svr4', 'cpio', 'cpio_newc', 'gnutar', 'iso9660',
'mtree', 'mtree_classic', 'pax', 'pax_restricted', 'shar', 'shar_dump',
'ustar', 'v7tar', 'xar', 'zip'
))
for f_name in list(WRITE_FORMATS):
try:
ffi('write_set_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write format "%s" is not supported' % f_name)
WRITE_FORMATS.remove(f_name)
WRITE_FILTERS = set((
'b64encode', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'uuencode', 'xz'
))
for f_name in list(WRITE_FILTERS):
try:
ffi('write_add_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write filter "%s" is not supported' % f_name)
WRITE_FILTERS.remove(f_name)
ffi('write_open',
[c_archive_p, c_void_p, OPEN_CALLBACK, WRITE_CALLBACK, CLOSE_CALLBACK],
c_int, check_int)
ffi('write_open_fd', [c_archive_p, c_int], c_int, check_int)
ffi('write_open_filename', [c_archive_p, c_char_p], c_int, check_int)
ffi('write_open_filename_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('write_open_memory',
[c_archive_p, c_void_p, c_size_t, POINTER(c_size_t)],
c_int, check_int)
ffi('write_get_bytes_in_last_block', [c_archive_p], c_int, check_int)
ffi('write_get_bytes_per_block', [c_archive_p], c_int, check_int)
ffi('write_set_bytes_in_last_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_set_bytes_per_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_header', [c_archive_p, c_void_p], c_int, check_int)
ffi('write_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('write_data_block', [c_archive_p, c_void_p, c_size_t, c_longlong],
c_int, check_int)
ffi('write_finish_entry', [c_archive_p], c_int, check_int)
ffi('write_close', [c_archive_p], c_int, check_int)
ffi('write_free', [c_archive_p], c_int, check_int)
| gpl-3.0 |
vlachoudis/sl4a | python/src/Lib/ast.py | 139 | 11347 | # -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
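    A small usage sketch (illustrative only):
        import ast
        tree = ast.parse("answer = 6 * 7")
        print(ast.dump(tree))
        code = compile(tree, "<example>", "exec")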
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(expr, filename='<unknown>', mode='exec'):
"""
Parse an expression into an AST node.
Equivalent to compile(expr, filename, mode, PyCF_ONLY_AST).
"""
return compile(expr, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
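    For example (illustrative):
        literal_eval("{'answer': 42, 'items': [1, 2, 3]}")
        # -> {'answer': 42, 'items': [1, 2, 3]}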
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, Str):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
raise ValueError('malformed string')
return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
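    For example (illustrative; the exact output depends on the grammar
    version):
        dump(parse('x = 1'))
        # -> "Module(body=[Assign(targets=[Name(id='x', ctx=Store())], value=Num(n=1))])"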
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
if 'lineno' in node._attributes:
node.lineno = getattr(node, 'lineno', 0) + n
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all child nodes of *node*, in no specified order. This is
useful if you only want to modify nodes in place and don't care about the
context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
    By default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
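    A short illustrative sketch that collects all variable names:
        class NameCollector(NodeVisitor):
            def __init__(self):
                self.names = []
            def visit_Name(self, node):
                self.names.append(node.id)
                self.generic_visit(node)
        collector = NameCollector()
        collector.visit(parse('x = y + z'))
        # collector.names is now ['x', 'y', 'z']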
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
| apache-2.0 |
gtko/Sick-Beard | lib/hachoir_parser/image/xcf.py | 90 | 10369 | """
Gimp image parser (XCF file, ".xcf" extension).
You can find information about the XCF file format in the Gimp source code.
URL to read the CVS online:
http://cvs.gnome.org/viewcvs/gimp/app/xcf/
\--> files xcf-read.c and xcf-load.c
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet, ParserError,
UInt8, UInt32, Enum, Float32, String, PascalString32, RawBytes)
from lib.hachoir_parser.image.common import RGBA
from lib.hachoir_core.endian import NETWORK_ENDIAN
class XcfCompression(FieldSet):
static_size = 8
COMPRESSION_NAME = {
0: u"None",
1: u"RLE",
2: u"Zlib",
3: u"Fractal"
}
def createFields(self):
yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME)
class XcfResolution(StaticFieldSet):
format = (
(Float32, "xres", "X resolution in DPI"),
(Float32, "yres", "Y resolution in DPI")
)
class XcfTattoo(StaticFieldSet):
format = ((UInt32, "tattoo", "Tattoo"),)
class LayerOffsets(StaticFieldSet):
format = (
(UInt32, "ofst_x", "Offset X"),
(UInt32, "ofst_y", "Offset Y")
)
class LayerMode(FieldSet):
static_size = 32
MODE_NAME = {
0: u"Normal",
1: u"Dissolve",
2: u"Behind",
3: u"Multiply",
4: u"Screen",
5: u"Overlay",
6: u"Difference",
7: u"Addition",
8: u"Subtract",
9: u"Darken only",
10: u"Lighten only",
11: u"Hue",
12: u"Saturation",
13: u"Color",
14: u"Value",
15: u"Divide",
16: u"Dodge",
17: u"Burn",
18: u"Hard light",
19: u"Soft light",
20: u"Grain extract",
21: u"Grain merge",
22: u"Color erase"
}
def createFields(self):
yield Enum(UInt32(self, "mode", "Layer mode"), self.MODE_NAME)
class GimpBoolean(UInt32):
def __init__(self, parent, name):
UInt32.__init__(self, parent, name)
def createValue(self):
return 1 == UInt32.createValue(self)
class XcfUnit(StaticFieldSet):
format = ((UInt32, "unit", "Unit"),)
class XcfParasiteEntry(FieldSet):
def createFields(self):
yield PascalString32(self, "name", "Name", strip="\0", charset="UTF-8")
yield UInt32(self, "flags", "Flags")
yield PascalString32(self, "data", "Data", strip=" \0", charset="UTF-8")
class XcfLevel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "offset", "Offset")
offset = self["offset"].value
if offset == 0:
return
data_offsets = []
while (self.absolute_address + self.current_size)/8 < offset:
chunk = UInt32(self, "data_offset[]", "Data offset")
yield chunk
if chunk.value == 0:
break
data_offsets.append(chunk)
if (self.absolute_address + self.current_size)/8 != offset:
raise ParserError("Problem with level offset.")
previous = offset
for chunk in data_offsets:
data_offset = chunk.value
size = data_offset - previous
yield RawBytes(self, "data[]", size, "Data content of %s" % chunk.name)
previous = data_offset
class XcfHierarchy(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width")
yield UInt32(self, "height", "Height")
yield UInt32(self, "bpp", "Bits/pixel")
offsets = []
while True:
chunk = UInt32(self, "offset[]", "Level offset")
yield chunk
if chunk.value == 0:
break
offsets.append(chunk.value)
for offset in offsets:
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfLevel(self, "level[]", "Level")
# yield XcfChannel(self, "channel[]", "Channel"))
class XcfChannel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Channel width")
yield UInt32(self, "height", "Channel height")
yield PascalString32(self, "name", "Channel name", strip="\0", charset="UTF-8")
for field in readProperties(self):
yield field
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
def createDescription(self):
return 'Channel "%s"' % self["name"].value
class XcfLayer(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Layer width in pixels")
yield UInt32(self, "height", "Layer height in pixels")
yield Enum(UInt32(self, "type", "Layer type"), XcfFile.IMAGE_TYPE_NAME)
yield PascalString32(self, "name", "Layer name", strip="\0", charset="UTF-8")
for prop in readProperties(self):
yield prop
# --
# TODO: Hack for Gimp 1.2 files
# --
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield UInt32(self, "mask_ofs", "Layer mask offset")
padding = self.seekByte(self["hierarchy_ofs"].value, relative=False)
if padding is not None:
yield padding
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
# TODO: Read layer mask if needed: self["mask_ofs"].value != 0
def createDescription(self):
return 'Layer "%s"' % self["name"].value
class XcfParasites(FieldSet):
def createFields(self):
size = self["../size"].value * 8
while self.current_size < size:
yield XcfParasiteEntry(self, "parasite[]", "Parasite")
class XcfProperty(FieldSet):
PROP_COMPRESSION = 17
PROP_RESOLUTION = 19
PROP_PARASITES = 21
TYPE_NAME = {
0: u"End",
1: u"Colormap",
2: u"Active layer",
3: u"Active channel",
4: u"Selection",
5: u"Floating selection",
6: u"Opacity",
7: u"Mode",
8: u"Visible",
9: u"Linked",
10: u"Lock alpha",
11: u"Apply mask",
12: u"Edit mask",
13: u"Show mask",
14: u"Show masked",
15: u"Offsets",
16: u"Color",
17: u"Compression",
18: u"Guides",
19: u"Resolution",
20: u"Tattoo",
21: u"Parasites",
22: u"Unit",
23: u"Paths",
24: u"User unit",
25: u"Vectors",
26: u"Text layer flags",
}
handler = {
6: RGBA,
7: LayerMode,
8: GimpBoolean,
9: GimpBoolean,
10: GimpBoolean,
11: GimpBoolean,
12: GimpBoolean,
13: GimpBoolean,
15: LayerOffsets,
17: XcfCompression,
19: XcfResolution,
20: XcfTattoo,
21: XcfParasites,
22: XcfUnit
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + self["size"].value) * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Property type"), self.TYPE_NAME)
yield UInt32(self, "size", "Property size")
size = self["size"].value
if 0 < size:
cls = self.handler.get(self["type"].value, None)
if cls:
yield cls(self, "data", size=size*8)
else:
yield RawBytes(self, "data", size, "Data")
def createDescription(self):
return "Property: %s" % self["type"].display
def readProperties(parser):
while True:
prop = XcfProperty(parser, "property[]")
yield prop
if prop["type"].value == 0:
return
class XcfFile(Parser):
PARSER_TAGS = {
"id": "xcf",
"category": "image",
"file_ext": ("xcf",),
"mime": (u"image/x-xcf", u"application/x-gimp-image"),
"min_size": (26 + 8 + 4 + 4)*8, # header+empty property+layer offset+channel offset
"magic": (
('gimp xcf file\0', 0),
('gimp xcf v002\0', 0),
),
"description": "Gimp (XCF) picture"
}
endian = NETWORK_ENDIAN
IMAGE_TYPE_NAME = {
0: u"RGB",
1: u"Gray",
2: u"Indexed"
}
def validate(self):
if self.stream.readBytes(0, 14) not in ('gimp xcf file\0', 'gimp xcf v002\0'):
return "Wrong signature"
return True
def createFields(self):
# Read signature
yield String(self, "signature", 14, "Gimp picture signature (ends with nul byte)", charset="ASCII")
# Read image general informations (width, height, type)
yield UInt32(self, "width", "Image width")
yield UInt32(self, "height", "Image height")
yield Enum(UInt32(self, "type", "Image type"), self.IMAGE_TYPE_NAME)
for prop in readProperties(self):
yield prop
# Read layer offsets
layer_offsets = []
while True:
chunk = UInt32(self, "layer_offset[]", "Layer offset")
yield chunk
if chunk.value == 0:
break
layer_offsets.append(chunk.value)
# Read channel offsets
channel_offsets = []
while True:
chunk = UInt32(self, "channel_offset[]", "Channel offset")
yield chunk
if chunk.value == 0:
break
channel_offsets.append(chunk.value)
# Read layers
for index, offset in enumerate(layer_offsets):
if index+1 < len(layer_offsets):
size = (layer_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding:
yield padding
yield XcfLayer(self, "layer[]", size=size)
# Read channels
for index, offset in enumerate(channel_offsets):
if index+1 < len(channel_offsets):
size = (channel_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfChannel(self, "channel[]", "Channel", size=size)
| gpl-3.0 |
vladmm/intellij-community | python/helpers/python-skeletons/multiprocessing/__init__.py | 40 | 4217 | """Skeleton for 'multiprocessing' stdlib module."""
from multiprocessing.pool import Pool
class Process(object):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
self.name = ''
self.daemon = False
self.authkey = None
self.exitcode = None
self.ident = 0
self.pid = 0
self.sentinel = None
def run(self):
pass
def start(self):
pass
def terminate(self):
pass
def join(self, timeout=None):
pass
def is_alive(self):
return False
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class Connection(object):
def send(self, obj):
pass
def recv(self):
pass
def fileno(self):
return 0
def close(self):
pass
def poll(self, timeout=None):
pass
def send_bytes(self, buffer, offset=-1, size=-1):
pass
def recv_bytes(self, maxlength=-1):
pass
def recv_bytes_into(self, buffer, offset=-1):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def Pipe(duplex=True):
return Connection(), Connection()
class Queue(object):
def __init__(self, maxsize=-1):
self._maxsize = maxsize
def qsize(self):
return 0
def empty(self):
return False
def full(self):
return False
def put(self, obj, block=True, timeout=None):
pass
def put_nowait(self, obj):
pass
def get(self, block=True, timeout=None):
pass
def get_nowait(self):
pass
def close(self):
pass
def join_thread(self):
pass
def cancel_join_thread(self):
pass
class SimpleQueue(object):
def empty(self):
return False
def get(self):
pass
def put(self, item):
pass
class JoinableQueue(multiprocessing.Queue):
def task_done(self):
pass
def join(self):
pass
def active_children():
"""
:rtype: list[multiprocessing.Process]
"""
return []
def cpu_count():
return 0
def current_process():
"""
:rtype: multiprocessing.Process
"""
return Process()
def freeze_support():
pass
def get_all_start_methods():
return []
def get_context(method=None):
pass
def get_start_method(allow_none=False):
pass
def set_executable(path):
pass
def set_start_method(method):
pass
class Barrier(object):
def __init__(self, parties, action=None, timeout=None):
self.parties = parties
self.n_waiting = 0
self.broken = False
def wait(self, timeout=None):
pass
def reset(self):
pass
def abort(self):
pass
class Semaphore(object):
def __init__(self, value=1):
pass
def acquire(self, blocking=True, timeout=None):
pass
def release(self):
pass
class BoundedSemaphore(multiprocessing.Semaphore):
pass
class Condition(object):
def __init__(self, lock=None):
pass
def acquire(self, *args):
pass
def release(self):
pass
def wait(self, timeout=None):
pass
def wait_for(self, predicate, timeout=None):
pass
def notify(self, n=1):
pass
def notify_all(self):
pass
class Event(object):
def is_set(self):
return False
def set(self):
pass
def clear(self):
pass
def wait(self, timeout=None):
pass
class Lock(object):
def acquire(self, blocking=True, timeout=-1):
pass
def release(self):
pass
class RLock(object):
def acquire(self, blocking=True, timeout=-1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def Value(typecode_or_type, *args, **kwargs):
pass
def Array(typecode_or_type, size_or_initializer, lock=True):
pass
def Manager():
return multiprocessing.SyncManager()
| apache-2.0 |
dzz007/photivo | scons-local-2.2.0/SCons/Platform/darwin.py | 14 | 2578 | """engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import posix
import os
def generate(env):
posix.generate(env)
env['SHLIBSUFFIX'] = '.dylib'
# put macports paths at front to override Apple's versions, fink path is after
# For now let people who want Macports or Fink tools specify it!
# env['ENV']['PATH'] = '/opt/local/bin:/opt/local/sbin:' + env['ENV']['PATH'] + ':/sw/bin'
# Store extra system paths in env['ENV']['PATHOSX']
filelist = ['/etc/paths',]
# make sure this works on Macs with Tiger or earlier
try:
dirlist = os.listdir('/etc/paths.d')
except:
dirlist = []
for file in dirlist:
filelist.append('/etc/paths.d/'+file)
for file in filelist:
if os.path.isfile(file):
f = open(file, 'r')
lines = f.readlines()
for line in lines:
if line:
env.AppendENVPath('PATHOSX', line.strip('\n'))
f.close()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
gilessbrown/wextracto | wex/composed.py | 3 | 4382 | """
Wextracto uses `Function composition <http://en.wikipedia.org/wiki/Function_composition_%28computer_science%29>`_
as an easy way to build new functions from existing ones:
.. code-block:: pycon
>>> from wex.composed import compose
>>> def add1(x):
... return x + 1
...
>>> def mult2(x):
... return x * 2
...
>>> f = compose(add1, mult2)
>>> f(2)
6
Wextracto uses the pipe operator, ``|``, as a shorthand for function composition.
This shorthand can be a powerful technique for reducing boilerplate code when
used in combination with :func:`.named` extractors:
.. code-block:: python
from wex.etree import css, text
from wex.extractor import named
    attrs = named(title = css('h1') | text,
                  description = css('#description') | text)
"""
from itertools import chain
from functools import WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES, partial as functools_partial
from six.moves import map as six_map
def compose(*functions):
""" Create a :class:`.ComposedCallable` from zero more functions. """
return ComposedCallable(*functions)
def composable(func):
""" Decorates a callable to support function composition using ``|``.
For example:
.. code-block:: python
@Composable.decorate
def add1(x):
return x + 1
def mult2(x):
return x * 2
composed = add1 | mult2
"""
return Composable.decorate(func)
class Composable(object):
@classmethod
def decorate(cls, func, **kw):
name = getattr(func, '__name__', str(func))
clsdict = dict(
__call__=staticmethod(func),
__doc__=getattr(func, '__doc__', None),
__name__=getattr(func, '__name__', None),
__module__=getattr(func, '__module__', None),
)
clsdict.update(kw)
return type(name, (cls,), clsdict)()
@classmethod
def __getattr__(cls, name):
return getattr(cls.__call__, name)
@classmethod
def __compose__(cls):
return (cls.__call__,)
def __or__(self, rhs):
assert hasattr(rhs, '__call__')
return compose(self, rhs)
def __ror__(self, lhs):
assert hasattr(lhs, '__call__')
return compose(lhs, self)
def __call__(self, arg):
raise NotImplementedError
def flatten_composed_callables(functions):
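    # If an argument is itself a ComposedCallable it exposes a ``functions``
    # attribute; expanding that here keeps nested compositions flat.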
iterable = (getattr(f, 'functions', (f,)) for f in functions)
return tuple(chain.from_iterable(iterable))
class ComposedCallable(Composable):
""" A callable, taking one argument, composed from other callables.
.. code-block:: python
def mult2(x):
return x * 2
def add1(x):
return x + 1
composed = ComposedCallable(add1, mult2)
for x in (1, 2, 3):
assert composed(x) == mult2(add1(x))
    ComposedCallable objects are :func:`composable <wex.composed.composable>`.
    They can be composed of other ComposedCallable objects.
"""
def __init__(self, *functions):
self.functions = flatten_composed_callables(functions)
def __call__(self, arg, **kw):
res = arg
for func in self.functions:
res = func(res, **kw)
return res
def __compose__(self):
return self.functions
def __repr__(self):
return '<%s.%s%r>' % (self.__class__.__module__,
self.__class__.__name__,
self.functions)
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
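    # Defensive re-implementation of functools.wraps: attributes missing from
    # the wrapped callable are looked up on its class and finally fall back to
    # the ``defaults`` below instead of raising AttributeError.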
defaults = {
'__annotations__': {}
}
def decorator(wrapper):
for attr in assigned:
class_ = getattr(wrapped, '__class__', None)
try:
value = getattr(wrapped, attr)
except AttributeError:
try:
value = getattr(class_, attr)
except AttributeError:
value = defaults[attr]
setattr(wrapper, attr, value)
for attr in updated:
value = getattr(wrapped, attr, {})
getattr(wrapper, attr).update(value)
return wrapper
return decorator
def partial(func, *args, **kwargs):
return composable(functools_partial(func, *args, **kwargs))
def map(func):
return partial(six_map, func)
| bsd-3-clause |
aljscott/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py | 123 | 3157 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import sys
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.runtests import RunTests
class RunTestsTest(unittest.TestCase):
def test_webkit_run_unit_tests(self):
tool = MockTool(log_executive=True)
tool._deprecated_port.run_python_unittests_command = lambda: None
tool._deprecated_port.run_perl_unittests_command = lambda: None
step = RunTests(tool, MockOptions(test=True, non_interactive=True, quiet=False))
if sys.platform != "cygwin":
expected_logs = """Running bindings generation tests
MOCK run_and_throw_if_fail: ['mock-run-bindings-tests'], cwd=/mock-checkout
Running WebKit unit tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-show-results', '--exit-after-n-failures=30', '--quiet', '--skip-failing-tests'], cwd=/mock-checkout
"""
else:
expected_logs = """Running bindings generation tests
MOCK run_and_throw_if_fail: ['mock-run-bindings-tests'], cwd=/mock-checkout
Running WebKit unit tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-show-results', '--exit-after-n-failures=30', '--no-build'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_logs=expected_logs)
| bsd-3-clause |
zlaja/android_kernel_lge_msm8610 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
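    # A dict whose missing keys spring into existence as further autodicts,
    # so nested lookups like d[event][field]['values'][v] never KeyError.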
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
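# Illustrative use of the helpers above (the event/field names here are made
# up for the example, not taken from a real tracepoint):
#   define_flag_field("irq:softirq_entry", "flags", "|")
#   define_flag_value("irq:softirq_entry", "flags", 1, "SHARED")
#   flag_str("irq:softirq_entry", "flags", 1)  ->  "SHARED"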
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
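# For example, trace_flag_str(0x01 | 0x04) returns "IRQS_OFF | NEED_RESCHED".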
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
NewpTone/stacklab-nova | debian/tmp/usr/lib/python2.7/dist-packages/nova/tests/test_quota.py | 6 | 74369 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova import compute
from nova.compute import instance_types
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
from nova import flags
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
import nova.tests.image.fake
from nova import volume
FLAGS = flags.FLAGS
class QuotaIntegrationTestCase(test.TestCase):
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
quota_volumes=2,
quota_gigabytes=20,
quota_floating_ips=1,
network_manager='nova.network.manager.FlatDHCPManager')
# Apparently needed by the RPC tests...
self.network = self.start_service('network')
self.user_id = 'admin'
self.project_id = 'admin'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
orig_rpc_call = rpc.call
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry"""
if (topic == FLAGS.scheduler_topic and
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
instance = scheduler().create_instance_db_entry(
context,
msg['args']['request_spec'],
None)
return [scheduler_driver.encode_instance(instance)]
else:
return orig_rpc_call(context, topic, msg)
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
super(QuotaIntegrationTestCase, self).tearDown()
nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
return db.instance_create(self.context, inst)
def _create_volume(self, size=10):
"""Create a test volume"""
vol = {}
vol['user_id'] = self.user_id
vol['project_id'] = self.project_id
vol['size'] = size
return db.volume_create(self.context, vol)['id']
def test_too_many_instances(self):
instance_uuids = []
for i in range(FLAGS.quota_instances):
instance = self._create_instance()
instance_uuids.append(instance['uuid'])
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
for instance_uuid in instance_uuids:
db.instance_destroy(self.context, instance_uuid)
def test_too_many_cores(self):
instance = self._create_instance(cores=4)
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
db.instance_destroy(self.context, instance['uuid'])
def test_too_many_volumes(self):
volume_ids = []
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_gigabytes(self):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
self.context,
self.project_id)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_auto_assigned(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
# auto allocated addresses should not be counted
self.assertRaises(exception.NoMoreFloatingIps,
self.network.allocate_floating_ip,
self.context,
self.project_id,
True)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_too_many_metadata_items(self):
metadata = {}
for i in range(FLAGS.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid,
metadata=metadata)
def _create_with_injected_files(self, files):
api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context, min_count=1, max_count=1,
instance_type=inst_type, image_href=image_uuid,
injected_files=files)
def test_no_injected_files(self):
api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context,
instance_type=inst_type,
image_href=image_uuid)
def test_max_injected_files(self):
files = []
for i in xrange(FLAGS.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
for i in xrange(FLAGS.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = FLAGS.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
max = FLAGS.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = FLAGS.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
max = FLAGS.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_reservation_expire(self):
timeutils.set_time_override()
def assertInstancesReserved(reserved):
result = quota.QUOTAS.get_project_quotas(self.context,
self.context.project_id)
self.assertEqual(result['instances']['reserved'], reserved)
quota.QUOTAS.reserve(self.context,
expire=60,
instances=2)
assertInstancesReserved(2)
timeutils.advance_time_seconds(80)
result = quota.QUOTAS.expire(self.context)
assertInstancesReserved(0)
class FakeContext(object):
def __init__(self, project_id, quota_class):
self.is_admin = False
self.user_id = 'fake_user'
self.project_id = project_id
self.quota_class = quota_class
def elevated(self):
elevated = self.__class__(self.project_id, self.quota_class)
elevated.is_admin = True
return elevated
class FakeDriver(object):
def __init__(self, by_project=None, by_class=None, reservations=None):
self.called = []
self.by_project = by_project or {}
self.by_class = by_class or {}
self.reservations = reservations or []
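    # Each method below records its arguments in self.called so the tests can
    # assert exactly which driver calls were made, and in what order.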
def get_by_project(self, context, project_id, resource):
self.called.append(('get_by_project', context, project_id, resource))
try:
return self.by_project[project_id][resource]
except KeyError:
raise exception.ProjectQuotaNotFound(project_id=project_id)
def get_by_class(self, context, quota_class, resource):
self.called.append(('get_by_class', context, quota_class, resource))
try:
return self.by_class[quota_class][resource]
except KeyError:
raise exception.QuotaClassNotFound(class_name=quota_class)
def get_defaults(self, context, resources):
self.called.append(('get_defaults', context, resources))
return resources
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
self.called.append(('get_class_quotas', context, resources,
quota_class, defaults))
return resources
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True, usages=True):
self.called.append(('get_project_quotas', context, resources,
project_id, quota_class, defaults, usages))
return resources
def limit_check(self, context, resources, values):
self.called.append(('limit_check', context, resources, values))
def reserve(self, context, resources, deltas, expire=None):
self.called.append(('reserve', context, resources, deltas, expire))
return self.reservations
def commit(self, context, reservations):
self.called.append(('commit', context, reservations))
def rollback(self, context, reservations):
self.called.append(('rollback', context, reservations))
def destroy_all_by_project(self, context, project_id):
self.called.append(('destroy_all_by_project', context, project_id))
def expire(self, context):
self.called.append(('expire', context))
class BaseResourceTestCase(test.TestCase):
def test_no_flag(self):
resource = quota.BaseResource('test_resource')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, None)
self.assertEqual(resource.default, -1)
def test_with_flag(self):
# We know this flag exists, so use it...
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, 10)
def test_with_flag_no_quota(self):
self.flags(quota_instances=-1)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, -1)
def test_quota_no_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver()
context = FakeContext(None, None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 10)
def test_quota_with_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
))
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_no_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext(None, 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 20)
def test_quota_with_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
),
by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_override_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
override_project=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
project_id='override_project')
self.assertEqual(quota_value, 20)
def test_quota_with_project_override_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=15),
override_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
quota_class='override_class')
self.assertEqual(quota_value, 20)
class QuotaEngineTestCase(test.TestCase):
def test_init(self):
quota_obj = quota.QuotaEngine()
self.assertEqual(quota_obj._resources, {})
self.assertTrue(isinstance(quota_obj._driver, quota.DbQuotaDriver))
def test_init_override_string(self):
quota_obj = quota.QuotaEngine(
quota_driver_class='nova.tests.test_quota.FakeDriver')
self.assertEqual(quota_obj._resources, {})
self.assertTrue(isinstance(quota_obj._driver, FakeDriver))
def test_init_override_obj(self):
quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
self.assertEqual(quota_obj._resources, {})
self.assertEqual(quota_obj._driver, FakeDriver)
def test_register_resource(self):
quota_obj = quota.QuotaEngine()
resource = quota.AbsoluteResource('test_resource')
quota_obj.register_resource(resource)
self.assertEqual(quota_obj._resources, dict(test_resource=resource))
def test_register_resources(self):
quota_obj = quota.QuotaEngine()
resources = [
quota.AbsoluteResource('test_resource1'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource3'),
]
quota_obj.register_resources(resources)
self.assertEqual(quota_obj._resources, dict(
test_resource1=resources[0],
test_resource2=resources[1],
test_resource3=resources[2],
))
def test_sync_predeclared(self):
quota_obj = quota.QuotaEngine()
def spam(*args, **kwargs):
pass
resource = quota.ReservableResource('test_resource', spam)
quota_obj.register_resource(resource)
self.assertEqual(resource.sync, spam)
def test_sync_multi(self):
quota_obj = quota.QuotaEngine()
def spam(*args, **kwargs):
pass
resources = [
quota.ReservableResource('test_resource1', spam),
quota.ReservableResource('test_resource2', spam),
quota.ReservableResource('test_resource3', spam),
quota.ReservableResource('test_resource4', spam),
]
quota_obj.register_resources(resources[:2])
self.assertEqual(resources[0].sync, spam)
self.assertEqual(resources[1].sync, spam)
self.assertEqual(resources[2].sync, spam)
self.assertEqual(resources[3].sync, spam)
def test_get_by_project(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project(context, 'test_project',
'test_resource')
self.assertEqual(driver.called, [
('get_by_project', context, 'test_project', 'test_resource'),
])
self.assertEqual(result, 42)
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
self.assertEqual(driver.called, [
('get_by_class', context, 'test_class', 'test_resource'),
])
self.assertEqual(result, 42)
def _make_quota_obj(self, driver):
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
resources = [
quota.AbsoluteResource('test_resource4'),
quota.AbsoluteResource('test_resource3'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource1'),
]
quota_obj.register_resources(resources)
return quota_obj
def test_get_defaults(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result = quota_obj.get_defaults(context)
self.assertEqual(driver.called, [
('get_defaults', context, quota_obj._resources),
])
self.assertEqual(result, quota_obj._resources)
def test_get_class_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_class_quotas(context, 'test_class')
result2 = quota_obj.get_class_quotas(context, 'test_class', False)
self.assertEqual(driver.called, [
('get_class_quotas', context, quota_obj._resources,
'test_class', True),
('get_class_quotas', context, quota_obj._resources,
'test_class', False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_get_project_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_project_quotas(context, 'test_project')
result2 = quota_obj.get_project_quotas(context, 'test_project',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual(driver.called, [
('get_project_quotas', context, quota_obj._resources,
'test_project', None, True, True),
('get_project_quotas', context, quota_obj._resources,
'test_project', 'test_class', False, False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_count_no_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource5',
True, foo='bar')
def test_count_wrong_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource1',
True, foo='bar')
def test_count(self):
def fake_count(context, *args, **kwargs):
self.assertEqual(args, (True,))
self.assertEqual(kwargs, dict(foo='bar'))
return 5
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.register_resource(quota.CountableResource('test_resource5',
fake_count))
result = quota_obj.count(context, 'test_resource5', True, foo='bar')
self.assertEqual(result, 5)
def test_limit_check(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
test_resource3=2, test_resource4=1)
self.assertEqual(driver.called, [
('limit_check', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
)),
])
def test_reserve(self):
context = FakeContext(None, None)
driver = FakeDriver(reservations=[
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.reserve(context, test_resource1=4,
test_resource2=3, test_resource3=2,
test_resource4=1)
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
), None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
), 3600),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
def test_commit(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('commit', context, ['resv-01', 'resv-02', 'resv-03']),
])
def test_rollback(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
])
def test_destroy_all_by_project(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project(context, 'test_project')
self.assertEqual(driver.called, [
('destroy_all_by_project', context, 'test_project'),
])
def test_expire(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.expire(context)
self.assertEqual(driver.called, [
('expire', context),
])
def test_resources(self):
quota_obj = self._make_quota_obj(None)
self.assertEqual(quota_obj.resources,
['test_resource1', 'test_resource2',
'test_resource3', 'test_resource4'])
class DbQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(DbQuotaDriverTestCase, self).setUp()
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
quota_volumes=10,
quota_gigabytes=1000,
quota_floating_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
quota_injected_file_path_bytes=255,
quota_security_groups=10,
quota_security_group_rules=20,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.driver = quota.DbQuotaDriver()
self.calls = []
timeutils.set_time_override()
def tearDown(self):
timeutils.clear_time_override()
super(DbQuotaDriverTestCase, self).tearDown()
def test_get_defaults(self):
# Use our pre-defined resources
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
volumes=10,
gigabytes=1000,
floating_ips=10,
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
return dict(
instances=5,
ram=25 * 1024,
gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
volumes=10,
gigabytes=500,
floating_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class', False)
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
gigabytes=50,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
volumes=dict(in_use=2, reserved=0),
gigabytes=dict(in_use=10, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self._stub_quota_class_get_all_by_name()
def test_get_project_quotas(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_no_class(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
])
self.assertEqual(result, dict(
instances=dict(
limit=10,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=50 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=128,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=10 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_with_class(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project', quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
volumes=dict(
limit=10,
),
gigabytes=dict(
limit=50,
),
floating_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
))
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True):
self.calls.append('get_project_quotas')
return dict((k, dict(limit=v.default))
for k, v in resources.items())
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
def test_get_quotas_has_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync_no_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['metadata_items'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_has_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['instances'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
'volumes', 'gigabytes',
'floating_ips', 'security_groups'],
True)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
volumes=10,
gigabytes=1000,
floating_ips=10,
security_groups=10,
))
def test_get_quotas_no_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['metadata_items', 'injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes',
'security_group_rules'], False)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_group_rules=20,
))
def test_limit_check_under(self):
self._stub_get_project_quotas()
self.assertRaises(exception.InvalidQuotaValue,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=-1))
def test_limit_check_over(self):
self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=129))
def test_limit_check_unlimited(self):
self.flags(quota_metadata_items=-1)
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=32767))
def test_limit_check(self):
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=128))
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire='invalid')
self.assertEqual(self.calls, [])
def test_reserve_default_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 500, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 86400),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
class FakeSession(object):
def begin(self):
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
return False
class FakeUsage(sqa_models.QuotaUsage):
def save(self, *args, **kwargs):
pass
class QuotaReserveSqlAlchemyTestCase(test.TestCase):
# nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
# own test case, and since it's a quota manipulator, this is the
# best place to put it...
def setUp(self):
super(QuotaReserveSqlAlchemyTestCase, self).setUp()
self.sync_called = set()
def make_sync(res_name):
def sync(context, project_id, session):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
return {res_name: 2}
else:
return {res_name: self.usages[res_name].in_use - 1}
return {res_name: 0}
return sync
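        # The fake sync reports in_use - 1 (or 2 when the recorded in_use is
        # negative), so the tests below can tell whether quota_reserve
        # actually refreshed a usage record.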
self.resources = {}
for res_name in ('instances', 'cores', 'ram'):
res = quota.ReservableResource(res_name, make_sync(res_name))
self.resources[res_name] = res
self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.usages = {}
self.usages_created = {}
self.reservations_created = {}
def fake_get_session():
return FakeSession()
def fake_get_quota_usages(context, session):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, resource, in_use,
reserved, until_refresh, session=None,
save=True):
quota_usage_ref = self._make_quota_usage(
project_id, resource, in_use, reserved, until_refresh,
timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
return quota_usage_ref
def fake_reservation_create(context, uuid, usage_id, project_id,
resource, delta, expire, session=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
return reservation_ref
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create)
timeutils.set_time_override()
def _make_quota_usage(self, project_id, resource, in_use, reserved,
until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.created_at = created_at
quota_usage_ref.updated_at = updated_at
quota_usage_ref.deleted_at = None
quota_usage_ref.deleted = False
return quota_usage_ref
def init_usage(self, project_id, resource, in_use, reserved,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
quota_usage_ref = self._make_quota_usage(project_id, resource, in_use,
reserved, until_refresh,
created_at, updated_at)
self.usages[resource] = quota_usage_ref
def compare_usage(self, usage_dict, expected):
for usage in expected:
resource = usage['resource']
for key, value in usage.items():
actual = getattr(usage_dict[resource], key)
self.assertEqual(actual, value,
"%s != %s on usage for resource %s" %
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.created_at = created_at
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
return reservation_ref
def compare_reservation(self, reservations, expected):
reservations = set(reservations)
for resv in expected:
resource = resv['resource']
resv_obj = self.reservations_created[resource]
self.assertIn(resv_obj.uuid, reservations)
reservations.discard(resv_obj.uuid)
for key, value in resv.items():
actual = getattr(resv_obj, key)
self.assertEqual(actual, value,
"%s != %s on reservation for resource %s" %
(actual, value, resource))
self.assertEqual(len(reservations), 0)
def test_quota_reserve_create_usages(self):
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages_created, [
dict(resource='instances',
project_id='test_project',
in_use=0,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=0,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=0,
reserved=2 * 1024,
until_refresh=None),
])
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages_created['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages_created['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages_created['ram'],
delta=2 * 1024),
])
def test_quota_reserve_negative_in_use(self):
self.init_usage('test_project', 'instances', -1, 0, until_refresh=1)
self.init_usage('test_project', 'cores', -1, 0, until_refresh=1)
self.init_usage('test_project', 'ram', -1, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_until_refresh(self):
self.init_usage('test_project', 'instances', 3, 0, until_refresh=1)
self.init_usage('test_project', 'cores', 3, 0, until_refresh=1)
self.init_usage('test_project', 'ram', 3, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_max_age(self):
max_age = 3600
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
self.init_usage('test_project', 'instances', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'cores', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'ram', 3, 0,
created_at=record_created, updated_at=record_created)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, max_age)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_no_refresh(self):
self.init_usage('test_project', 'instances', 3, 0)
self.init_usage('test_project', 'cores', 3, 0)
self.init_usage('test_project', 'ram', 3, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=3,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=3,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=3,
reserved=2 * 1024,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_unders(self):
self.init_usage('test_project', 'instances', 1, 0)
self.init_usage('test_project', 'cores', 3, 0)
self.init_usage('test_project', 'ram', 1 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=-2,
cores=-4,
ram=-2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=1,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=3,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=1 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=-2 * 1024),
])
def test_quota_reserve_overs(self):
self.init_usage('test_project', 'instances', 4, 0)
self.init_usage('test_project', 'cores', 8, 0)
self.init_usage('test_project', 'ram', 10 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
self.assertRaises(exception.OverQuota,
sqa_api.quota_reserve,
context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=4,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=8,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=10 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_reduction(self):
self.init_usage('test_project', 'instances', 10, 0)
self.init_usage('test_project', 'cores', 20, 0)
self.init_usage('test_project', 'ram', 20 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=-2,
cores=-4,
ram=-2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=10,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=20,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=20 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
project_id='test_project',
delta=-2 * 1024),
])
| apache-2.0 |
AustereCuriosity/astropy | astropy/io/fits/verify.py | 4 | 5728 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import unicode_literals
import operator
import warnings
from ...extern.six import next
from ...utils import indent
from ...utils.exceptions import AstropyUserWarning
class VerifyError(Exception):
"""
Verify exception class.
"""
class VerifyWarning(AstropyUserWarning):
"""
Verify warning class.
"""
VERIFY_OPTIONS = ['ignore', 'warn', 'exception', 'fix', 'silentfix',
'fix+ignore', 'fix+warn', 'fix+exception',
'silentfix+ignore', 'silentfix+warn', 'silentfix+exception']
class _Verify(object):
"""
Shared methods for verification.
"""
def run_option(self, option='warn', err_text='', fix_text='Fixed.',
fix=None, fixable=True):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ['warn', 'exception']:
fixable = False
# fix the value
elif not fixable:
text = 'Unfixable error: {}'.format(text)
else:
if fix:
fix()
text += ' ' + fix_text
return (fixable, text)
def verify(self, option='warn'):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError('Option {!r} not recognized.'.format(option))
if opt == 'ignore':
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if '+' in opt:
fix_opt, report_opt = opt.split('+')
elif opt in ['fix', 'silentfix']:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, 'exception'
else:
fix_opt, report_opt = None, opt
if fix_opt == 'silentfix' and report_opt == 'ignore':
# Fixable errors were fixed, but don't report anything
return
if fix_opt == 'silentfix':
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == 'fix' and report_opt == 'ignore':
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, 'Verification reported errors:')
messages.append('Note: astropy.io.fits uses zero-based indexing.\n')
if fix_opt == 'silentfix' and not unfixable:
return
elif report_opt == 'warn' or (fix_opt == 'fix' and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError('\n' + '\n'.join(messages))
class _ErrList(list):
"""
Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at
different class levels.
"""
def __new__(cls, val=None, unit='Element'):
return super(cls, cls).__new__(cls, val)
def __init__(self, val=None, unit='Element'):
self.unit = unit
def __str__(self):
return '\n'.join(item[1] for item in self.iter_lines())
def iter_lines(self, filter=None, shift=0):
"""
Iterate the nested structure as a list of strings with appropriate
indentations for each level of structure.
"""
element = 0
# go through the list twice, first time print out all top level
# messages
for item in self:
if not isinstance(item, _ErrList):
if filter is None or filter(item):
yield item[0], indent(item[1], shift=shift)
# second time go through the next level items, each of the next level
# must present, even it has nothing.
for item in self:
if isinstance(item, _ErrList):
next_lines = item.iter_lines(filter=filter, shift=shift + 1)
try:
first_line = next(next_lines)
except StopIteration:
first_line = None
if first_line is not None:
if self.unit:
# This line is sort of a header for the next level in
# the hierarchy
yield None, indent('{} {}:'.format(self.unit, element),
shift=shift)
yield first_line
for line in next_lines:
yield line
element += 1
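# A minimal usage sketch (illustrative only, not part of astropy): the error
# texts and the 'HDU'/'Card' units below are made up. It shows how a nested
# _ErrList is flattened into indented report lines by iter_lines().
if __name__ == '__main__':
    inner = _ErrList(unit='Card')
    inner.append((True, 'Fixed a malformed keyword.'))
    outer = _ErrList(unit='HDU')
    outer.append((False, 'Unfixable error: bad checksum.'))
    outer.append(inner)
    for fixable, line in outer.iter_lines():
        print('{0!r}: {1}'.format(fixable, line))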
| bsd-3-clause |
MTechLLC/nica | web/themes/custom/nica_theme/bootstrap/test-infra/s3_cache.py | 2166 | 5734 | #!/usr/bin/env python2.7
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
@contextmanager
def timer():
start = datetime.utcnow()
yield
end = datetime.utcnow()
elapsed = end - start
print("\tDone. Took", int(elapsed.total_seconds()), "second(s).")
@contextmanager
def todo_file(writeback=True):
try:
with open(UPLOAD_TODO_FILE, 'rt') as json_file:
todo = load(json_file)
except (IOError, OSError, ValueError):
todo = {}
yield todo
if writeback:
try:
with open(UPLOAD_TODO_FILE, 'wt') as json_file:
save(todo, json_file)
except (OSError, IOError) as save_err:
print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def mark_needs_uploading(cache_name):
with todo_file() as todo:
todo[cache_name] = True
def mark_uploaded(cache_name):
with todo_file() as todo:
todo.pop(cache_name, None)
def need_to_upload(cache_name):
with todo_file(writeback=False) as todo:
return todo.get(cache_name, False)
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
with timer():
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
with timer():
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
mark_uploaded(cache_name) # reset
try:
print("Downloading {} tarball from S3...".format(cache_name))
with timer():
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
mark_needs_uploading(cache_name)
raise SystemExit("Cached {} download failed!".format(cache_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(cache_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
with timer():
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(cache_name))
mark_uploaded(cache_name)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 2:
raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
mode, cache_name = argv
script_dir = dirname(realpath(__file__))
chdir(script_dir)
try:
with open(CONFIG_FILE, 'rt') as config_file:
config = load(config_file)
except (IOError, OSError, ValueError) as config_err:
print(config_err)
raise SystemExit("Error when trying to load config from JSON file!")
try:
cache_info = config[cache_name]
key_file = expandvars(cache_info["key"])
fallback_cmd = cache_info["generate"]
directory = expandvars(cache_info["cache"])
except (TypeError, KeyError) as load_err:
print(load_err)
raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
try:
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME)
if bucket is None:
raise SystemExit("Could not access bucket!")
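        # The S3 object name is the SHA-256 digest of the file named by the
        # cache's "key" entry, so changing that file automatically points the
        # script at a different cache object instead of a stale one.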
key_file_hash = _sha256_of_file(key_file)
key = Key(bucket, key_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if need_to_upload(cache_name):
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
except BaseException as exc:
if mode != 'download':
raise
print("Error!:", exc)
print("Unable to download from cache.")
print("Running fallback command to generate cache directory {!r}: {}".format(directory, fallback_cmd))
with timer():
run(fallback_cmd, shell=True)
| gpl-2.0 |
philippeback/volatility | volatility/plugins/addrspaces/macho.py | 44 | 6621 | # Volatility
#
# Authors:
# Mike Auty
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import struct
import volatility.plugins.addrspaces.standard as standard
import volatility.obj as obj
import volatility.addrspace as addrspace
macho_types = {
'fat_header': [ 0x8, {
'magic': [0x0, ['unsigned int']],
'nfat_arch': [0x4, ['unsigned int']],
}],
'fat_arch': [ 0x14, {
'cputype': [0x0, ['int']],
'cpusubtype': [0x4, ['int']],
'offset': [0x8, ['unsigned int']],
'size': [0xc, ['unsigned int']],
'align': [0x10, ['unsigned int']],
}],
'mach_header_64': [ 0x20, {
'magic': [0x0, ['unsigned int']],
'cputype': [0x4, ['int']],
'cpusubtype': [0x8, ['int']],
'filetype': [0xc, ['unsigned int']],
'ncmds': [0x10, ['unsigned int']],
'sizeofcmds': [0x14, ['unsigned int']],
'flags': [0x18, ['unsigned int']],
'reserved': [0x1c, ['unsigned int']],
}],
'mach_header': [ 0x1c, {
'magic': [0x0, ['unsigned int']],
'cputype': [0x4, ['int']],
'cpusubtype': [0x8, ['int']],
'filetype': [0xc, ['unsigned int']],
'ncmds': [0x10, ['unsigned int']],
'sizeofcmds': [0x14, ['unsigned int']],
'flags': [0x18, ['unsigned int']],
}],
'symtab_command': [ 0x18, {
'cmd': [0x0, ['unsigned int']],
'cmdsize': [0x4, ['unsigned int']],
'symoff': [0x8, ['unsigned int']],
'nsyms': [0xc, ['unsigned int']],
'stroff': [0x10, ['unsigned int']],
'strsize': [0x14, ['unsigned int']],
}],
'load_command': [ 0x8, {
'cmd': [0x0, ['unsigned int']],
'cmdsize': [0x4, ['unsigned int']],
}],
'segment_command': [ 0x38, {
'cmd': [0x0, ['unsigned int']],
'cmdsize': [0x4, ['unsigned int']],
'segname': [0x8, ['String', dict(length = 16)]],
'vmaddr': [0x18, ['unsigned int']],
'vmsize': [0x1c, ['unsigned int']],
'fileoff': [0x20, ['unsigned int']],
'filesize': [0x24, ['unsigned int']],
'maxprot': [0x28, ['int']],
'initprot': [0x2c, ['int']],
'nsects': [0x30, ['unsigned int']],
'flags': [0x34, ['unsigned int']],
}],
'segment_command_64': [ 0x48, {
'cmd': [0x0, ['unsigned int']],
'cmdsize': [0x4, ['unsigned int']],
'segname': [0x8, ['String', dict(length = 16)]],
'vmaddr': [0x18, ['unsigned long long']],
'vmsize': [0x20, ['unsigned long long']],
'fileoff': [0x28, ['unsigned long long']],
'filesize': [0x30, ['unsigned long long']],
'maxprot': [0x38, ['int']],
'initprot': [0x3c, ['int']],
'nsects': [0x40, ['unsigned int']],
'flags': [0x44, ['unsigned int']],
}],
'section_64': [ 0x50, {
'sectname': [0x0, ['array', 16, ['char']]],
'segname': [0x10, ['array', 16, ['char']]],
'addr': [0x20, ['unsigned long long']],
'size': [0x28, ['unsigned long long']],
'offset': [0x30, ['unsigned int']],
'align': [0x34, ['unsigned int']],
'reloff': [0x38, ['unsigned int']],
'nreloc': [0x3c, ['unsigned int']],
'flags': [0x40, ['unsigned int']],
'reserved1': [0x44, ['unsigned int']],
'reserved2': [0x48, ['unsigned int']],
'reserved3': [0x4c, ['unsigned int']],
}],
'section': [ 0x44, {
'sectname': [0x0, ['array', 16, ['char']]],
'segname': [0x10, ['array', 16, ['char']]],
'addr': [0x20, ['unsigned int']],
'size': [0x24, ['unsigned int']],
'offset': [0x28, ['unsigned int']],
'align': [0x2c, ['unsigned int']],
'reloff': [0x30, ['unsigned int']],
'nreloc': [0x34, ['unsigned int']],
'flags': [0x38, ['unsigned int']],
'reserved1': [0x3c, ['unsigned int']],
'reserved2': [0x40, ['unsigned int']],
}],
}
class MachoTypes(obj.ProfileModification):
def modification(self, profile):
profile.vtypes.update(macho_types)
class MachOAddressSpace(addrspace.AbstractRunBasedMemory):
"""
Address space for mach-o files to support atc-ny memory reader
The created mach-o file has a bunch of segments that contain the address of the section and the size
From there we can translate between incoming address requests to memory contents
"""
order = 1
pae = True
checkname = 'MachOValidAS'
def __init__(self, base, config, *args, **kwargs):
self.as_assert(base, "mac: need base")
addrspace.AbstractRunBasedMemory.__init__(self, base, config, *args, **kwargs)
sig = base.read(0, 4)
if sig == '\xce\xfa\xed\xfe':
self.bits = 32
elif sig == '\xcf\xfa\xed\xfe':
self.bits = 64
else:
self.as_assert(0, "MachO Header signature invalid")
self.runs = []
self.header = None
self.addr_cache = {}
self.parse_macho()
def get_object_name(self, object):
if self.bits == 64 and object in ["mach_header", "segment_command", "section"]:
object = object + "_64"
return object
def get_available_addresses(self):
for vmaddr, _, vmsize in self.runs:
yield vmaddr, vmsize
def get_header(self):
return self.header
def parse_macho(self):
self.runs = []
header_name = self.get_object_name("mach_header")
header_size = self.profile.get_obj_size(header_name)
self.header = obj.Object(header_name, 0, self.base)
offset = header_size
self.segs = []
for i in xrange(0, self.header.ncmds):
structname = self.get_object_name("segment_command")
seg = obj.Object(structname, offset, self.base)
self.segs.append(seg)
# Since these values will be used a lot, make sure they aren't reread (ie, no objects in the runs list)
run = (int(seg.vmaddr), int(seg.fileoff), int(seg.vmsize))
self.runs.append(run)
offset = offset + seg.cmdsize
| gpl-2.0 |
robbiet480/home-assistant | tests/auth/test_init.py | 13 | 32299 | """Tests for the Home Assistant auth module."""
from datetime import timedelta
import jwt
import pytest
import voluptuous as vol
from homeassistant import auth, data_entry_flow
from homeassistant.auth import auth_store, const as auth_const, models as auth_models
from homeassistant.auth.const import MFA_SESSION_EXPIRATION
from homeassistant.core import callback
from homeassistant.util import dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import CLIENT_ID, MockUser, ensure_auth_manager_loaded, flush_store
@pytest.fixture
def mock_hass(loop):
"""Home Assistant mock with minimum amount of data set to make it work with auth."""
hass = Mock()
hass.config.skip_pip = True
return hass
async def test_auth_manager_from_config_validates_config(mock_hass):
"""Test get auth providers."""
with pytest.raises(vol.Invalid):
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Invalid configuration because no users",
"type": "insecure_example",
"id": "invalid_config",
},
],
[],
)
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[],
)
providers = [
{"name": provider.name, "id": provider.id, "type": provider.type}
for provider in manager.auth_providers
]
assert providers == [
{"name": "Test Name", "type": "insecure_example", "id": None},
{"name": "Test Name 2", "type": "insecure_example", "id": "another"},
]
async def test_auth_manager_from_config_auth_modules(mock_hass):
"""Test get auth modules."""
with pytest.raises(vol.Invalid):
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[
{"name": "Module 1", "type": "insecure_example", "data": []},
{
"name": "Invalid configuration because no data",
"type": "insecure_example",
"id": "another",
},
],
)
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[
{"name": "Module 1", "type": "insecure_example", "data": []},
{
"name": "Module 2",
"type": "insecure_example",
"id": "another",
"data": [],
},
],
)
providers = [
{"name": provider.name, "type": provider.type, "id": provider.id}
for provider in manager.auth_providers
]
assert providers == [
{"name": "Test Name", "type": "insecure_example", "id": None},
{"name": "Test Name 2", "type": "insecure_example", "id": "another"},
]
modules = [
{"name": module.name, "type": module.type, "id": module.id}
for module in manager.auth_mfa_modules
]
assert modules == [
{"name": "Module 1", "type": "insecure_example", "id": "insecure_example"},
{"name": "Module 2", "type": "insecure_example", "id": "another"},
]
async def test_create_new_user(hass):
"""Test creating new user."""
events = []
@callback
def user_added(event):
events.append(event)
hass.bus.async_listen("user_added", user_added)
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.is_owner is False
assert user.name == "Test Name"
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_login_as_existing_user(mock_hass):
"""Test login as existing user."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add a fake user that we're not going to log in with
user = MockUser(
id="mock-user2", is_owner=False, is_active=False, name="Not user"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id2",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "other-user"},
is_new=False,
)
)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_linking_user_to_two_auth_providers(hass, hass_storage):
"""Test linking user to two auth providers."""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
},
{
"type": "insecure_example",
"id": "another-provider",
"users": [{"username": "another-user", "password": "another-password"}],
},
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
assert user is not None
step = await manager.login_flow.async_init(
("insecure_example", "another-provider"), context={"credential_only": True}
)
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "another-user", "password": "another-password"}
)
new_credential = step["result"]
await manager.async_link_user(user, new_credential)
assert len(user.credentials) == 2
async def test_saving_loading(hass, hass_storage):
"""Test storing and saving data.
Creates one of each type that we store to test we restore correctly.
"""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
await manager.async_activate_user(user)
# the first refresh token will be used to create access token
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
manager.async_create_access_token(refresh_token, "192.168.0.1")
# the second refresh token will not be used
await manager.async_create_refresh_token(user, "dummy-client")
await flush_store(manager._store._store)
store2 = auth_store.AuthStore(hass)
users = await store2.async_get_users()
assert len(users) == 1
assert users[0].permissions == user.permissions
assert users[0] == user
assert len(users[0].refresh_tokens) == 2
for r_token in users[0].refresh_tokens.values():
if r_token.client_id == CLIENT_ID:
# verify the first refresh token
assert r_token.last_used_at is not None
assert r_token.last_used_ip == "192.168.0.1"
elif r_token.client_id == "dummy-client":
# verify the second refresh token
assert r_token.last_used_at is None
assert r_token.last_used_ip is None
else:
assert False, "Unknown client_id: %s" % r_token.client_id
async def test_cannot_retrieve_expired_access_token(hass):
"""Test that we cannot retrieve expired access tokens."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert refresh_token.user.id is user.id
assert refresh_token.client_id == CLIENT_ID
access_token = manager.async_create_access_token(refresh_token)
assert await manager.async_validate_access_token(access_token) is refresh_token
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow()
- auth_const.ACCESS_TOKEN_EXPIRATION
- timedelta(seconds=11),
):
access_token = manager.async_create_access_token(refresh_token)
assert await manager.async_validate_access_token(access_token) is None
async def test_generating_system_user(hass):
"""Test that we can add a system user."""
events = []
@callback
def user_added(event):
events.append(event)
hass.bus.async_listen("user_added", user_added)
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user("Hass.io")
token = await manager.async_create_refresh_token(user)
assert user.system_generated
assert token is not None
assert token.client_id is None
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_refresh_token_requires_client_for_user(hass):
"""Test create refresh token for a user with client_id."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
assert user.system_generated is False
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user)
token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.token_type == auth_models.TOKEN_TYPE_NORMAL
# default access token expiration
assert token.access_token_expiration == auth_const.ACCESS_TOKEN_EXPIRATION
async def test_refresh_token_not_requires_client_for_system_user(hass):
"""Test create refresh token for a system user w/o client_id."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user("Hass.io")
assert user.system_generated is True
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user, CLIENT_ID)
token = await manager.async_create_refresh_token(user)
assert token is not None
assert token.client_id is None
assert token.token_type == auth_models.TOKEN_TYPE_SYSTEM
async def test_refresh_token_with_specific_access_token_expiration(hass):
"""Test create a refresh token with specific access token expiration."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
token = await manager.async_create_refresh_token(
user, CLIENT_ID, access_token_expiration=timedelta(days=100)
)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.access_token_expiration == timedelta(days=100)
async def test_refresh_token_type(hass):
"""Test create a refresh token with token type."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user, CLIENT_ID, token_type=auth_models.TOKEN_TYPE_SYSTEM
)
token = await manager.async_create_refresh_token(
user, CLIENT_ID, token_type=auth_models.TOKEN_TYPE_NORMAL
)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.token_type == auth_models.TOKEN_TYPE_NORMAL
async def test_refresh_token_type_long_lived_access_token(hass):
"""Test create a refresh token has long-lived access token type."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user, token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
)
token = await manager.async_create_refresh_token(
user,
client_name="GPS LOGGER",
client_icon="mdi:home",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
)
assert token is not None
assert token.client_id is None
assert token.client_name == "GPS LOGGER"
assert token.client_icon == "mdi:home"
assert token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
async def test_cannot_deactive_owner(mock_hass):
"""Test that we cannot deactivate the owner."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
owner = MockUser(is_owner=True).add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_deactivate_user(owner)
async def test_remove_refresh_token(mock_hass):
"""Test that we can remove a refresh token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
access_token = manager.async_create_access_token(refresh_token)
await manager.async_remove_refresh_token(refresh_token)
assert await manager.async_get_refresh_token(refresh_token.id) is None
assert await manager.async_validate_access_token(access_token) is None
async def test_create_access_token(mock_hass):
"""Test normal refresh_token's jwt_key keep same after used."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_NORMAL
jwt_key = refresh_token.jwt_key
access_token = manager.async_create_access_token(refresh_token)
assert access_token is not None
assert refresh_token.jwt_key == jwt_key
    jwt_payload = jwt.decode(access_token, jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(minutes=30).total_seconds()
)
async def test_create_long_lived_access_token(mock_hass):
"""Test refresh_token's jwt_key changed for long-lived access token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=300),
)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token = manager.async_create_access_token(refresh_token)
    jwt_payload = jwt.decode(access_token, refresh_token.jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(days=300).total_seconds()
)
async def test_one_long_lived_access_token_per_refresh_token(mock_hass):
"""Test one refresh_token can only have one long-lived access token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token = manager.async_create_access_token(refresh_token)
jwt_key = refresh_token.jwt_key
rt = await manager.async_validate_access_token(access_token)
assert rt.id == refresh_token.id
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
await manager.async_remove_refresh_token(refresh_token)
assert refresh_token.id not in user.refresh_tokens
rt = await manager.async_validate_access_token(access_token)
assert rt is None, "Previous issued access token has been invoked"
refresh_token_2 = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
assert refresh_token_2.id != refresh_token.id
assert refresh_token_2.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token_2 = manager.async_create_access_token(refresh_token_2)
jwt_key_2 = refresh_token_2.jwt_key
assert access_token != access_token_2
assert jwt_key != jwt_key_2
rt = await manager.async_validate_access_token(access_token_2)
    jwt_payload = jwt.decode(access_token_2, rt.jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token_2.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(days=3000).total_seconds()
)
async def test_login_with_auth_module(mock_hass):
"""Test login as existing user with auth module."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
}
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
# After auth_provider validated, request auth module input form
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "invalid-pin"}
)
# Invalid code error
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
assert step["errors"] == {"base": "invalid_code"}
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin"}
)
# Finally passed, get user
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_login_with_multi_auth_module(mock_hass):
"""Test login as existing user with multiple auth modules."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
},
{
"type": "insecure_example",
"id": "module2",
"data": [{"user_id": "mock-user", "pin": "test-pin2"}],
},
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
# After auth_provider validated, request select auth module
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "select_mfa_module"
step = await manager.login_flow.async_configure(
step["flow_id"], {"multi_factor_auth_module": "module2"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin2"}
)
# Finally passed, get user
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_auth_module_expired_session(mock_hass):
"""Test login as existing user."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
}
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow() + MFA_SESSION_EXPIRATION,
):
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin"}
)
# login flow abort due session timeout
assert step["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert step["reason"] == "login_expired"
async def test_enable_mfa_for_user(hass, hass_storage):
"""Test enable mfa module for user."""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[{"type": "insecure_example", "data": []}],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
assert user is not None
# new user don't have mfa enabled
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
module = manager.get_auth_mfa_module("insecure_example")
# mfa module don't have data
assert bool(module._data) is False
# test enable mfa for user
await manager.async_enable_user_mfa(user, "insecure_example", {"pin": "test-pin"})
assert len(module._data) == 1
assert module._data[0] == {"user_id": user.id, "pin": "test-pin"}
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert "insecure_example" in modules
# re-enable mfa for user will override
await manager.async_enable_user_mfa(
user, "insecure_example", {"pin": "test-pin-new"}
)
assert len(module._data) == 1
assert module._data[0] == {"user_id": user.id, "pin": "test-pin-new"}
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert "insecure_example" in modules
# system user cannot enable mfa
system_user = await manager.async_create_system_user("system-user")
with pytest.raises(ValueError):
await manager.async_enable_user_mfa(
system_user, "insecure_example", {"pin": "test-pin"}
)
assert len(module._data) == 1
modules = await manager.async_get_enabled_mfa(system_user)
assert len(modules) == 0
# disable mfa for user
await manager.async_disable_user_mfa(user, "insecure_example")
assert bool(module._data) is False
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
# disable mfa for user don't enabled just silent fail
await manager.async_disable_user_mfa(user, "insecure_example")
async def test_async_remove_user(hass):
"""Test removing a user."""
events = []
@callback
def user_removed(event):
events.append(event)
hass.bus.async_listen("user_removed", user_removed)
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
assert len(user.credentials) == 1
await hass.auth.async_remove_user(user)
assert len(await manager.async_get_users()) == 0
assert len(user.credentials) == 0
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_new_users(mock_hass):
"""Test newly created users."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
},
{
"username": "test-user-2",
"password": "test-pass",
"name": "Test Name",
},
{
"username": "test-user-3",
"password": "test-pass",
"name": "Test Name",
},
],
}
],
[],
)
ensure_auth_manager_loaded(manager)
user = await manager.async_create_user("Hello")
# first user in the system is owner and admin
assert user.is_owner
assert user.is_admin
assert user.groups == []
user = await manager.async_create_user("Hello 2")
assert not user.is_admin
assert user.groups == []
user = await manager.async_create_user("Hello 3", ["system-admin"])
assert user.is_admin
assert user.groups[0].id == "system-admin"
user_cred = await manager.async_get_or_create_user(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=True,
)
)
assert user_cred.is_admin
| apache-2.0 |
chintak/scikit-image | skimage/feature/util.py | 1 | 4726 | import numpy as np
from skimage.util import img_as_float
class FeatureDetector(object):
def __init__(self):
self.keypoints_ = np.array([])
def detect(self, image):
"""Detect keypoints in image.
Parameters
----------
image : 2D array
Input image.
"""
raise NotImplementedError()
class DescriptorExtractor(object):
def __init__(self):
self.descriptors_ = np.array([])
def extract(self, image, keypoints):
"""Extract feature descriptors in image for given keypoints.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint locations as ``(row, col)``.
"""
raise NotImplementedError()
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
keypoints_color='k', matches_color=None, only_matches=False):
"""Plot matched features.
Parameters
----------
ax : matplotlib.axes.Axes
Matches and image are drawn in this ax.
image1 : (N, M [, 3]) array
First grayscale or color image.
image2 : (N, M [, 3]) array
Second grayscale or color image.
keypoints1 : (K1, 2) array
First keypoint coordinates as ``(row, col)``.
keypoints2 : (K2, 2) array
Second keypoint coordinates as ``(row, col)``.
matches : (Q, 2) array
Indices of corresponding matches in first and second set of
descriptors, where ``matches[:, 0]`` denote the indices in the first
and ``matches[:, 1]`` the indices in the second set of descriptors.
keypoints_color : matplotlib color, optional
Color for keypoint locations.
matches_color : matplotlib color, optional
Color for lines which connect keypoint matches. By default the
color is chosen randomly.
only_matches : bool, optional
Whether to only plot matches and not plot the keypoint locations.
"""
image1 = img_as_float(image1)
image2 = img_as_float(image2)
new_shape1 = list(image1.shape)
new_shape2 = list(image2.shape)
if image1.shape[0] < image2.shape[0]:
new_shape1[0] = image2.shape[0]
elif image1.shape[0] > image2.shape[0]:
new_shape2[0] = image1.shape[0]
if image1.shape[1] < image2.shape[1]:
new_shape1[1] = image2.shape[1]
elif image1.shape[1] > image2.shape[1]:
new_shape2[1] = image1.shape[1]
if new_shape1 != image1.shape:
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
new_image1[:image1.shape[0], :image1.shape[1]] = image1
image1 = new_image1
if new_shape2 != image2.shape:
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
new_image2[:image2.shape[0], :image2.shape[1]] = image2
image2 = new_image2
image = np.concatenate([image1, image2], axis=1)
offset = image1.shape
if not only_matches:
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.imshow(image)
ax.axis((0, 2 * offset[1], offset[0], 0))
for i in range(matches.shape[0]):
idx1 = matches[i, 0]
idx2 = matches[i, 1]
if matches_color is None:
color = np.random.rand(3, 1)
else:
color = matches_color
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
(keypoints1[idx1, 0], keypoints2[idx2, 0]),
'-', color=color)
def _prepare_grayscale_input_2D(image):
image = np.squeeze(image)
if image.ndim != 2:
raise ValueError("Only 2-D gray-scale images supported.")
return img_as_float(image)
def _mask_border_keypoints(image_shape, keypoints, distance):
"""Mask coordinates that are within certain distance from the image border.
Parameters
----------
image_shape : (2, ) array_like
Shape of the image as ``(rows, cols)``.
keypoints : (N, 2) array
Keypoint coordinates as ``(rows, cols)``.
distance : int
Image border distance.
Returns
-------
mask : (N, ) bool array
Mask indicating if pixels are within the image (``True``) or in the
border region of the image (``False``).
"""
rows = image_shape[0]
cols = image_shape[1]
mask = (((distance - 1) < keypoints[:, 0])
& (keypoints[:, 0] < (rows - distance + 1))
& ((distance - 1) < keypoints[:, 1])
& (keypoints[:, 1] < (cols - distance + 1)))
return mask
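# A minimal usage sketch (illustrative only, not part of scikit-image): the
# keypoints below are made up. With a 10x10 image and distance=2, the points
# at (1, 1) and (9, 9) fall in the border region and should be masked out,
# giving [False True False].
if __name__ == '__main__':
    example_keypoints = np.array([[1, 1], [5, 5], [9, 9]])
    print(_mask_border_keypoints((10, 10), example_keypoints, 2))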
| bsd-3-clause |
mbrukman/delayed-replay | tests/proxy_test.py | 1 | 1244 | #!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Tests for functionality in the proxy.py file.
import proxy
import unittest
class GetTargetUrlTest(unittest.TestCase):
def testSimple(self):
cases = [
('foo/bar', '/?q=foo/bar'),
('/home/~user', '/?q=/home/%7Euser')
]
for expected, path in cases:
actual = proxy.GetTargetUrl(path)
if expected != actual:
print 'Failed conversion for %s' % path
print 'expected: %s' % expected
print ' actual: %s' % actual
self.assertEquals(expected, actual)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
rpatterson/test-har | test_har/tests/test_requests.py | 1 | 3214 | """
Test using HAR files in Python tests against the requests library.
"""
import json
import requests
import requests_mock
from test_har import requests_har as test_har
from test_har import tests
class HARDogfoodRequestsTests(tests.HARDogfoodTestCase, test_har.HARTestCase):
"""
Test using HAR files in Python tests against the requests library.
"""
RESPONSE_TYPE = requests.Response
def setUp(self):
"""
Start the mocker, mock the example HAR response, and register cleanup.
"""
super(HARDogfoodRequestsTests, self).setUp()
self.mocker = requests_mock.Mocker()
self.mocker.start()
self.addCleanup(self.mocker.stop)
self.headers = test_har.array_to_dict(
self.entry["response"]["headers"])
self.headers['Content-Type'] = self.entry[
"response"]["content"]["mimeType"]
# Insert a key into the response
# about which HAR response makes no assertion
content = dict(
self.entry["response"]["content"]["text"],
email='[email protected]')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=self.headers,
text=json.dumps(content))
def test_non_json(self):
"""
Mock the requests library non-JSON response.
"""
self.entry["response"]["content"]["mimeType"] = "text/html"
self.entry["response"]["content"]["text"] = (
'<html><body>Foo HTML body</body></html>')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=dict(self.headers, **{'Content-Type': self.entry[
"response"]["content"]["mimeType"]}),
text=self.entry["response"]["content"]["text"])
super(HARDogfoodRequestsTests, self).test_non_json()
def test_missing_content_type(self):
"""
Fail when the response is missing the content/MIME type.
"""
self.headers.pop('Content-Type')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=self.headers,
text=json.dumps(self.entry["response"]["content"]["text"]))
with self.assertRaises(AssertionError) as har_failures:
self.assertHAR(self.example)
self.assertIn(
'content/mimeType', har_failures.exception.args[0],
'Assertion exception missing MIME type detail')
# BBB Python 2.7 str vs unicode compat
with self.assertRaises(AssertionError) as expected:
self.assertIn(
'Content-Type', self.headers,
'Missing response content type')
self.assertEqual(
har_failures.exception.args[0]['content/mimeType'].args,
expected.exception.args,
'Wrong missing response MIME type failure assertion')
| gpl-3.0 |
pavlov99/jsonapi | jsonapi/utils.py | 1 | 2220 | """ JSON:API utils."""
class _classproperty(property):
""" Implement property behaviour for classes.
class A():
@_classproperty
@classmethod
def name(cls):
return cls.__name__
"""
def __get__(self, obj, type_):
return self.fget.__get__(None, type_)()
def _cached(f):
""" Decorator that makes a method cached."""
attr_name = '_cached_' + f.__name__
def wrapper(obj, *args, **kwargs):
if not hasattr(obj, attr_name):
setattr(obj, attr_name, f(obj, *args, **kwargs))
return getattr(obj, attr_name)
return wrapper
classproperty = lambda f: _classproperty(classmethod(f))
cached_property = lambda f: property(_cached(f))
cached_classproperty = lambda f: classproperty(_cached(f))
class Choices(object):
""" Choices."""
def __init__(self, *choices):
self._choices = []
self._choice_dict = {}
for choice in choices:
if isinstance(choice, (list, tuple)):
if len(choice) == 2:
choice = (choice[0], choice[1], choice[1])
elif len(choice) != 3:
                    raise ValueError(
                        "Choices can't handle a list/tuple of length {0}, "
                        "only 2 or 3".format(len(choice)))
else:
choice = (choice, choice, choice)
self._choices.append((choice[0], choice[2]))
self._choice_dict[choice[1]] = choice[0]
def __getattr__(self, attname):
try:
return self._choice_dict[attname]
except KeyError:
raise AttributeError(attname)
def __iter__(self):
return iter(self._choices)
def __getitem__(self, index):
return self._choices[index]
def __delitem__(self, index):
del self._choices[index]
def __setitem__(self, index, value):
self._choices[index] = value
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
self._choices
)
def __len__(self):
return len(self._choices)
def __contains__(self, element):
return element in self._choice_dict.values()
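# A minimal usage sketch (illustrative only, not part of the library): the
# STATUS choices and the Post class below are made-up examples of the Choices
# container and the classproperty helper defined above.
if __name__ == "__main__":
    STATUS = Choices(
        (0, "DRAFT", "Draft"),
        (1, "PUBLISHED", "Published"),
    )
    print(STATUS.DRAFT)    # 0, looked up through __getattr__
    print(list(STATUS))    # [(0, 'Draft'), (1, 'Published')]
    print(0 in STATUS)     # True, membership checks the stored values
    class Post(object):
        @classproperty
        def kind(cls):
            return cls.__name__
    print(Post.kind)       # 'Post', computed without instantiating Post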
| mit |
NicWayand/xray | xarray/plot/utils.py | 1 | 6442 | import pkg_resources
import numpy as np
import pandas as pd
from ..core.pycompat import basestring
def _load_default_cmap(fname='default_colormap.csv'):
"""
Returns viridis color map
"""
from matplotlib.colors import LinearSegmentedColormap
# Not sure what the first arg here should be
f = pkg_resources.resource_stream(__name__, fname)
cm_data = pd.read_csv(f, header=None).values
return LinearSegmentedColormap.from_list('viridis', cm_data)
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = 'both'
elif extend_min:
extend = 'min'
elif extend_max:
extend = 'max'
else:
extend = 'neither'
return extend
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = 'max'
if extend == 'both':
ext_n = 2
elif extend in ['min', 'max']:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(
levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, 'name', cmap)
return new_cmap, cnorm
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1., n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except ImportError:
# if that fails, use matplotlib
# in this case, is there any difference between mpl and seaborn?
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, basestring):
# we have some sort of named palette
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ImportError, ValueError):
# ValueError is raised when seaborn doesn't like a colormap
# (e.g. jet). If that fails, use matplotlib
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
except ValueError:
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
center=None, robust=False, extend=None,
levels=None, filled=True, cnorm=None):
"""
Use some heuristics to set good defaults for colorbar and range.
Adapted from Seaborn:
https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
Parameters
==========
plot_data: Numpy array
Doesn't handle xarray objects
Returns
=======
cmap_params : dict
Use depends on the type of the plotting function
"""
ROBUST_PERCENTILE = 2.0
import matplotlib as mpl
calc_data = np.ravel(plot_data[~pd.isnull(plot_data)])
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# vlim might be computed below
vlim = None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
# kwargs not specific about divergent or not: infer defaults from data
divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = "RdBu_r"
else:
cmap = "viridis"
# Allow viridis before matplotlib 1.5
if cmap == "viridis":
cmap = _load_default_cmap()
# Handle discrete levels
if levels is not None:
if isinstance(levels, int):
ticker = mpl.ticker.MaxNLocator(levels)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None:
cmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
return dict(vmin=vmin, vmax=vmax, cmap=cmap, extend=extend,
levels=levels, norm=cnorm)
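# Illustrative sketch, not part of the original module: one possible call to
# _determine_cmap_params on a small array. Exact limits depend on the data; the
# returned dict always carries vmin, vmax, cmap, extend, levels and norm.
def _example_cmap_params():  # pragma: no cover - illustration only
    data = np.linspace(-3.0, 5.0, 100).reshape(10, 10)
    params = _determine_cmap_params(data, robust=True, levels=7)
    # The data crosses zero and no center was given, so the divergent default
    # ('RdBu_r') is picked and vmin/vmax are made symmetric about zero.
    return params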
def _infer_xy_labels(darray, x, y):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array.
"""
if x is None and y is None:
if darray.ndim != 2:
raise ValueError('DataArray must be 2d')
y, x = darray.dims
elif x is None or y is None:
raise ValueError('cannot supply only one of x and y')
elif any(k not in darray.coords for k in (x, y)):
raise ValueError('x and y must be coordinate variables')
return x, y
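# Illustrative sketch, not part of the original module: _infer_xy_labels takes
# the y label from the first dimension and x from the second, shown here with a
# minimal stand-in object rather than a real DataArray.
def _example_infer_xy_labels():  # pragma: no cover - illustration only
    class _Fake2D(object):
        ndim = 2
        dims = ('time', 'lon')
        coords = {'time': None, 'lon': None}
    x, y = _infer_xy_labels(_Fake2D(), None, None)
    # x == 'lon', y == 'time'
    return x, y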
| apache-2.0 |
Eseoghene/bite-project | deps/gdata-python-client/src/gdata/geo/data.py | 132 | 2385 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Geography Extension"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
GEORSS_TEMPLATE = '{http://www.georss.org/georss/}%s'
GML_TEMPLATE = '{http://www.opengis.net/gml/}%s'
GEO_TEMPLATE = '{http://www.w3.org/2003/01/geo/wgs84_pos#/}%s'
class GeoLat(atom.core.XmlElement):
"""Describes a W3C latitude."""
_qname = GEO_TEMPLATE % 'lat'
class GeoLong(atom.core.XmlElement):
"""Describes a W3C longitude."""
_qname = GEO_TEMPLATE % 'long'
class GeoRssBox(atom.core.XmlElement):
"""Describes a geographical region."""
_qname = GEORSS_TEMPLATE % 'box'
class GeoRssPoint(atom.core.XmlElement):
"""Describes a geographical location."""
_qname = GEORSS_TEMPLATE % 'point'
class GmlLowerCorner(atom.core.XmlElement):
"""Describes a lower corner of a region."""
_qname = GML_TEMPLATE % 'lowerCorner'
class GmlPos(atom.core.XmlElement):
"""Describes a latitude and longitude."""
_qname = GML_TEMPLATE % 'pos'
class GmlPoint(atom.core.XmlElement):
"""Describes a particular geographical point."""
_qname = GML_TEMPLATE % 'Point'
pos = GmlPos
class GmlUpperCorner(atom.core.XmlElement):
"""Describes an upper corner of a region."""
_qname = GML_TEMPLATE % 'upperCorner'
class GmlEnvelope(atom.core.XmlElement):
"""Describes a Gml geographical region."""
_qname = GML_TEMPLATE % 'Envelope'
lower_corner = GmlLowerCorner
upper_corner = GmlUpperCorner
class GeoRssWhere(atom.core.XmlElement):
"""Describes a geographical location or region."""
_qname = GEORSS_TEMPLATE % 'where'
Point = GmlPoint
Envelope = GmlEnvelope
class W3CPoint(atom.core.XmlElement):
"""Describes a W3C geographical location."""
_qname = GEO_TEMPLATE % 'Point'
long = GeoLong
lat = GeoLat
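# Illustrative sketch, not part of the original module: composing a georss:where
# element with a nested gml:Point. The constructor arguments and serialization
# helpers are assumed to be the ones provided by atom.core.XmlElement.
def _example_geo_rss_where():
    point = GmlPoint()
    point.pos = GmlPos(text='37.42 -122.08')
    where = GeoRssWhere()
    where.Point = point
    return where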
| apache-2.0 |
j5shi/Thruster | pylibs/test/test_mutex.py | 4 | 1034 | import unittest
import test.test_support
mutex = test.test_support.import_module("mutex", deprecated=True)
class MutexTest(unittest.TestCase):
def test_lock_and_unlock(self):
def called_by_mutex(some_data):
self.assertEqual(some_data, "spam")
self.assertTrue(m.test(), "mutex not held")
# Nested locking
m.lock(called_by_mutex2, "eggs")
def called_by_mutex2(some_data):
self.assertEqual(some_data, "eggs")
self.assertTrue(m.test(), "mutex not held")
self.assertTrue(ready_for_2,
"called_by_mutex2 called too soon")
m = mutex.mutex()
        ready_for_2 = False
m.lock(called_by_mutex, "spam")
ready_for_2 = True
# unlock both locks
m.unlock()
m.unlock()
self.assertFalse(m.test(), "mutex still held")
def test_main():
test.test_support.run_unittest(MutexTest)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
ehashman/oh-mainline | vendor/packages/Pygments/pygments/formatters/svg.py | 362 | 5867 | # -*- coding: utf-8 -*-
"""
pygments.formatters.svg
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for SVG output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt
__all__ = ['SvgFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
    return text.replace('&', '&amp;'). \
        replace('<', '&lt;'). \
        replace('>', '&gt;'). \
        replace('"', '&quot;'). \
        replace("'", '&#39;')
class2style = {}
class SvgFormatter(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
*New in Pygments 0.9.*
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
don't add a XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
in pixels, or ``20`` else. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
size is given in pixels, or ``25`` else.
`spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled, in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
# XXX outencoding
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
if fs.endswith('px'): fs = fs[:-2].strip()
try:
int_fs = int(fs)
except:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write('<g font-family="%s" font-size="%s">\n' %
(self.fontfamily, self.fontsize))
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n<text x="%s" y="%s" '
'xml:space="preserve">' % (x, y))
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
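# Illustrative sketch, not part of the original module: typical use of
# SvgFormatter through pygments.highlight, assuming the Python lexer.
def _example_svg_highlight():
    from pygments import highlight
    from pygments.lexers import PythonLexer
    return highlight("print('hello')", PythonLexer(), SvgFormatter(fontsize='16px'))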
| agpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/views/decorators/csrf.py | 586 | 2202 | from functools import wraps
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import available_attrs, decorator_from_middleware
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
# We need this to behave just like the CsrfViewMiddleware, but not reject
# requests or log warnings.
def _reject(self, request, reason):
return None
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
# Forces process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
"""
Marks a view function as being exempt from the CSRF view protection.
"""
# We could just do view_func.csrf_exempt = True, but decorators
# are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.csrf_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
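# Illustrative sketch, not part of the original module: how these decorators are
# typically applied; 'landing_page' and 'webhook' are hypothetical views.
def _example_usage():
    from django.http import HttpResponse
    @ensure_csrf_cookie
    def landing_page(request):
        # The response carries a CSRF cookie even without the template tag.
        return HttpResponse('ok')
    @csrf_exempt
    def webhook(request):
        # Skips CSRF checks, e.g. for third-party POST callbacks.
        return HttpResponse('ok')
    return landing_page, webhook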
| mit |
zofuthan/edx-platform | common/lib/xmodule/xmodule/video_module/video_module.py | 47 | 37504 |
# -*- coding: utf-8 -*-
# pylint: disable=abstract-method
"""Video is ungraded Xmodule for support video content.
It's new improved video module, which support additional feature:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
import copy
import json
import logging
import random
from collections import OrderedDict
from operator import itemgetter
from lxml import etree
from pkg_resources import resource_string
from django.conf import settings
from openedx.core.lib.cache_utils import memoize_in_request_cache
from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.x_module import XModule, module_attr
from xmodule.editing_module import TabsEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.xml_module import is_pointer_tag, name_to_pathname, deserialize_field
from xmodule.exceptions import NotFoundError
from .transcripts_utils import VideoTranscriptsMixin
from .video_utils import create_youtube_string, get_video_from_cdn, get_poster
from .bumper_utils import bumperize
from .video_xfields import VideoFields
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from xmodule.video_module import manage_video_subtitles_save
from xmodule.mixin import LicenseMixin
# The following import/except block for edxval is temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoModule should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoModule out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoModule should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoModule tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
_ = lambda text: text
@XBlock.wants('settings')
class VideoModule(VideoFields, VideoTranscriptsMixin, VideoStudentViewHandlers, XModule, LicenseMixin):
"""
XML source example:
<video show_captions="true"
youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
url_name="lecture_21_3" display_name="S19V3: Vacancies"
>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
</video>
"""
video_time = 0
icon_class = 'video'
# To make sure that js files are called in proper order we use numerical
    # index. We do that to avoid issues that occur in tests.
module = __name__.replace('.video_module', '', 2)
js = {
'js': [
resource_string(module, 'js/src/video/00_component.js'),
resource_string(module, 'js/src/video/00_video_storage.js'),
resource_string(module, 'js/src/video/00_resizer.js'),
resource_string(module, 'js/src/video/00_async_process.js'),
resource_string(module, 'js/src/video/00_i18n.js'),
resource_string(module, 'js/src/video/00_sjson.js'),
resource_string(module, 'js/src/video/00_iterator.js'),
resource_string(module, 'js/src/video/01_initialize.js'),
resource_string(module, 'js/src/video/025_focus_grabber.js'),
resource_string(module, 'js/src/video/02_html5_video.js'),
resource_string(module, 'js/src/video/03_video_player.js'),
resource_string(module, 'js/src/video/035_video_accessible_menu.js'),
resource_string(module, 'js/src/video/04_video_control.js'),
resource_string(module, 'js/src/video/04_video_full_screen.js'),
resource_string(module, 'js/src/video/05_video_quality_control.js'),
resource_string(module, 'js/src/video/06_video_progress_slider.js'),
resource_string(module, 'js/src/video/07_video_volume_control.js'),
resource_string(module, 'js/src/video/08_video_speed_control.js'),
resource_string(module, 'js/src/video/09_video_caption.js'),
resource_string(module, 'js/src/video/09_play_placeholder.js'),
resource_string(module, 'js/src/video/09_play_pause_control.js'),
resource_string(module, 'js/src/video/09_play_skip_control.js'),
resource_string(module, 'js/src/video/09_skip_control.js'),
resource_string(module, 'js/src/video/09_bumper.js'),
resource_string(module, 'js/src/video/09_save_state_plugin.js'),
resource_string(module, 'js/src/video/09_events_plugin.js'),
resource_string(module, 'js/src/video/09_events_bumper_plugin.js'),
resource_string(module, 'js/src/video/09_poster.js'),
resource_string(module, 'js/src/video/095_video_context_menu.js'),
resource_string(module, 'js/src/video/10_commands.js'),
resource_string(module, 'js/src/video/10_main.js')
]
}
css = {'scss': [
resource_string(module, 'css/video/display.scss'),
resource_string(module, 'css/video/accessible_menu.scss'),
]}
js_module_name = "Video"
def get_transcripts_for_student(self, transcripts):
"""Return transcript information necessary for rendering the XModule student view.
This is more or less a direct extraction from `get_html`.
Args:
transcripts (dict): A dict with all transcripts and a sub.
Returns:
Tuple of (track_url, transcript_language, sorted_languages)
track_url -> subtitle download url
transcript_language -> default transcript language
sorted_languages -> dictionary of available transcript languages
"""
track_url = None
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.download_track:
if self.track:
track_url = self.track
elif sub or other_lang:
track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')
transcript_language = self.get_default_transcript_language(transcripts)
native_languages = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
languages = {
lang: native_languages.get(lang, display)
for lang, display in settings.ALL_LANGUAGES
if lang in other_lang
}
if not other_lang or (other_lang and sub):
languages['en'] = 'English'
# OrderedDict for easy testing of rendered context in tests
sorted_languages = sorted(languages.items(), key=itemgetter(1))
sorted_languages = OrderedDict(sorted_languages)
return track_url, transcript_language, sorted_languages
def get_html(self):
transcript_download_format = self.transcript_download_format if not (self.download_track and self.track) else None
sources = filter(None, self.html5_sources)
download_video_link = None
branding_info = None
youtube_streams = ""
# If we have an edx_video_id, we prefer its values over what we store
# internally for download links (source, html5_sources) and the youtube
# stream.
if self.edx_video_id and edxval_api:
try:
val_profiles = ["youtube", "desktop_webm", "desktop_mp4"]
val_video_urls = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles)
# VAL will always give us the keys for the profiles we asked for, but
# if it doesn't have an encoded video entry for that Video + Profile, the
# value will map to `None`
# add the non-youtube urls to the list of alternative sources
# use the last non-None non-youtube url as the link to download the video
for url in [val_video_urls[p] for p in val_profiles if p != "youtube"]:
if url:
if url not in sources:
sources.append(url)
if self.download_video:
download_video_link = url
# set the youtube url
if val_video_urls["youtube"]:
youtube_streams = "1.00:{}".format(val_video_urls["youtube"])
except edxval_api.ValInternalError:
# VAL raises this exception if it can't find data for the edx video ID. This can happen if the
# course data is ported to a machine that does not have the VAL data. So for now, pass on this
# exception and fallback to whatever we find in the VideoDescriptor.
log.warning("Could not retrieve information from VAL for edx Video ID: %s.", self.edx_video_id)
# If the user comes from China use China CDN for html5 videos.
# 'CN' is China ISO 3166-1 country code.
# Video caching is disabled for Studio. User_location is always None in Studio.
# CountryMiddleware disabled for Studio.
cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get(self.system.user_location)
if getattr(self, 'video_speed_optimizations', True) and cdn_url:
branding_info = BrandingInfoConfig.get_config().get(self.system.user_location)
for index, source_url in enumerate(sources):
new_url = get_video_from_cdn(cdn_url, source_url)
if new_url:
sources[index] = new_url
# If there was no edx_video_id, or if there was no download specified
# for it, we fall back on whatever we find in the VideoDescriptor
if not download_video_link and self.download_video:
if self.source:
download_video_link = self.source
elif self.html5_sources:
download_video_link = self.html5_sources[0]
track_url, transcript_language, sorted_languages = self.get_transcripts_for_student(self.get_transcripts_info())
# CDN_VIDEO_URLS is only to be used here and will be deleted
# TODO([email protected]): Delete this after the CDN experiment has completed.
html_id = self.location.html_id()
if self.system.user_location == 'CN' and \
settings.FEATURES.get('ENABLE_VIDEO_BEACON', False) and \
html_id in getattr(settings, 'CDN_VIDEO_URLS', {}).keys():
cdn_urls = getattr(settings, 'CDN_VIDEO_URLS', {})[html_id]
cdn_exp_group, new_source = random.choice(zip(range(len(cdn_urls)), cdn_urls))
if cdn_exp_group > 0:
sources[0] = new_source
cdn_eval = True
else:
cdn_eval = False
cdn_exp_group = None
self.youtube_streams = youtube_streams or create_youtube_string(self) # pylint: disable=W0201
settings_service = self.runtime.service(self, 'settings')
yt_api_key = None
if settings_service:
xblock_settings = settings_service.get_settings_bucket(self)
if xblock_settings and 'YOUTUBE_API_KEY' in xblock_settings:
yt_api_key = xblock_settings['YOUTUBE_API_KEY']
metadata = {
'saveStateUrl': self.system.ajax_url + '/save_user_state',
'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),
'streams': self.youtube_streams,
'sub': self.sub,
'sources': sources,
# This won't work when we move to data that
# isn't on the filesystem
'captionDataDir': getattr(self, 'data_dir', None),
'showCaptions': json.dumps(self.show_captions),
'generalSpeed': self.global_speed,
'speed': self.speed,
'savedVideoPosition': self.saved_video_position.total_seconds(),
'start': self.start_time.total_seconds(),
'end': self.end_time.total_seconds(),
'transcriptLanguage': transcript_language,
'transcriptLanguages': sorted_languages,
# TODO: Later on the value 1500 should be taken from some global
# configuration setting field.
'ytTestTimeout': 1500,
'ytApiUrl': settings.YOUTUBE['API'],
'ytMetadataUrl': settings.YOUTUBE['METADATA_URL'],
'ytKey': yt_api_key,
'transcriptTranslationUrl': self.runtime.handler_url(
self, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.runtime.handler_url(
self, 'transcript', 'available_translations'
).rstrip('/?'),
## For now, the option "data-autohide-html5" is hard coded. This option
## either enables or disables autohiding of controls and captions on mouse
## inactivity. If set to true, controls and captions will autohide for
## HTML5 sources (non-YouTube) after a period of mouse inactivity over the
## whole video. When the mouse moves (or a key is pressed while any part of
## the video player is focused), the captions and controls will be shown
## once again.
##
## There is no option in the "Advanced Editor" to set this option. However,
## this option will have an effect if changed to "True". The code on
## front-end exists.
'autohideHtml5': False
}
bumperize(self)
context = {
'bumper_metadata': json.dumps(self.bumper['metadata']), # pylint: disable=E1101
'metadata': json.dumps(OrderedDict(metadata)),
'poster': json.dumps(get_poster(self)),
'branding_info': branding_info,
'cdn_eval': cdn_eval,
'cdn_exp_group': cdn_exp_group,
'id': self.location.html_id(),
'display_name': self.display_name_with_default,
'handout': self.handout,
'download_video_link': download_video_link,
'track': track_url,
'transcript_download_format': transcript_download_format,
'transcript_download_formats_list': self.descriptor.fields['transcript_download_format'].values,
'license': getattr(self, "license", None),
}
return self.system.render_template('video.html', context)
@XBlock.wants("request_cache")
@XBlock.wants("settings")
class VideoDescriptor(VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers,
TabsEditingDescriptor, EmptyDataRawDescriptor, LicenseMixin):
"""
Descriptor for `VideoModule`.
"""
module_class = VideoModule
transcript = module_attr('transcript')
show_in_read_only_mode = True
tabs = [
{
'name': _("Basic"),
'template': "video/transcripts.html",
'current': True
},
{
'name': _("Advanced"),
'template': "tabs/metadata-edit-tab.html"
}
]
def __init__(self, *args, **kwargs):
"""
Mostly handles backward compatibility issues.
`source` is deprecated field.
a) If `source` exists and `source` is not `html5_sources`: show `source`
field on front-end as not-editable but clearable. Dropdown is a new
field `download_video` and it has value True.
b) If `source` is cleared it is not shown anymore.
c) If `source` exists and `source` in `html5_sources`, do not show `source`
field. `download_video` field has value True.
"""
super(VideoDescriptor, self).__init__(*args, **kwargs)
# For backwards compatibility -- if we've got XML data, parse it out and set the metadata fields
if self.data:
field_data = self._parse_video_xml(etree.fromstring(self.data))
self._field_data.set_many(self, field_data)
del self.data
self.source_visible = False
if self.source:
# If `source` field value exist in the `html5_sources` field values,
# then delete `source` field value and use value from `html5_sources` field.
if self.source in self.html5_sources:
self.source = '' # Delete source field value.
self.download_video = True
else: # Otherwise, `source` field value will be used.
self.source_visible = True
if not self.fields['download_video'].is_set_on(self):
self.download_video = True
# Force download_video field to default value if it's not explicitly set for backward compatibility.
if not self.fields['download_video'].is_set_on(self):
self.download_video = self.download_video
self.force_save_fields(['download_video'])
# for backward compatibility.
        # If the course existed and was not re-imported by the time the `download_track`
        # field was added, we should enable `download_track` if the following is true:
if not self.fields['download_track'].is_set_on(self) and self.track:
self.download_track = True
def editor_saved(self, user, old_metadata, old_content):
"""
Used to update video values during `self`:save method from CMS.
old_metadata: dict, values of fields of `self` with scope=settings which were explicitly set by user.
old_content, same as `old_metadata` but for scope=content.
Due to nature of code flow in item.py::_save_item, before current function is called,
fields of `self` instance have been already updated, but not yet saved.
To obtain values, which were changed by user input,
one should compare own_metadata(self) and old_medatada.
Video player has two tabs, and due to nature of sync between tabs,
metadata from Basic tab is always sent when video player is edited and saved first time, for example:
{'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []},
        that's why these fields will always be present in old_metadata after first save. This should be fixed.
At consequent save requests html5_sources are always sent too, disregard of their change by user.
That means that html5_sources are always in list of fields that were changed (`metadata` param in save_item).
This should be fixed too.
"""
metadata_was_changed_by_user = old_metadata != own_metadata(self)
if metadata_was_changed_by_user:
manage_video_subtitles_save(
self,
user,
old_metadata if old_metadata else None,
generate_translation=True
)
def save_with_metadata(self, user):
"""
        Save module with updated metadata to database.
"""
self.save()
self.runtime.modulestore.update_item(self, user.id)
@property
def editable_metadata_fields(self):
editable_fields = super(VideoDescriptor, self).editable_metadata_fields
settings_service = self.runtime.service(self, 'settings')
if settings_service:
xb_settings = settings_service.get_settings_bucket(self)
if not xb_settings.get("licensing_enabled", False) and "license" in editable_fields:
del editable_fields["license"]
if self.source_visible:
editable_fields['source']['non_editable'] = True
else:
editable_fields.pop('source')
languages = [{'label': label, 'code': lang} for lang, label in settings.ALL_LANGUAGES if lang != u'en']
languages.sort(key=lambda l: l['label'])
editable_fields['transcripts']['languages'] = languages
editable_fields['transcripts']['type'] = 'VideoTranslations'
editable_fields['transcripts']['urlRoot'] = self.runtime.handler_url(self, 'studio_transcript', 'translation').rstrip('/?')
editable_fields['handout']['type'] = 'FileUploader'
return editable_fields
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
This may be overridden by subclasses
xml_data: A string of xml that will be translated into data and children for
this module
system: A DescriptorSystem for interacting with external resources
id_generator is used to generate course-specific urls and identifiers
"""
xml_object = etree.fromstring(xml_data)
url_name = xml_object.get('url_name', xml_object.get('slug'))
block_type = 'video'
definition_id = id_generator.create_definition(block_type, url_name)
usage_id = id_generator.create_usage(definition_id)
if is_pointer_tag(xml_object):
filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
xml_object = cls.load_file(filepath, system.resources_fs, usage_id)
system.parse_asides(xml_object, definition_id, usage_id, id_generator)
field_data = cls._parse_video_xml(xml_object, id_generator)
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
video = system.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
# We also don't have separate notions of definition and usage ids yet,
# so we use the location for both
ScopeIds(None, block_type, definition_id, usage_id),
field_data,
)
return video
def definition_to_xml(self, resource_fs):
"""
Returns an xml string representing this module.
"""
xml = etree.Element('video')
youtube_string = create_youtube_string(self)
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't need to write it out.
if youtube_string and youtube_string != '1.00:3_yD_cEKoCk':
xml.set('youtube', unicode(youtube_string))
xml.set('url_name', self.url_name)
attrs = {
'display_name': self.display_name,
'show_captions': json.dumps(self.show_captions),
'start_time': self.start_time,
'end_time': self.end_time,
'sub': self.sub,
'download_track': json.dumps(self.download_track),
'download_video': json.dumps(self.download_video),
}
for key, value in attrs.items():
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't write it out.
if value:
if key in self.fields and self.fields[key].is_set_on(self):
xml.set(key, unicode(value))
for source in self.html5_sources:
ele = etree.Element('source')
ele.set('src', source)
xml.append(ele)
if self.track:
ele = etree.Element('track')
ele.set('src', self.track)
xml.append(ele)
if self.handout:
ele = etree.Element('handout')
ele.set('src', self.handout)
xml.append(ele)
# sorting for easy testing of resulting xml
for transcript_language in sorted(self.transcripts.keys()):
ele = etree.Element('transcript')
ele.set('language', transcript_language)
ele.set('src', self.transcripts[transcript_language])
xml.append(ele)
if self.edx_video_id and edxval_api:
try:
xml.append(edxval_api.export_to_xml(self.edx_video_id))
except edxval_api.ValVideoNotFoundError:
pass
# handle license specifically
self.add_license_to_xml(xml)
return xml
def get_context(self):
"""
Extend context by data for transcript basic tab.
"""
_context = super(VideoDescriptor, self).get_context()
metadata_fields = copy.deepcopy(self.editable_metadata_fields)
display_name = metadata_fields['display_name']
video_url = metadata_fields['html5_sources']
youtube_id_1_0 = metadata_fields['youtube_id_1_0']
def get_youtube_link(video_id):
# First try a lookup in VAL. If we have a YouTube entry there, it overrides the
# one passed in.
if self.edx_video_id and edxval_api:
val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube")
if val_youtube_id:
video_id = val_youtube_id
if video_id:
return 'http://youtu.be/{0}'.format(video_id)
else:
return ''
_ = self.runtime.service(self, "i18n").ugettext
video_url.update({
'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or .webm video file hosted elsewhere on the Internet.'),
'display_name': _('Default Video URL'),
'field_name': 'video_url',
'type': 'VideoList',
'default_value': [get_youtube_link(youtube_id_1_0['default_value'])]
})
youtube_id_1_0_value = get_youtube_link(youtube_id_1_0['value'])
if youtube_id_1_0_value:
video_url['value'].insert(0, youtube_id_1_0_value)
metadata = {
'display_name': display_name,
'video_url': video_url
}
_context.update({'transcripts_basic_tab_metadata': metadata})
return _context
@classmethod
def _parse_youtube(cls, data):
"""
Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD"
into a dictionary. Necessary for backwards compatibility with
XML-based courses.
"""
ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
videos = data.split(',')
for video in videos:
pieces = video.split(':')
try:
speed = '%.2f' % float(pieces[0]) # normalize speed
# Handle the fact that youtube IDs got double-quoted for a period of time.
# Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
# it doesn't matter what the actual speed is for the purposes of deserializing.
youtube_id = deserialize_field(cls.youtube_id_1_0, pieces[1])
ret[speed] = youtube_id
except (ValueError, IndexError):
log.warning('Invalid YouTube ID: %s', video)
return ret
@classmethod
def _parse_video_xml(cls, xml, id_generator=None):
"""
Parse video fields out of xml_data. The fields are set if they are
present in the XML.
Arguments:
id_generator is used to generate course-specific urls and identifiers
"""
field_data = {}
# Convert between key types for certain attributes --
# necessary for backwards compatibility.
conversions = {
# example: 'start_time': cls._example_convert_start_time
}
# Convert between key names for certain attributes --
# necessary for backwards compatibility.
compat_keys = {
'from': 'start_time',
'to': 'end_time'
}
sources = xml.findall('source')
if sources:
field_data['html5_sources'] = [ele.get('src') for ele in sources]
track = xml.find('track')
if track is not None:
field_data['track'] = track.get('src')
handout = xml.find('handout')
if handout is not None:
field_data['handout'] = handout.get('src')
transcripts = xml.findall('transcript')
if transcripts:
field_data['transcripts'] = {tr.get('language'): tr.get('src') for tr in transcripts}
for attr, value in xml.items():
if attr in compat_keys:
attr = compat_keys[attr]
if attr in cls.metadata_to_strip + ('url_name', 'name'):
continue
if attr == 'youtube':
speeds = cls._parse_youtube(value)
for speed, youtube_id in speeds.items():
# should have made these youtube_id_1_00 for
# cleanliness, but hindsight doesn't need glasses
normalized_speed = speed[:-1] if speed.endswith('0') else speed
# If the user has specified html5 sources, make sure we don't use the default video
if youtube_id != '' or 'html5_sources' in field_data:
field_data['youtube_id_{0}'.format(normalized_speed.replace('.', '_'))] = youtube_id
elif attr in conversions:
field_data[attr] = conversions[attr](value)
elif attr not in cls.fields:
field_data.setdefault('xml_attributes', {})[attr] = value
else:
# We export values with json.dumps (well, except for Strings, but
# for about a month we did it for Strings also).
field_data[attr] = deserialize_field(cls.fields[attr], value)
# For backwards compatibility: Add `source` if XML doesn't have `download_video`
# attribute.
if 'download_video' not in field_data and sources:
field_data['source'] = field_data['html5_sources'][0]
# For backwards compatibility: if XML doesn't have `download_track` attribute,
# it means that it is an old format. So, if `track` has some value,
# `download_track` needs to have value `True`.
if 'download_track' not in field_data and track is not None:
field_data['download_track'] = True
video_asset_elem = xml.find('video_asset')
if (
edxval_api and
video_asset_elem is not None and
'edx_video_id' in field_data
):
# Allow ValCannotCreateError to escape
edxval_api.import_from_xml(
video_asset_elem,
field_data['edx_video_id'],
course_id=getattr(id_generator, 'target_course_id', None)
)
# load license if it exists
field_data = LicenseMixin.parse_license_from_xml(field_data, xml)
return field_data
def index_dictionary(self):
xblock_body = super(VideoDescriptor, self).index_dictionary()
video_body = {
"display_name": self.display_name,
}
def _update_transcript_for_index(language=None):
""" Find video transcript - if not found, don't update index """
try:
transcripts = self.get_transcripts_info()
transcript = self.get_transcript(
transcripts, transcript_format='txt', lang=language
)[0].replace("\n", " ")
transcript_index_name = "transcript_{}".format(language if language else self.transcript_language)
video_body.update({transcript_index_name: transcript})
except NotFoundError:
pass
if self.sub:
_update_transcript_for_index()
# Check to see if there are transcripts in other languages besides default transcript
if self.transcripts:
for language in self.transcripts.keys():
_update_transcript_for_index(language)
if "content" in xblock_body:
xblock_body["content"].update(video_body)
else:
xblock_body["content"] = video_body
xblock_body["content_type"] = "Video"
return xblock_body
@property
def request_cache(self):
"""
Returns the request_cache from the runtime.
"""
return self.runtime.service(self, "request_cache")
@memoize_in_request_cache('request_cache')
def get_cached_val_data_for_course(self, video_profile_names, course_id):
"""
Returns the VAL data for the requested video profiles for the given course.
"""
return edxval_api.get_video_info_for_course_and_profiles(unicode(course_id), video_profile_names)
def student_view_json(self, context):
"""
Returns a JSON representation of the student_view of this XModule.
The contract of the JSON content is between the caller and the particular XModule.
"""
# If the "only_on_web" field is set on this video, do not return the rest of the video's data
# in this json view, since this video is to be accessed only through its web view."
if self.only_on_web:
return {"only_on_web": True}
encoded_videos = {}
val_video_data = {}
# Check in VAL data first if edx_video_id exists
if self.edx_video_id:
video_profile_names = context.get("profiles", [])
# get and cache bulk VAL data for course
val_course_data = self.get_cached_val_data_for_course(video_profile_names, self.location.course_key)
val_video_data = val_course_data.get(self.edx_video_id, {})
# Get the encoded videos if data from VAL is found
if val_video_data:
encoded_videos = val_video_data.get('profiles', {})
# If information for this edx_video_id is not found in the bulk course data, make a
# separate request for this individual edx_video_id, unless cache misses are disabled.
# This is useful/required for videos that don't have a course designated, such as the introductory video
# that is shared across many courses. However, this results in a separate database request so watch
# out for any performance hit if many such videos exist in a course. Set the 'allow_cache_miss' parameter
# to False to disable this fall back.
elif context.get("allow_cache_miss", "True").lower() == "true":
try:
val_video_data = edxval_api.get_video_info(self.edx_video_id)
# Unfortunately, the VAL API is inconsistent in how it returns the encodings, so remap here.
for enc_vid in val_video_data.pop('encoded_videos'):
encoded_videos[enc_vid['profile']] = {key: enc_vid[key] for key in ["url", "file_size"]}
except edxval_api.ValVideoNotFoundError:
pass
# Fall back to other video URLs in the video module if not found in VAL
if not encoded_videos:
video_url = self.html5_sources[0] if self.html5_sources else self.source
if video_url:
encoded_videos["fallback"] = {
"url": video_url,
"file_size": 0, # File size is unknown for fallback URLs
}
transcripts_info = self.get_transcripts_info()
transcripts = {
lang: self.runtime.handler_url(self, 'transcript', 'download', query="lang=" + lang, thirdparty=True)
for lang in self.available_translations(transcripts_info, verify_assets=False)
}
return {
"only_on_web": self.only_on_web,
"duration": val_video_data.get('duration', None),
"transcripts": transcripts,
"encoded_videos": encoded_videos,
}
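# Illustrative sketch, not part of the original module: the legacy 'youtube'
# attribute format handled by VideoDescriptor._parse_youtube, shown on a small
# example string.
def _example_parse_youtube():
    speeds = VideoDescriptor._parse_youtube('0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg')
    # {'0.75': 'jNCf2gIqpeE', '1.00': 'ZwkTiUPN0mg', '1.25': '', '1.50': ''}
    return speeds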
| agpl-3.0 |
rynomad/CCNx-Federated-Wiki-Prototype | server/express/node_modules/npm/node_modules/node-gyp/gyp/test/configurations/invalid/gyptest-configurations.py | 57 | 1029 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
test = TestGyp.TestGyp()
if test.format == 'scons':
test.skip_test('TODO: http://code.google.com/p/gyp/issues/detail?id=176\n')
for test_key in invalid_configuration_keys:
test.run_gyp('%s.gyp' % test_key, status=1, stderr=None)
expect = ['%s not allowed in the Debug configuration, found in target '
'%s.gyp:configurations#target' % (test_key, test_key)]
test.must_contain_all_lines(test.stderr(), expect)
test.pass_test()
| mit |
shifter/grr | gui/api_value_renderers.py | 4 | 12620 | #!/usr/bin/env python
"""Renderers that render RDFValues into JSON compatible data structures."""
import base64
import inspect
import numbers
import logging
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import type_info
from grr.lib import utils
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
class ApiValueRenderer(object):
"""Baseclass for API renderers that render RDFValues."""
__metaclass__ = registry.MetaclassRegistry
value_class = object
_type_list_cache = {}
_renderers_cache = {}
@classmethod
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
"""Returns renderer corresponding to a given value and rendering args."""
if inspect.isclass(value):
value_cls = value
else:
value_cls = value.__class__
cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates = []
for candidate in ApiValueRenderer.classes.values():
if candidate.value_class:
candidate_class = candidate.value_class
else:
continue
if inspect.isclass(value):
if aff4.issubclass(value_cls, candidate_class):
candidates.append((candidate, candidate_class))
else:
if isinstance(value, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError("No renderer found for value %s." %
value.__class__.__name__)
candidates = sorted(candidates,
key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
cls._renderers_cache[cache_key] = renderer_cls
return renderer_cls(limit_lists=limit_lists)
def __init__(self, limit_lists=-1):
super(ApiValueRenderer, self).__init__()
self.limit_lists = limit_lists
def _PassThrough(self, value):
renderer = ApiValueRenderer.GetRendererForValueOrClass(
value, limit_lists=self.limit_lists)
return renderer.RenderValue(value)
def _IncludeTypeInfo(self, result, original_value):
# Converted value is placed in the resulting dictionary under the 'value'
# key.
if hasattr(original_value, "age"):
age = original_value.age.AsSecondsFromEpoch()
else:
age = 0
return dict(type=original_value.__class__.__name__,
value=result,
age=age)
def RenderValue(self, value):
"""Renders given value into plain old python objects."""
return self._IncludeTypeInfo(utils.SmartUnicode(value), value)
def RenderMetadata(self, value_cls):
"""Renders metadata of a given value class.
Args:
value_cls: Metadata of this class will be rendered. This class is
guaranteed to be (or to be a subclass of) value_class.
Returns:
Dictionary with class metadata.
"""
result = dict(name=value_cls.__name__,
mro=[klass.__name__ for klass in value_cls.__mro__],
doc=value_cls.__doc__ or "",
kind="primitive")
try:
default_value = RenderValue(value_cls())
result["default"] = default_value
except Exception as e: # pylint: disable=broad-except
logging.debug("Can't create default for primitive %s: %s",
value_cls.__name__, e)
return result
class ApiNumberRenderer(ApiValueRenderer):
"""Renderer for numbers."""
value_class = numbers.Number
def RenderValue(self, value):
# Always render ints as longs - so that there's no ambiguity in the UI
# renderers when type depends on the value.
if isinstance(value, int):
value = long(value)
return self._IncludeTypeInfo(value, value)
class ApiStringRenderer(ApiValueRenderer):
"""Renderer for strings."""
value_class = basestring
def RenderValue(self, value):
return self._IncludeTypeInfo(utils.SmartUnicode(value), value)
class ApiEnumRenderer(ApiValueRenderer):
"""Renderer for deprecated (old-style) enums."""
value_class = rdf_structs.Enum
def RenderValue(self, value):
return self._IncludeTypeInfo(value.name, value)
class ApiEnumNamedValueRenderer(ApiValueRenderer):
"""Renderer for new-style enums."""
value_class = rdf_structs.EnumNamedValue
def RenderValue(self, value):
return self._IncludeTypeInfo(value.name, value)
class ApiDictRenderer(ApiValueRenderer):
"""Renderer for dicts."""
value_class = dict
def RenderValue(self, value):
result = {}
for k, v in value.items():
result[k] = self._PassThrough(v)
return self._IncludeTypeInfo(result, value)
class ApiRDFDictRenderer(ApiDictRenderer):
"""Renderer for RDF Dict instances."""
value_class = rdf_protodict.Dict
class FetchMoreLink(rdfvalue.RDFValue):
"""Stub used to display 'More data available...' link."""
class ApiListRenderer(ApiValueRenderer):
"""Renderer for lists."""
value_class = list
def RenderValue(self, value):
if self.limit_lists == 0:
return "<lists are omitted>"
elif self.limit_lists == -1:
return [self._PassThrough(v) for v in value]
else:
result = [self._PassThrough(v) for v in list(value)[:self.limit_lists]]
if len(value) > self.limit_lists:
result.append(dict(age=0,
type=FetchMoreLink.__name__,
url="to/be/implemented"))
return result
class ApiTupleRenderer(ApiListRenderer):
"""Renderer for tuples."""
value_class = tuple
class ApiSetRenderer(ApiListRenderer):
"""Renderer for sets."""
value_class = set
class ApiRepeatedFieldHelperRenderer(ApiListRenderer):
"""Renderer for repeated fields helpers."""
value_class = rdf_structs.RepeatedFieldHelper
class ApiRDFValueArrayRenderer(ApiListRenderer):
"""Renderer for RDFValueArray."""
value_class = rdf_protodict.RDFValueArray
class ApiRDFBoolRenderer(ApiValueRenderer):
"""Renderer for RDFBool."""
value_class = rdfvalue.RDFBool
def RenderValue(self, value):
return self._IncludeTypeInfo(value != 0, value)
class ApiRDFBytesRenderer(ApiValueRenderer):
"""Renderer for RDFBytes."""
value_class = rdfvalue.RDFBytes
def RenderValue(self, value):
result = base64.b64encode(value.SerializeToString())
return self._IncludeTypeInfo(result, value)
class ApiRDFStringRenderer(ApiValueRenderer):
"""Renderer for RDFString."""
value_class = rdfvalue.RDFString
def RenderValue(self, value):
result = utils.SmartUnicode(value)
return self._IncludeTypeInfo(result, value)
class ApiRDFIntegerRenderer(ApiValueRenderer):
"""Renderer for RDFInteger."""
value_class = rdfvalue.RDFInteger
def RenderValue(self, value):
result = int(value)
return self._IncludeTypeInfo(result, value)
class ApiFlowStateRenderer(ApiValueRenderer):
"""Renderer for FlowState."""
value_class = rdf_flows.FlowState
def RenderValue(self, value):
return self._PassThrough(value.data)
class ApiDataBlobRenderer(ApiValueRenderer):
"""Renderer for DataBlob."""
value_class = rdf_protodict.DataBlob
def RenderValue(self, value):
return self._PassThrough(value.GetValue())
class ApiHashDigestRenderer(ApiValueRenderer):
"""Renderer for hash digests."""
value_class = rdfvalue.HashDigest
def RenderValue(self, value):
result = utils.SmartStr(value)
return self._IncludeTypeInfo(result, value)
class ApiEmbeddedRDFValueRenderer(ApiValueRenderer):
"""Renderer for EmbeddedRDFValue."""
value_class = rdf_protodict.EmbeddedRDFValue
def RenderValue(self, value):
return self._PassThrough(value.payload)
class ApiRDFProtoStructRenderer(ApiValueRenderer):
"""Renderer for RDFProtoStructs."""
value_class = rdf_structs.RDFProtoStruct
value_processors = []
metadata_processors = []
def RenderValue(self, value):
result = value.AsDict()
for k, v in value.AsDict().items():
result[k] = self._PassThrough(v)
for processor in self.value_processors:
result = processor(self, result, value)
result = self._IncludeTypeInfo(result, value)
return result
def RenderMetadata(self, value_cls):
fields = []
for field_desc in value_cls.type_infos:
repeated = isinstance(field_desc, type_info.ProtoList)
if hasattr(field_desc, "delegate"):
field_desc = field_desc.delegate
field = {
"name": field_desc.name,
"index": field_desc.field_number,
"repeated": repeated,
"dynamic": isinstance(field_desc, type_info.ProtoDynamicEmbedded)
}
field_type = field_desc.type
if field_type is not None:
field["type"] = field_type.__name__
if field_type.context_help_url:
field["context_help_url"] = field_type.context_help_url
if field_type == rdf_structs.EnumNamedValue:
allowed_values = []
for enum_label in sorted(field_desc.enum, key=field_desc.enum.get):
enum_value = field_desc.enum[enum_label]
labels = [rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
for x in enum_value.labels or []]
allowed_values.append(dict(name=enum_label,
value=int(enum_value),
labels=labels,
doc=enum_value.description))
field["allowed_values"] = allowed_values
field_default = None
if (field_desc.default is not None
and not aff4.issubclass(field_type, rdf_structs.RDFStruct)
and hasattr(field_desc, "GetDefault")):
field_default = field_desc.GetDefault()
field["default"] = RenderValue(field_default)
if field_desc.description:
field["doc"] = field_desc.description
if field_desc.friendly_name:
field["friendly_name"] = field_desc.friendly_name
if field_desc.labels:
field["labels"] = [rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
for x in field_desc.labels]
fields.append(field)
for processor in self.metadata_processors:
fields = processor(self, fields)
result = dict(name=value_cls.__name__,
mro=[klass.__name__ for klass in value_cls.__mro__],
doc=value_cls.__doc__ or "",
fields=fields,
kind="struct")
if getattr(value_cls, "union_field", None):
result["union_field"] = value_cls.union_field
struct_default = None
try:
struct_default = value_cls()
except Exception as e: # pylint: disable=broad-except
# TODO(user): Some RDFStruct classes can't be constructed using
# default constructor (without arguments). Fix the code so that
# we can either construct all the RDFStruct classes with default
# constructors or know exactly which classes can't be constructed
# with default constructors.
logging.debug("Can't create default for struct %s: %s",
                    value_cls.__name__, e)
if struct_default is not None:
result["default"] = RenderValue(struct_default)
return result
class ApiGrrMessageRenderer(ApiRDFProtoStructRenderer):
"""Renderer for GrrMessage objects."""
value_class = rdf_flows.GrrMessage
def RenderPayload(self, result, value):
"""Renders GrrMessage payload and renames args_rdf_name field."""
if "args_rdf_name" in result:
result["payload_type"] = result["args_rdf_name"]
del result["args_rdf_name"]
if "args" in result:
result["payload"] = self._PassThrough(value.payload)
del result["args"]
return result
def RenderPayloadMetadata(self, fields):
"""Payload-aware metadata processor."""
for f in fields:
if f["name"] == "args_rdf_name":
f["name"] = "payload_type"
if f["name"] == "args":
f["name"] = "payload"
return fields
value_processors = [RenderPayload]
metadata_processors = [RenderPayloadMetadata]
def RenderValue(value, limit_lists=-1):
"""Render given RDFValue as plain old python objects."""
if value is None:
return None
renderer = ApiValueRenderer.GetRendererForValueOrClass(
value, limit_lists=limit_lists)
return renderer.RenderValue(value)
def RenderTypeMetadata(value_cls):
renderer = ApiValueRenderer.GetRendererForValueOrClass(value_cls)
return renderer.RenderMetadata(value_cls)
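# Illustrative sketch, not part of the original module: RenderValue turns an
# RDFValue into plain Python structures with type information attached.
def _example_render_value():
    rendered = RenderValue(rdfvalue.RDFString("hello"))
    # e.g. {'type': 'RDFString', 'value': u'hello', 'age': 0}
    return rendered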
| apache-2.0 |
xozzo/pyfootball | setup.py | 1 | 1257 | from setuptools import setup, find_packages
import os
if os.path.exists('README.rst'):
readme_path = 'README.rst'
else:
readme_path = 'README.md'
setup(
name='pyfootball',
version='1.0.1',
description='A client library for the football-data.org REST API',
long_description=open(readme_path).read(),
url='https://github.com/xozzo/pyfootball',
author='Timothy Ng',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5'
],
keywords='api wrapper client library football data',
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'venv']),
install_requires=['requests'],
test_suite='tests',
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev]
extras_require={
'dev': ['sphinx', 'sphinx-autobuild']
}
)
| mit |
natefoo/ansible-modules-extras | network/a10/a10_service_group.py | 117 | 13447 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb service-group objects
(c) 2014, Mischa Peters <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: a10_service_group
version_added: 1.8
short_description: Manage A10 Networks devices' service groups
description:
- Manage slb service-group objects on A10 Networks devices via aXAPI
author: "Mischa Peters (@mischapeters)"
notes:
- Requires A10 Networks aXAPI 2.1
- When a server doesn't exist and is added to the service-group the server will be created
options:
host:
description:
- hostname or ip of your A10 Networks device
required: true
default: null
aliases: []
choices: []
username:
description:
- admin account of your A10 Networks device
required: true
default: null
aliases: ['user', 'admin']
choices: []
password:
description:
- admin password of your A10 Networks device
required: true
default: null
aliases: ['pass', 'pwd']
choices: []
service_group:
description:
- slb service-group name
required: true
default: null
aliases: ['service', 'pool', 'group']
choices: []
service_group_protocol:
description:
- slb service-group protocol
required: false
default: tcp
aliases: ['proto', 'protocol']
choices: ['tcp', 'udp']
service_group_method:
description:
- slb service-group loadbalancing method
required: false
default: round-robin
aliases: ['method']
choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']
servers:
description:
- A list of servers to add to the service group. Each list item should be a
dictionary which specifies the C(server:) and C(port:), but can also optionally
specify the C(status:). See the examples below for details.
required: false
default: null
aliases: []
choices: []
write_config:
description:
- If C(yes), any changes will cause a write of the running configuration
to non-volatile memory. This will save I(all) configuration changes,
including those that may have been made manually or through other modules,
so care should be taken when specifying C(yes).
required: false
default: "no"
choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new service-group
- a10_service_group:
host: a10.mydomain.com
username: myadmin
password: mypassword
service_group: sg-80-tcp
servers:
- server: foo1.mydomain.com
port: 8080
- server: foo2.mydomain.com
port: 8080
- server: foo3.mydomain.com
port: 8080
- server: foo4.mydomain.com
port: 8080
status: disabled
'''
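# A hypothetical companion example (kept outside the EXAMPLES block above):
# removing the same group again relies on the 'state' option defined in the
# argument spec below, e.g.
#
#   - a10_service_group:
#       host: a10.mydomain.com
#       username: myadmin
#       password: mypassword
#       service_group: sg-80-tcp
#       state: absent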
VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
VALID_SERVER_FIELDS = ['server', 'port', 'status']
def validate_servers(module, servers):
for item in servers:
for key in item:
if key not in VALID_SERVER_FIELDS:
module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS)))
# validate the server name is present
if 'server' not in item:
module.fail_json(msg="server definitions must define the server field")
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except:
module.fail_json(msg="server port definitions must be integers")
else:
module.fail_json(msg="server definitions must define the port field")
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
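# Sketch of the structure validate_servers() expects (hostnames are
# hypothetical): every entry needs 'server' and an integer-convertible 'port';
# 'status' is optional and is normalized to enabled (1) when omitted:
#
#   servers = [{'server': 'foo1.mydomain.com', 'port': '8080'},
#              {'server': 'foo2.mydomain.com', 'port': 8080, 'status': 'disabled'}]
#   validate_servers(module, servers)   # converts ports and statuses in place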
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True),
service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']),
service_group_method=dict(type='str', default='round-robin',
aliases=['method'],
choices=['round-robin',
'weighted-rr',
'least-connection',
'weighted-least-connection',
'service-least-connection',
'service-weighted-least-connection',
'fastest-response',
'least-request',
'round-robin-strict',
'src-ip-only-hash',
'src-ip-hash']),
servers=dict(type='list', aliases=['server', 'member'], default=[]),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
state = module.params['state']
write_config = module.params['write_config']
slb_service_group = module.params['service_group']
slb_service_group_proto = module.params['service_group_protocol']
slb_service_group_method = module.params['service_group_method']
slb_servers = module.params['servers']
if slb_service_group is None:
module.fail_json(msg='service_group is required')
axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json'
load_balancing_methods = {'round-robin': 0,
'weighted-rr': 1,
'least-connection': 2,
'weighted-least-connection': 3,
'service-least-connection': 4,
'service-weighted-least-connection': 5,
'fastest-response': 6,
'least-request': 7,
'round-robin-strict': 8,
'src-ip-only-hash': 14,
'src-ip-hash': 15}
if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp':
protocol = 2
else:
protocol = 3
# validate the server data list structure
validate_servers(module, slb_servers)
json_post = {
'service_group': {
'name': slb_service_group,
'protocol': protocol,
'lb_method': load_balancing_methods[slb_service_group_method],
}
}
# first we authenticate to get a session id
session_url = axapi_authenticate(module, axapi_base_url, username, password)
# then we check to see if the specified group exists
slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
slb_service_group_exist = not axapi_failure(slb_result)
changed = False
if state == 'present':
# before creating/updating we need to validate that servers
# defined in the servers list exist to prevent errors
checked_servers = []
for server in slb_servers:
result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']}))
if axapi_failure(result):
module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server'])
checked_servers.append(server['server'])
if not slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
else:
# check to see if the service group definition without the
# server members is different, and update that individually
# if it needs it
do_update = False
for field in VALID_SERVICE_GROUP_FIELDS:
if json_post['service_group'][field] != slb_result['service_group'][field]:
do_update = True
break
if do_update:
result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
# next we pull the defined list of servers out of the returned
# results to make it a bit easier to iterate over
defined_servers = slb_result.get('service_group', {}).get('member_list', [])
# next we add/update new member servers from the user-specified
# list if they're different or not on the target device
for server in slb_servers:
found = False
different = False
for def_server in defined_servers:
if server['server'] == def_server['server']:
found = True
for valid_field in VALID_SERVER_FIELDS:
if server[valid_field] != def_server[valid_field]:
different = True
break
if found or different:
break
# add or update as required
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data))
changed = True
elif different:
result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data))
changed = True
# finally, remove any servers that are on the target
# device but were not specified in the list given
for server in defined_servers:
found = False
for slb_server in slb_servers:
if server['server'] == slb_server['server']:
found = True
break
# remove if not found
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
else:
result = slb_result
elif state == 'absent':
if slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
changed = True
else:
result = dict(msg="the service group was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.a10 import *
main()
| gpl-3.0 |
feroda/django | tests/template_tests/templatetags/custom.py | 42 | 4823 | import operator
import warnings
from django import template
from django.template.defaultfilters import stringfilter
from django.utils import six
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.filter
def noop(value, param=None):
"""A noop filter that always return its first argument and does nothing with
its second (optional) one.
Useful for testing out whitespace in filter arguments (see #19882)."""
return value
@register.simple_tag(takes_context=True)
def context_stack_length(context):
return len(context.dicts)
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
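# Hypothetical template usage of the two renamed tags registered above
# (the numbers are illustrative):
#
#   {% load custom %}
#   {% minustwo 4 %}   {# renders 2 #}
#   {% minusone 4 %}   {# renders 3 #}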
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
| bsd-3-clause |
frankk00/realtor | oauth_provider/oauth.py | 1 | 23473 | """
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
    def __init__(self, message='OAuth error occurred.'):
self.message = message
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
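# Round-trip sketch for OAuthToken serialization (key/secret values are
# hypothetical):
#
#   tok = OAuthToken('request-key', 'request-secret')
#   s = tok.to_string()                # 'oauth_token_secret=...&oauth_token=...'
#   same = OAuthToken.from_string(s)   # same.key == 'request-key'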
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.iteritems():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.iteritems()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in params.items()]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urlparse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
"""A worker to check the validity of a request against a data store."""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, data_store):
self.data_store = data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except OAuthError:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except OAuthError:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        logger.warning("!!! IN OAuthServer.fetch_access_token OAuth Params: %s"%oauth_request.parameters)
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except OAuthError:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token
def verify_request(self, oauth_request):
"""Verifies an api call and checks all the parameters."""
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# Get the access token.
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
def authorize_token(self, token, user):
"""Authorize a request token."""
return self.data_store.authorize_request_token(token, user)
def get_callback(self, oauth_request):
"""Get the callback URL."""
return oauth_request.get_parameter('oauth_callback')
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, oauth_request):
"""Verify the correct version request for this server."""
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, oauth_request):
"""Figure out the signature with some defaults."""
try:
signature_method = oauth_request.get_parameter(
'oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
def _get_token(self, oauth_request, token_type='access'):
"""Try to find the token for the provided request token key."""
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _get_verifier(self, oauth_request):
return oauth_request.get_parameter('oauth_verifier')
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# Validate the signature.
valid_sig = signature_method.check_signature(oauth_request, consumer,
token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(
oauth_request, consumer, token)
logging.error("key: %s",key)
logging.error("base: %s",base)
raise OAuthError('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = abs(now - timestamp)
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' %
(timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
"""Verify that the nonce is uniqueish."""
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
"""OAuthClient is a worker to attempt to execute a request."""
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def access_resource(self, oauth_request):
"""-> Some protected resource."""
raise NotImplementedError
class OAuthDataStore(object):
"""A database abstraction used to lookup consumers and tokens."""
def lookup_consumer(self, key):
"""-> OAuthConsumer."""
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
"""-> OAuthToken."""
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_request_token(self, oauth_consumer, oauth_callback):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
"""-> OAuthToken."""
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
"""-> OAuthToken."""
raise NotImplementedError
class OAuthSignatureMethod(object):
"""A strategy class that implements a signature method."""
def get_name(self):
"""-> str."""
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
"""-> str key, str raw."""
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
"""-> str."""
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
logging.info("Built signature: %s"%(built))
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
"""Builds the base signature string."""
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
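# Minimal signing sketch using the classes in this module (consumer, token and
# URL values are hypothetical):
#
#   consumer = OAuthConsumer('ckey', 'csecret')
#   token = OAuthToken('tkey', 'tsecret')
#   request = OAuthRequest.from_consumer_and_token(
#       consumer, token=token, http_method='GET',
#       http_url='http://example.com/resource')
#   request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
#   headers = request.to_header(realm='http://example.com/')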
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
"""Concatenates the consumer key and secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key | bsd-3-clause |
lakshayg/tensorflow | tensorflow/contrib/testing/python/framework/fake_summary_writer.py | 75 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fake summary writer for unit tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.summary.writer import writer
from tensorflow.python.summary.writer import writer_cache
# TODO(ptucker): Replace with mock framework.
class FakeSummaryWriter(object):
"""Fake summary writer."""
_replaced_summary_writer = None
@classmethod
def install(cls):
if cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter already installed.')
cls._replaced_summary_writer = writer.FileWriter
writer.FileWriter = FakeSummaryWriter
writer_cache.FileWriter = FakeSummaryWriter
@classmethod
def uninstall(cls):
if not cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter not installed.')
writer.FileWriter = cls._replaced_summary_writer
writer_cache.FileWriter = cls._replaced_summary_writer
cls._replaced_summary_writer = None
def __init__(self, logdir, graph=None):
self._logdir = logdir
self._graph = graph
self._summaries = {}
self._added_graphs = []
self._added_meta_graphs = []
self._added_session_logs = []
@property
def summaries(self):
return self._summaries
def assert_summaries(self,
test_case,
expected_logdir=None,
expected_graph=None,
expected_summaries=None,
expected_added_graphs=None,
expected_added_meta_graphs=None,
expected_session_logs=None):
"""Assert expected items have been added to summary writer."""
if expected_logdir is not None:
test_case.assertEqual(expected_logdir, self._logdir)
if expected_graph is not None:
test_case.assertTrue(expected_graph is self._graph)
expected_summaries = expected_summaries or {}
for step in expected_summaries:
test_case.assertTrue(
step in self._summaries,
msg='Missing step %s from %s.' % (step, self._summaries.keys()))
actual_simple_values = {}
for step_summary in self._summaries[step]:
for v in step_summary.value:
# Ignore global_step/sec since it's written by Supervisor in a
# separate thread, so it's non-deterministic how many get written.
if 'global_step/sec' != v.tag:
actual_simple_values[v.tag] = v.simple_value
test_case.assertEqual(expected_summaries[step], actual_simple_values)
if expected_added_graphs is not None:
test_case.assertEqual(expected_added_graphs, self._added_graphs)
if expected_added_meta_graphs is not None:
test_case.assertEqual(expected_added_meta_graphs, self._added_meta_graphs)
if expected_session_logs is not None:
test_case.assertEqual(expected_session_logs, self._added_session_logs)
def add_summary(self, summ, current_global_step):
"""Add summary."""
if isinstance(summ, bytes):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summ)
summ = summary_proto
if current_global_step in self._summaries:
step_summaries = self._summaries[current_global_step]
else:
step_summaries = []
self._summaries[current_global_step] = step_summaries
step_summaries.append(summ)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_graph(self, graph, global_step=None, graph_def=None):
"""Add graph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
if graph_def is not None:
raise ValueError('Unexpected graph_def %s.' % graph_def)
self._added_graphs.append(graph)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Add metagraph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_meta_graphs.append(meta_graph_def)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_session_log(self, session_log, global_step=None):
# pylint: disable=unused-argument
self._added_session_logs.append(session_log)
def flush(self):
pass
def reopen(self):
pass
def close(self):
pass
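# Hypothetical test usage: swap the fake in, exercise code that writes
# summaries, then assert on what was captured (the logdir path is illustrative):
#
#   FakeSummaryWriter.install()
#   try:
#       w = writer.FileWriter('/tmp/logdir')   # now returns a FakeSummaryWriter
#       ...                                    # code under test adds summaries
#       w.assert_summaries(test_case, expected_logdir='/tmp/logdir')
#   finally:
#       FakeSummaryWriter.uninstall()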
| apache-2.0 |
mauelsha/linux | scripts/gdb/linux/dmesg.py | 630 | 1991 | #
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
class LxDmesg(gdb.Command):
"""Print Linux kernel log buffer."""
def __init__(self):
super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
inf = gdb.inferiors()[0]
start = log_buf_addr + log_first_idx
if log_first_idx < log_next_idx:
log_buf_2nd_half = -1
length = log_next_idx - log_first_idx
log_buf = inf.read_memory(start, length)
else:
log_buf_2nd_half = log_buf_len - log_first_idx
log_buf = inf.read_memory(start, log_buf_2nd_half) + \
inf.read_memory(log_buf_addr, log_next_idx)
pos = 0
while pos < log_buf.__len__():
length = utils.read_u16(log_buf[pos + 8:pos + 10])
if length == 0:
if log_buf_2nd_half == -1:
gdb.write("Corrupted log buffer!\n")
break
pos = log_buf_2nd_half
continue
text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
text = log_buf[pos + 16:pos + 16 + text_len]
time_stamp = utils.read_u64(log_buf[pos:pos + 8])
for line in memoryview(text).tobytes().splitlines():
gdb.write("[{time:12.6f}] {line}\n".format(
time=time_stamp / 1000000000.0,
line=line))
pos += length
LxDmesg()
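# Typical interactive use once the kernel gdb helper scripts are loaded
# (the output line is illustrative):
#
#   (gdb) lx-dmesg
#   [    0.000000] Linux version ...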
| gpl-2.0 |
fsschneider/DeepOBS | deepobs/tensorflow/datasets/two_d.py | 1 | 4790 | # -*- coding: utf-8 -*-
"""2D DeepOBS dataset."""
import numpy as np
import tensorflow as tf
from . import dataset
class two_d(dataset.DataSet):
"""DeepOBS data set class to create two dimensional stochastic testproblems.
This toy data set consists of a fixed number (``train_size``) of iid draws
from two scalar zero-mean normal distributions with standard deviation
specified by the ``noise_level``.
Args:
batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
      is not a divider of the dataset size (``10000`` for train and test) the
remainder is dropped in each epoch (after shuffling).
train_size (int): Size of the training data set. This will also be used as
      the train_eval and test set size. Defaults to ``10000``.
noise_level (float): Standard deviation of the data points around the mean.
The data points are drawn from a Gaussian distribution. Defaults to
``1.0``.
Attributes:
batch: A tuple ``(x, y)`` of tensors with random x and y that can be used to
create a noisy two dimensional testproblem. Executing these
tensors raises a ``tf.errors.OutOfRangeError`` after one epoch.
train_init_op: A tensorflow operation initializing the dataset for the
training phase.
train_eval_init_op: A tensorflow operation initializing the testproblem for
evaluating on training data.
test_init_op: A tensorflow operation initializing the testproblem for
evaluating on test data.
phase: A string-value tf.Variable that is set to "train", "train_eval" or
"test", depending on the current phase. This can be used by testproblems
to adapt their behavior to this phase.
"""
def __init__(self, batch_size, train_size=10000, noise_level=1.0):
"""Creates a new 2D instance.
Args:
batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
        is not a divider of the dataset size (10k for train and test) the
remainder is dropped in each epoch (after shuffling).
train_size (int): Size of the training data set. This will also be used as
        the train_eval and test set size. Defaults to ``10000``.
noise_level (float): Standard deviation of the data points around the mean.
The data points are drawn from a Gaussian distribution. Defaults to
``1.0``.
"""
self._name = "two_d"
self._train_size = train_size
self._noise_level = noise_level
super(two_d, self).__init__(batch_size)
def _make_dataset(self, data_x, data_y, shuffle=True):
"""Creates a 2D data set (helper used by ``.make_*_datset`` below).
Args:
data_x (np.array): Numpy array containing the ``X`` values of the
data points.
data_y (np.array): Numpy array containing the ``y`` values of the
data points.
shuffle (bool): Switch to turn on or off shuffling of the data set.
Defaults to ``True``.
Returns:
A tf.data.Dataset yielding batches of 2D data.
"""
with tf.name_scope(self._name):
with tf.device('/cpu:0'):
data = tf.data.Dataset.from_tensor_slices((data_x, data_y))
if shuffle:
data = data.shuffle(buffer_size=20000)
data = data.batch(self._batch_size, drop_remainder=True)
data = data.prefetch(buffer_size=4)
return data
def _make_train_dataset(self):
"""Creates the 2D training dataset.
Returns:
A tf.data.Dataset instance with batches of training data.
"""
# Draw data from a random generator with a fixed seed to always get the
# same data.
rng = np.random.RandomState(42)
data_x = rng.normal(0.0, self._noise_level, self._train_size)
data_y = rng.normal(0.0, self._noise_level, self._train_size)
data_x = np.float32(data_x)
data_y = np.float32(data_y)
return self._make_dataset(data_x, data_y, shuffle=True)
def _make_train_eval_dataset(self):
"""Creates the 2D train eval dataset.
Returns:
A tf.data.Dataset instance with batches of training eval data.
"""
return self._train_dataset.take(self._train_size // self._batch_size)
def _make_test_dataset(self):
"""Creates the 2D test dataset.
Returns:
A tf.data.Dataset instance with batches of test data.
"""
# recovers the deterministic 2D function using zeros
data_x, data_y = np.zeros(self._train_size), np.zeros(self._train_size)
data_x = np.float32(data_x)
data_y = np.float32(data_y)
return self._make_dataset(data_x, data_y, shuffle=False)
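# Hypothetical usage sketch, relying on the attributes documented in the class
# docstring above (batch tensors plus the phase initializer ops):
#
#   data = two_d(batch_size=128, train_size=10000, noise_level=1.0)
#   x, y = data.batch
#   with tf.Session() as sess:
#       sess.run(data.train_init_op)
#       x_val, y_val = sess.run([x, y])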
| mit |
oberlin/django | django/contrib/postgres/fields/jsonb.py | 341 | 2994 | import json
from psycopg2.extras import Json
from django.contrib.postgres import forms, lookups
from django.core import exceptions
from django.db.models import Field, Transform
from django.utils.translation import ugettext_lazy as _
__all__ = ['JSONField']
class JSONField(Field):
empty_strings_allowed = False
description = _('A JSON object')
default_error_messages = {
'invalid': _("Value must be valid JSON."),
}
def db_type(self, connection):
return 'jsonb'
def get_transform(self, name):
transform = super(JSONField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def get_prep_value(self, value):
if value is not None:
return Json(value)
return value
def get_prep_lookup(self, lookup_type, value):
if lookup_type in ('has_key', 'has_keys', 'has_any_keys'):
return value
if isinstance(value, (dict, list)):
return Json(value)
return super(JSONField, self).get_prep_lookup(lookup_type, value)
def validate(self, value, model_instance):
super(JSONField, self).validate(value, model_instance)
try:
json.dumps(value)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def value_to_string(self, obj):
value = self.value_from_object(obj)
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.JSONField}
defaults.update(kwargs)
return super(JSONField, self).formfield(**defaults)
JSONField.register_lookup(lookups.DataContains)
JSONField.register_lookup(lookups.ContainedBy)
JSONField.register_lookup(lookups.HasKey)
JSONField.register_lookup(lookups.HasKeys)
JSONField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if len(key_transforms) > 1:
return "{} #> %s".format(lhs), [key_transforms] + params
try:
int(self.key_name)
except ValueError:
lookup = "'%s'" % self.key_name
else:
lookup = "%s" % self.key_name
return "%s -> %s" % (lhs, lookup), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
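# Hypothetical ORM queries enabled by the lookups and transforms registered
# above (the Dog model and its 'data' JSONField are illustrative):
#
#   Dog.objects.filter(data__contains={'breed': 'collie'})   # DataContains
#   Dog.objects.filter(data__has_key='owner')                # HasKey
#   Dog.objects.filter(data__owner__name='Bob')              # chained KeyTransform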
| bsd-3-clause |
af1rst/bite-project | deps/gdata-python-client/tests/gdata_tests/codesearch_test.py | 133 | 1930 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
import gdata.codesearch
import gdata.test_data
class CodeSearchDataTest(unittest.TestCase):
def setUp(self):
self.feed = gdata.codesearch.CodesearchFeedFromString(
gdata.test_data.CODE_SEARCH_FEED)
def testCorrectXmlConversion(self):
self.assert_(self.feed.id.text ==
'http://www.google.com/codesearch/feeds/search?q=malloc')
self.assert_(len(self.feed.entry) == 10)
for entry in self.feed.entry:
if entry.id.text == ('http://www.google.com/codesearch?hl=en&q=+ma'
'lloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1'
'&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoco'
'nf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=softwa'
're/autoconf/manual/autoconf-2.60/autoconf.html-002#first'):
self.assert_(len(entry.match) == 4)
for match in entry.match:
if match.line_number == '4':
self.assert_(match.type == 'text/html')
self.assert_(entry.file.name ==
'software/autoconf/manual/autoconf-2.60/autoconf.html-002')
self.assert_(entry.package.name == 'http://www.gnu.org')
self.assert_(entry.package.uri == 'http://www.gnu.org')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
rspavel/spack | lib/spack/external/py/_log/warning.py | 218 | 2542 | import py, sys
class DeprecationWarning(DeprecationWarning):
def __init__(self, msg, path, lineno):
self.msg = msg
self.path = path
self.lineno = lineno
def __repr__(self):
return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
def __str__(self):
return self.msg
def _apiwarn(startversion, msg, stacklevel=2, function=None):
# below is mostly COPIED from python2.4/warnings.py's def warn()
# Get context information
if isinstance(stacklevel, str):
frame = sys._getframe(1)
level = 1
found = frame.f_code.co_filename.find(stacklevel) != -1
while frame:
co = frame.f_code
if co.co_filename.find(stacklevel) == -1:
if found:
stacklevel = level
break
else:
found = True
level += 1
frame = frame.f_back
else:
stacklevel = 1
msg = "%s (since version %s)" %(msg, startversion)
warn(msg, stacklevel=stacklevel+1, function=function)
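# Sketch of the two stacklevel modes accepted by _apiwarn (arguments are
# illustrative): an integer is forwarded to the warnings machinery as-is,
# while a string makes _apiwarn walk up the stack until it leaves frames whose
# filename contains that substring:
#
#   _apiwarn("1.0", "deprecated helper", stacklevel=2)
#   _apiwarn("1.0", "deprecated helper", stacklevel="py/_log/")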
def warn(msg, stacklevel=1, function=None):
if function is not None:
filename = py.std.inspect.getfile(function)
lineno = py.code.getrawcode(function).co_firstlineno
else:
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
filename = filename[:-1]
elif fnl.endswith("$py.class"):
filename = filename.replace('$py.class', '.py')
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
path = py.path.local(filename)
warning = DeprecationWarning(msg, path, lineno)
py.std.warnings.warn_explicit(warning, category=Warning,
filename=str(warning.path),
lineno=warning.lineno,
registry=py.std.warnings.__dict__.setdefault(
"__warningsregistry__", {})
)
| lgpl-2.1 |
googleinterns/learnbase | learnbase/src/main/webapp/WEB-INF/Lib/distutils/extension.py | 250 | 10904 | """distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
the full name of the extension, including any packages -- ie.
*not* a filename or pathname, but Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__ (self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
**kw # To catch unknown keywords
):
assert type(name) is StringType, "'name' must be a string"
assert (type(sources) is ListType and
map(type, sources) == [StringType]*len(sources)), \
"'sources' must be a list of strings"
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
# If there are unknown keyword options, warn about them
if len(kw):
L = kw.keys() ; L.sort()
L = map(repr, L)
msg = "Unknown Extension options: " + string.join(L, ', ')
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + '\n')
# class Extension
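# Hypothetical setup.py usage of the Extension class documented above (paths,
# macros and library names are illustrative):
#
#   Extension('demo', sources=['demo.c'],
#             include_dirs=['/usr/local/include'],
#             define_macros=[('NDEBUG', None)],
#             libraries=['m'])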
def read_setup_file (filename):
from distutils.sysconfig import \
parse_makefile, expand_makefile_vars, _variable_rx
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while 1:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
#print "original line: " + line
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
#print "expanded line: " + line
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
module = words[0]
ext = Extension(module, [])
append_next_word = None
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
switch = word[0:2] ; value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = string.find(value, "=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
ext.define_macros.append((value[0:equals],
value[equals+2:]))
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
#print "module:", module
#print "source files:", source_files
#print "cpp args:", cpp_args
#print "lib args:", library_args
#extensions[module] = { 'sources': source_files,
# 'cpp_args': cpp_args,
# 'lib_args': library_args }
return extensions
# read_setup_file ()
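# Hedged example (added for illustration, not in the original module): a
# Modules/Setup-style line such as
#
#     spam spammodule.c -DUSE_FOO -Iinclude -lbar -L/usr/local/lib
#
# would, under the parsing rules above, produce an Extension named "spam" with
# sources=["spammodule.c"], define_macros=[("USE_FOO", None)],
# include_dirs=["include"], libraries=["bar"] and
# library_dirs=["/usr/local/lib"]. The module name and flags are assumptions
# chosen only to exercise the switch handling.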
| apache-2.0 |
agusc/scrapy | scrapy/extensions/logstats.py | 127 | 1715 | import logging
from twisted.internet import task
from scrapy.exceptions import NotConfigured
from scrapy import signals
logger = logging.getLogger(__name__)
class LogStats(object):
"""Log basic scraping stats periodically"""
def __init__(self, stats, interval=60.0):
self.stats = stats
self.interval = interval
self.multiplier = 60.0 / self.interval
@classmethod
def from_crawler(cls, crawler):
interval = crawler.settings.getfloat('LOGSTATS_INTERVAL')
if not interval:
raise NotConfigured
o = cls(crawler.stats, interval)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
return o
def spider_opened(self, spider):
self.pagesprev = 0
self.itemsprev = 0
self.task = task.LoopingCall(self.log, spider)
self.task.start(self.interval)
def log(self, spider):
items = self.stats.get_value('item_scraped_count', 0)
pages = self.stats.get_value('response_received_count', 0)
irate = (items - self.itemsprev) * self.multiplier
prate = (pages - self.pagesprev) * self.multiplier
self.pagesprev, self.itemsprev = pages, items
msg = ("Crawled %(pages)d pages (at %(pagerate)d pages/min), "
"scraped %(items)d items (at %(itemrate)d items/min)")
log_args = {'pages': pages, 'pagerate': prate,
'items': items, 'itemrate': irate}
logger.info(msg, log_args, extra={'spider': spider})
def spider_closed(self, spider, reason):
if self.task.running:
self.task.stop()
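# Illustrative note (added; not part of the original extension): with the
# constructor default interval=60.0 the multiplier is 60.0 / 60.0 == 1.0, so
# the per-interval deltas are already per-minute rates. Assuming a
# hypothetical interval of 30 seconds and 12 items scraped since the last
# tick, irate == 12 * (60.0 / 30) == 24 items/min.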
| bsd-3-clause |
keithroe/vtkoptix | IO/XML/Testing/Python/TestXMLUnstructuredGridIO.py | 23 | 2784 | #!/usr/bin/env python
import os
import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.misc import vtkGetTempDir
VTK_DATA_ROOT = vtkGetDataRoot()
VTK_TEMP_DIR = vtkGetTempDir()
file0 = VTK_TEMP_DIR + '/ugFile0.vtu'
file1 = VTK_TEMP_DIR + '/ugFile1.vtu'
file2 = VTK_TEMP_DIR + '/ugFile2.vtu'
# read in some unstructured grid data
ugReader = vtk.vtkUnstructuredGridReader()
ugReader.SetFileName(VTK_DATA_ROOT + "/Data/blow.vtk")
ugReader.SetScalarsName("thickness9")
ugReader.SetVectorsName("displacement9")
extract = vtk.vtkExtractUnstructuredGridPiece()
extract.SetInputConnection(ugReader.GetOutputPort())
# write various versions
ugWriter = vtk.vtkXMLUnstructuredGridWriter()
ugWriter.SetFileName(file0)
ugWriter.SetDataModeToAscii()
ugWriter.SetInputConnection(ugReader.GetOutputPort())
ugWriter.Write()
ugWriter.SetFileName(file1)
ugWriter.SetInputConnection(extract.GetOutputPort())
ugWriter.SetDataModeToAppended()
ugWriter.SetNumberOfPieces(2)
ugWriter.Write()
ugWriter.SetFileName(file2)
ugWriter.SetDataModeToBinary()
ugWriter.SetGhostLevel(2)
ugWriter.Write()
# read the ASCII version
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(file0)
reader.Update()
ug0 = vtk.vtkUnstructuredGrid()
ug0.DeepCopy(reader.GetOutput())
sF = vtk.vtkDataSetSurfaceFilter()
sF.SetInputData(ug0)
mapper0 = vtk.vtkPolyDataMapper()
mapper0.SetInputConnection(sF.GetOutputPort())
actor0 = vtk.vtkActor()
actor0.SetMapper(mapper0)
actor0.SetPosition(0, 40, 20)
# read appended piece 0
reader.SetFileName(file1)
sF1 = vtk.vtkDataSetSurfaceFilter()
sF1.SetInputConnection(reader.GetOutputPort())
mapper1 = vtk.vtkPolyDataMapper()
mapper1.SetInputConnection(sF1.GetOutputPort())
mapper1.SetPiece(1)
mapper1.SetNumberOfPieces(2)
actor1 = vtk.vtkActor()
actor1.SetMapper(mapper1)
# read binary piece 0 (with ghost level)
reader2 = vtk.vtkXMLUnstructuredGridReader()
reader2.SetFileName(file2)
sF2 = vtk.vtkDataSetSurfaceFilter()
sF2.SetInputConnection(reader2.GetOutputPort())
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(sF2.GetOutputPort())
mapper2.SetPiece(1)
mapper2.SetNumberOfPieces(2)
mapper2.SetGhostLevel(2)
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
actor2.SetPosition(0, 0, 30)
# Create the RenderWindow, Renderer and both Actors
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren.AddActor(actor0)
ren.AddActor(actor1)
ren.AddActor(actor2)
ren.ResetCamera()
ren.GetActiveCamera().SetPosition(180, 55, 65)
ren.GetActiveCamera().SetFocalPoint(3.5, 32, 15)
renWin.SetSize(300, 300)
renWin.Render()
#os.remove(file0)
#os.remove(file1)
#os.remove(file2)
| bsd-3-clause |
foursquare/pants | contrib/go/src/python/pants/contrib/go/tasks/go_test.py | 1 | 2117 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import filter
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoTest(GoWorkspaceTask):
"""Runs `go test` on Go packages.
To run a library's tests, GoTest only requires a Go workspace to be initialized
(see GoWorkspaceTask) with links to necessary source files. It does not require
GoCompile to first compile the library to be tested -- in fact, GoTest will ignore
any binaries in "$GOPATH/pkg/", because Go test files (which live in the package
they are testing) are ignored in normal compilation, so Go test must compile everything
from scratch.
"""
@classmethod
def register_options(cls, register):
super(GoTest, cls).register_options(register)
register('--build-and-test-flags', default='',
fingerprint=True,
help='Flags to pass in to `go test` tool.')
@classmethod
def supports_passthru_args(cls):
return True
def execute(self):
# Only executes the tests from the package specified by the target roots, so
# we don't run the tests for _all_ dependencies of said package.
targets = filter(self.is_local_src, self.context.target_roots)
for target in targets:
self.ensure_workspace(target)
self._go_test(target)
def _go_test(self, target):
args = (self.get_options().build_and_test_flags.split()
+ [target.import_path]
+ self.get_passthru_args())
result, go_cmd = self.go_dist.execute_go_cmd('test', gopath=self.get_gopath(target), args=args,
workunit_factory=self.context.new_workunit,
workunit_labels=[WorkUnitLabel.TEST])
if result != 0:
raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
| apache-2.0 |
gorjuce/odoo | openerp/addons/test_converter/tests/test_html.py | 257 | 13533 | # -*- encoding: utf-8 -*-
import json
import os
import datetime
from lxml import etree
from openerp.tests import common
from openerp.tools import html_escape as e
from openerp.addons.base.ir import ir_qweb
directory = os.path.dirname(__file__)
class TestExport(common.TransactionCase):
_model = None
def setUp(self):
super(TestExport, self).setUp()
self.Model = self.registry(self._model)
def get_field(self, name):
return self.Model._fields[name]
def get_converter(self, name, type=None):
field = self.get_field(name)
for postfix in type, field.type, '':
fs = ['ir', 'qweb', 'field']
if postfix is None: continue
if postfix: fs.append(postfix)
try:
model = self.registry('.'.join(fs))
break
except KeyError: pass
return lambda value, options=None, context=None: e(model.value_to_html(
self.cr, self.uid, value, field, options=options, context=context))
class TestBasicExport(TestExport):
_model = 'test_converter.test_model'
class TestCharExport(TestBasicExport):
def test_char(self):
converter = self.get_converter('char')
value = converter('foo')
self.assertEqual(value, 'foo')
value = converter("foo<bar>")
self.assertEqual(value, "foo<bar>")
class TestIntegerExport(TestBasicExport):
def test_integer(self):
converter = self.get_converter('integer')
value = converter(42)
self.assertEqual(value, "42")
class TestFloatExport(TestBasicExport):
def setUp(self):
super(TestFloatExport, self).setUp()
self.registry('res.lang').write(self.cr, self.uid, [1], {
'grouping': '[3,0]'
})
def test_float(self):
converter = self.get_converter('float')
value = converter(42.0)
self.assertEqual(value, "42.0")
value = converter(42.0100)
self.assertEqual(value, "42.01")
value = converter(42.01234)
self.assertEqual(value, "42.01234")
value = converter(1234567.89)
self.assertEqual(value, '1,234,567.89')
def test_numeric(self):
converter = self.get_converter('numeric')
value = converter(42.0)
self.assertEqual(value, '42.00')
value = converter(42.01234)
self.assertEqual(value, '42.01')
class TestCurrencyExport(TestExport):
_model = 'test_converter.monetary'
def setUp(self):
super(TestCurrencyExport, self).setUp()
self.Currency = self.registry('res.currency')
self.base = self.create(self.Currency, name="Source", symbol=u'source')
def create(self, model, context=None, **values):
return model.browse(
self.cr, self.uid,
model.create(self.cr, self.uid, values, context=context),
context=context)
def convert(self, obj, dest):
converter = self.registry('ir.qweb.field.monetary')
options = {
'widget': 'monetary',
'display_currency': 'c2'
}
context = dict(inherit_branding=True)
converted = converter.to_html(
self.cr, self.uid, 'value', obj, options,
etree.Element('span'),
{'field': 'obj.value', 'field-options': json.dumps(options)},
'', ir_qweb.QWebContext(self.cr, self.uid, {'obj': obj, 'c2': dest, }),
context=context,
)
return converted
def test_currency_post(self):
currency = self.create(self.Currency, name="Test", symbol=u"test")
obj = self.create(self.Model, value=0.12)
converted = self.convert(obj, dest=currency)
self.assertEqual(
converted,
'<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
'data-oe-field="value" data-oe-type="monetary" '
'data-oe-expression="obj.value">'
'<span class="oe_currency_value">0.12</span>'
u'\N{NO-BREAK SPACE}{symbol}</span>'.format(
obj=obj,
symbol=currency.symbol.encode('utf-8')
).encode('utf-8'),)
def test_currency_pre(self):
currency = self.create(
self.Currency, name="Test", symbol=u"test", position='before')
obj = self.create(self.Model, value=0.12)
converted = self.convert(obj, dest=currency)
self.assertEqual(
converted,
'<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
'data-oe-field="value" data-oe-type="monetary" '
'data-oe-expression="obj.value">'
u'{symbol}\N{NO-BREAK SPACE}'
'<span class="oe_currency_value">0.12</span>'
'</span>'.format(
obj=obj,
symbol=currency.symbol.encode('utf-8')
).encode('utf-8'),)
def test_currency_precision(self):
""" Precision should be the currency's, not the float field's
"""
currency = self.create(self.Currency, name="Test", symbol=u"test",)
obj = self.create(self.Model, value=0.1234567)
converted = self.convert(obj, dest=currency)
self.assertEqual(
converted,
'<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
'data-oe-field="value" data-oe-type="monetary" '
'data-oe-expression="obj.value">'
'<span class="oe_currency_value">0.12</span>'
u'\N{NO-BREAK SPACE}{symbol}</span>'.format(
obj=obj,
symbol=currency.symbol.encode('utf-8')
).encode('utf-8'),)
class TestTextExport(TestBasicExport):
def test_text(self):
converter = self.get_converter('text')
value = converter("This is my text-kai")
self.assertEqual(value, "This is my text-kai")
value = converter("""
. The current line (address) in the buffer.
$ The last line in the buffer.
n The nth, line in the buffer where n is a number in the range [0,$].
$ The last line in the buffer.
- The previous line. This is equivalent to -1 and may be repeated with cumulative effect.
-n The nth previous line, where n is a non-negative number.
+ The next line. This is equivalent to +1 and may be repeated with cumulative effect.
""")
self.assertEqual(value, """<br>
. The current line (address) in the buffer.<br>
$ The last line in the buffer.<br>
n The nth, line in the buffer where n is a number in the range [0,$].<br>
$ The last line in the buffer.<br>
- The previous line. This is equivalent to -1 and may be repeated with cumulative effect.<br>
-n The nth previous line, where n is a non-negative number.<br>
+ The next line. This is equivalent to +1 and may be repeated with cumulative effect.<br>
""")
value = converter("""
fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a>
fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i>
""")
self.assertEqual(value, """<br>
fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a><br>
fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i><br>
""")
class TestMany2OneExport(TestBasicExport):
def test_many2one(self):
Sub = self.registry('test_converter.test_model.sub')
id0 = self.Model.create(self.cr, self.uid, {
'many2one': Sub.create(self.cr, self.uid, {'name': "Foo"})
})
id1 = self.Model.create(self.cr, self.uid, {
'many2one': Sub.create(self.cr, self.uid, {'name': "Fo<b>o</b>"})
})
def converter(record):
model = self.registry('ir.qweb.field.many2one')
return e(model.record_to_html(self.cr, self.uid, 'many2one', record))
value = converter(self.Model.browse(self.cr, self.uid, id0))
self.assertEqual(value, "Foo")
value = converter(self.Model.browse(self.cr, self.uid, id1))
self.assertEqual(value, "Fo<b>o</b>")
class TestBinaryExport(TestBasicExport):
def test_image(self):
field = self.get_field('binary')
converter = self.registry('ir.qweb.field.image')
with open(os.path.join(directory, 'test_vectors', 'image'), 'rb') as f:
content = f.read()
encoded_content = content.encode('base64')
value = e(converter.value_to_html(
self.cr, self.uid, encoded_content, field))
self.assertEqual(
value, '<img src="data:image/jpeg;base64,%s">' % (
encoded_content
))
with open(os.path.join(directory, 'test_vectors', 'pdf'), 'rb') as f:
content = f.read()
with self.assertRaises(ValueError):
e(converter.value_to_html(
self.cr, self.uid, 'binary', content.encode('base64'), field))
with open(os.path.join(directory, 'test_vectors', 'pptx'), 'rb') as f:
content = f.read()
with self.assertRaises(ValueError):
e(converter.value_to_html(
self.cr, self.uid, 'binary', content.encode('base64'), field))
class TestSelectionExport(TestBasicExport):
def test_selection(self):
[record] = self.Model.browse(self.cr, self.uid, [self.Model.create(self.cr, self.uid, {
'selection': 2,
'selection_str': 'C',
})])
converter = self.registry('ir.qweb.field.selection')
field_name = 'selection'
value = converter.record_to_html(self.cr, self.uid, field_name, record)
self.assertEqual(value, "réponse B")
field_name = 'selection_str'
value = converter.record_to_html(self.cr, self.uid, field_name, record)
self.assertEqual(value, "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?")
class TestHTMLExport(TestBasicExport):
def test_html(self):
converter = self.get_converter('html')
input = '<span>span</span>'
value = converter(input)
self.assertEqual(value, input)
class TestDatetimeExport(TestBasicExport):
def setUp(self):
super(TestDatetimeExport, self).setUp()
# set user tz to known value
Users = self.registry('res.users')
Users.write(self.cr, self.uid, self.uid, {
'tz': 'Pacific/Niue'
}, context=None)
def test_date(self):
converter = self.get_converter('date')
value = converter('2011-05-03')
# default lang/format is US
self.assertEqual(value, '05/03/2011')
def test_datetime(self):
converter = self.get_converter('datetime')
value = converter('2011-05-03 11:12:13')
# default lang/format is US
self.assertEqual(value, '05/03/2011 00:12:13')
def test_custom_format(self):
converter = self.get_converter('datetime')
converter2 = self.get_converter('date')
opts = {'format': 'MMMM d'}
value = converter('2011-03-02 11:12:13', options=opts)
value2 = converter2('2001-03-02', options=opts)
self.assertEqual(
value,
'March 2'
)
self.assertEqual(
value2,
'March 2'
)
class TestDurationExport(TestBasicExport):
def setUp(self):
super(TestDurationExport, self).setUp()
# needs to have lang installed otherwise falls back on en_US
self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
def test_negative(self):
converter = self.get_converter('float', 'duration')
with self.assertRaises(ValueError):
converter(-4)
def test_missing_unit(self):
converter = self.get_converter('float', 'duration')
with self.assertRaises(ValueError):
converter(4)
def test_basic(self):
converter = self.get_converter('float', 'duration')
result = converter(4, {'unit': 'hour'}, {'lang': 'fr_FR'})
self.assertEqual(result, u'4 heures')
result = converter(50, {'unit': 'second'}, {'lang': 'fr_FR'})
self.assertEqual(result, u'50 secondes')
def test_multiple(self):
converter = self.get_converter('float', 'duration')
result = converter(1.5, {'unit': 'hour'}, {'lang': 'fr_FR'})
self.assertEqual(result, u"1 heure 30 minutes")
result = converter(72, {'unit': 'second'}, {'lang': 'fr_FR'})
self.assertEqual(result, u"1 minute 12 secondes")
class TestRelativeDatetime(TestBasicExport):
# not sure how a test based on "current time" should be tested. Even less
# so as it would mostly be a test of babel...
def setUp(self):
super(TestRelativeDatetime, self).setUp()
# needs to have lang installed otherwise falls back on en_US
self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
def test_basic(self):
converter = self.get_converter('datetime', 'relative')
t = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
result = converter(t, context={'lang': 'fr_FR'})
self.assertEqual(result, u"il y a 1 heure")
| agpl-3.0 |
VishvajitP/django-extensions | django_extensions/utils/validatingtemplatetags.py | 26 | 2469 | from django.template import defaulttags
from django.template.base import Library, Node
from django.templatetags import future
register = Library()
error_on_old_style_url_tag = False
new_style_url_tag = False
errors = []
def before_new_template(force_new_urls):
"""Reset state ready for new template"""
global new_style_url_tag, error_on_old_style_url_tag, errors
new_style_url_tag = False
error_on_old_style_url_tag = force_new_urls
errors = []
def get_template_errors():
return errors
# Disable extends and include as they are not needed, slow parsing down, and cause duplicate errors
class NoOpNode(Node):
def render(self, context):
return ''
@register.tag
def extends(parser, token):
return NoOpNode()
@register.tag
def include(parser, token):
return NoOpNode()
# We replace load to determine whether new style urls are in use and re-patch url after
# a future version is loaded
@register.tag
def load(parser, token):
global new_style_url_tag
bits = token.contents.split()
reloaded_url_tag = False
if len(bits) >= 4 and bits[-2] == "from" and bits[-1] == "future":
for name in bits[1:-2]:
if name == "url":
new_style_url_tag = True
reloaded_url_tag = True
try:
return defaulttags.load(parser, token)
finally:
if reloaded_url_tag:
parser.tags['url'] = new_style_url
@register.tag(name='url')
def old_style_url(parser, token):
global error_on_old_style_url_tag
bits = token.split_contents()
view = bits[1]
if error_on_old_style_url_tag:
_error("Old style url tag used (only reported once per file): {%% %s %%}" % (" ".join(bits)), token)
error_on_old_style_url_tag = False
if view[0] in "\"'" and view[0] == view[-1]:
_error("Old style url tag with quotes around view name: {%% %s %%}" % (" ".join(bits)), token)
return defaulttags.url(parser, token)
def new_style_url(parser, token):
bits = token.split_contents()
view = bits[1]
if view[0] not in "\"'" or view[0] != view[-1]:
_error("New style url tag without quotes around view name: {%% %s %%}" % (" ".join(bits)), token)
return future.url(parser, token)
def _error(message, token):
origin, (start, upto) = token.source
source = origin.reload()
line = source.count("\n", 0, start) + 1 # 1 based line numbering
errors.append((origin, line, message))
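# Illustrative examples (added; not in the original module) of the two tag
# styles this validator distinguishes, assuming a URL pattern named my_view:
#
#     {% url my_view %}       old style; reported when new-style urls are forced
#     {% url "my_view" %}     new style, enabled by {% load url from future %}
#
# Old-style tags written with quotes, and new-style tags written without them,
# are likewise reported through _error() above.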
| mit |
mxOBS/deb-pkg_trusty_chromium-browser | tools/telemetry/telemetry/core/discover.py | 12 | 4024 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import inspect
import os
import re
from telemetry import decorators
from telemetry.core import camel_case
@decorators.Cache
def DiscoverModules(start_dir, top_level_dir, pattern='*'):
"""Discover all modules in |start_dir| which match |pattern|.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
pattern: Unix shell-style pattern for filtering the filenames to import.
Returns:
list of modules.
"""
modules = []
for dir_path, _, filenames in os.walk(start_dir):
for filename in filenames:
# Filter out unwanted filenames.
if filename.startswith('.') or filename.startswith('_'):
continue
if os.path.splitext(filename)[1] != '.py':
continue
if not fnmatch.fnmatch(filename, pattern):
continue
# Find the module.
module_rel_path = os.path.relpath(os.path.join(dir_path, filename),
top_level_dir)
module_name = re.sub(r'[/\\]', '.', os.path.splitext(module_rel_path)[0])
# Import the module.
try:
module = __import__(module_name, fromlist=[True])
except ImportError:
continue
modules.append(module)
return modules
# TODO(dtu): Normalize all discoverable classes to have corresponding module
# and class names, then always index by class name.
@decorators.Cache
def DiscoverClasses(start_dir, top_level_dir, base_class, pattern='*',
index_by_class_name=False):
"""Discover all classes in |start_dir| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
base_class: The base class to search for.
pattern: Unix shell-style pattern for filtering the filenames to import.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
modules = DiscoverModules(start_dir, top_level_dir, pattern)
classes = {}
for module in modules:
new_classes = DiscoverClassesInModule(
module, base_class, index_by_class_name)
classes = dict(classes.items() + new_classes.items())
return classes
@decorators.Cache
def DiscoverClassesInModule(module, base_class, index_by_class_name=False):
"""Discover all classes in |module| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
module: The module to search.
base_class: The base class to search for.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
classes = {}
for _, obj in inspect.getmembers(module):
# Ensure object is a class.
if not inspect.isclass(obj):
continue
# Include only subclasses of base_class.
if not issubclass(obj, base_class):
continue
# Exclude the base_class itself.
if obj is base_class:
continue
# Exclude protected or private classes.
if obj.__name__.startswith('_'):
continue
# Include only the module in which the class is defined.
# If a class is imported by another module, exclude those duplicates.
if obj.__module__ != module.__name__:
continue
if index_by_class_name:
key_name = camel_case.ToUnderscore(obj.__name__)
else:
key_name = module.__name__.split('.')[-1]
classes[key_name] = obj
return classes
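# Hedged usage sketch (added for illustration; the directory names and base
# class are assumptions, not part of this module):
#
#     classes = DiscoverClasses(start_dir='/path/to/benchmarks',
#                               top_level_dir='/path/to',
#                               base_class=object,
#                               index_by_class_name=True)
#
# The result maps lowercase_with_underscores class names to class objects;
# with index_by_class_name=False (the default) the keys are the defining
# modules' names.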
_counter = [0]
def _GetUniqueModuleName():
_counter[0] += 1
return "module_" + str(_counter[0])
| bsd-3-clause |
bodylabs/blmath | blmath/geometry/transform/correspondence.py | 1 | 2095 | # FIXME -- move back to core
def apply_correspondence(correspondence_src, correspondence_dst, vertices):
"""
Apply a correspondence defined between two vertex sets to a new set.
Identifies a correspondence between `correspondence_src` and
`correspondence_dst` then applies that correspondence to `vertices`.
That is, `correspondence_src` is to `correspondence_dst` as `vertices` is
to [ return value ].
`correspondence_src` and `vertices` must have the same topology. The return
value will have the same topology as `correspondence_dst`. Arguments can
be passed as `chumpy` or `numpy` arrays.
The most common use case here is establishing a relationship between an
alignment and a pointcloud or set of landmarks. The pointcloud or landmarks
can then be moved automatically as the alignment is adjusted (e.g. fit to a
different mesh, reposed, etc).
Args:
correspondence_src: The source vertices for the correspondence
correspondence_dst: The destination vertices for the correspondence
vertices: The vertices to map using the defined correspondence
Returns:
the mapped version of `vertices`
Example usage
-------------
>>> transformed_scan_vertices = apply_correspondence(
... correspondence_src=alignment.v,
... correspondence_dst=scan.v,
... vertices=reposed_alignment.v
... )
>>> transformed_scan = Mesh(v=transformed_scan_vertices, vc=scan.vc)
"""
import chumpy as ch
from bodylabs.mesh.landmarking.transformed_lm import TransformedCoeffs
from bodylabs.mesh.landmarking.transformed_lm import TransformedLms
ch_desired = any([
isinstance(correspondence_src, ch.Ch),
isinstance(correspondence_dst, ch.Ch),
isinstance(vertices, ch.Ch),
])
coeffs = TransformedCoeffs(
src_v=correspondence_src, dst_v=correspondence_dst)
transformed_vertices = TransformedLms(
transformed_coeffs=coeffs, src_v=vertices)
return transformed_vertices if ch_desired else transformed_vertices.r
| bsd-2-clause |
seanwestfall/django | django/utils/deconstruct.py | 502 | 2047 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
"""
Class decorator that allows the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Python 2/fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version()))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
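# Hedged usage sketch (added for illustration, not part of the original
# module; "myapp.validators.MyValidator" is an assumed import path):
#
#     @deconstructible(path='myapp.validators.MyValidator')
#     class MyValidator(object):
#         def __init__(self, limit=10):
#             self.limit = limit
#
#     MyValidator(limit=5).deconstruct()
#     # -> ('myapp.validators.MyValidator', (), {'limit': 5})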
| bsd-3-clause |
christiantroy/xbmc | tools/EventClients/Clients/PS3 Sixaxis Controller/ps3d.py | 168 | 12019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import traceback
import time
import struct
import threading
import os
if os.path.exists("../../lib/python"):
sys.path.append("../PS3 BD Remote")
sys.path.append("../../lib/python")
from bt.hid import HID
from bt.bt import bt_lookup_name
from xbmcclient import XBMCClient
from ps3 import sixaxis
from ps3_remote import process_keys as process_remote
try:
from ps3 import sixwatch
except Exception, e:
print "Failed to import sixwatch now disabled: " + str(e)
sixwatch = None
try:
import zeroconf
except:
zeroconf = None
ICON_PATH = "../../icons/"
else:
# fallback to system wide modules
from kodi.bt.hid import HID
from kodi.bt.bt import bt_lookup_name
from kodi.xbmcclient import XBMCClient
from kodi.ps3 import sixaxis
from kodi.ps3_remote import process_keys as process_remote
from kodi.defs import *
try:
from kodi.ps3 import sixwatch
except Exception, e:
print "Failed to import sixwatch now disabled: " + str(e)
sixwatch = None
try:
import kodi.zeroconf as zeroconf
except:
zeroconf = None
event_threads = []
def printerr():
trace = ""
exception = ""
exc_list = traceback.format_exception_only (sys.exc_type, sys.exc_value)
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
for entry in tb_list:
trace += entry
print("%s\n%s" % (exception, trace), "Script Error")
class StoppableThread ( threading.Thread ):
def __init__(self):
threading.Thread.__init__(self)
self._stop = False
self.set_timeout(0)
def stop_thread(self):
self._stop = True
def stop(self):
return self._stop
def close_sockets(self):
if self.isock:
try:
self.isock.close()
except:
pass
self.isock = None
if self.csock:
try:
self.csock.close()
except:
pass
self.csock = None
self.last_action = 0
def set_timeout(self, seconds):
self.timeout = seconds
def reset_timeout(self):
self.last_action = time.time()
def idle_time(self):
return time.time() - self.last_action
def timed_out(self):
if (time.time() - self.last_action) > self.timeout:
return True
else:
return False
class PS3SixaxisThread ( StoppableThread ):
def __init__(self, csock, isock, ipaddr="127.0.0.1"):
StoppableThread.__init__(self)
self.csock = csock
self.isock = isock
self.xbmc = XBMCClient(name="PS3 Sixaxis", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr)
self.set_timeout(600)
def run(self):
six = sixaxis.sixaxis(self.xbmc, self.csock, self.isock)
self.xbmc.connect()
self.reset_timeout()
try:
while not self.stop():
if self.timed_out():
raise Exception("PS3 Sixaxis powering off, timed out")
if self.idle_time() > 50:
self.xbmc.connect()
try:
if six.process_socket(self.isock):
self.reset_timeout()
except Exception, e:
print e
break
except Exception, e:
printerr()
six.close()
self.close_sockets()
class PS3RemoteThread ( StoppableThread ):
def __init__(self, csock, isock, ipaddr="127.0.0.1"):
StoppableThread.__init__(self)
self.csock = csock
self.isock = isock
self.xbmc = XBMCClient(name="PS3 Blu-Ray Remote", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr)
self.set_timeout(600)
self.services = []
self.current_xbmc = 0
def run(self):
self.xbmc.connect()
try:
# start the zeroconf thread if possible
try:
self.zeroconf_thread = ZeroconfThread()
self.zeroconf_thread.add_service('_xbmc-events._udp',
self.zeroconf_service_handler)
self.zeroconf_thread.start()
except Exception, e:
print str(e)
# main thread loop
while not self.stop():
status = process_remote(self.isock, self.xbmc)
if status == 2: # 2 = socket read timeout
if self.timed_out():
raise Exception("PS3 Blu-Ray Remote powering off, "\
"timed out")
elif status == 3: # 3 = ps and skip +
self.next_xbmc()
elif status == 4: # 4 = ps and skip -
self.previous_xbmc()
elif not status: # 0 = keys are normally processed
self.reset_timeout()
# process_remote() will raise an exception on read errors
except Exception, e:
print str(e)
self.zeroconf_thread.stop()
self.close_sockets()
def next_xbmc(self):
"""
Connect to the next XBMC instance
"""
self.current_xbmc = (self.current_xbmc + 1) % len( self.services )
self.reconnect()
return
def previous_xbmc(self):
"""
Connect to the previous XBMC instance
"""
self.current_xbmc -= 1
if self.current_xbmc < 0 :
self.current_xbmc = len( self.services ) - 1
self.reconnect()
return
def reconnect(self):
"""
Reconnect to an XBMC instance based on self.current_xbmc
"""
try:
service = self.services[ self.current_xbmc ]
print "Connecting to %s" % service['name']
self.xbmc.connect( service['address'], service['port'] )
self.xbmc.send_notification("PS3 Blu-Ray Remote", "New Connection", None)
except Exception, e:
print str(e)
def zeroconf_service_handler(self, event, service):
"""
Zeroconf event handler
"""
if event == zeroconf.SERVICE_FOUND: # new xbmc service detected
self.services.append( service )
elif event == zeroconf.SERVICE_LOST: # xbmc service lost
try:
# search for the service by name, since IP+port isn't available
for s in self.services:
# nuke it, if found
if service['name'] == s['name']:
self.services.remove(s)
break
except:
pass
return
class SixWatch(threading.Thread):
def __init__(self, mac):
threading.Thread.__init__(self)
self.mac = mac
self.daemon = True
self.start()
def run(self):
while True:
try:
sixwatch.main(self.mac)
except Exception, e:
print "Exception caught in sixwatch, restarting: " + str(e)
class ZeroconfThread ( threading.Thread ):
"""
"""
def __init__(self):
threading.Thread.__init__(self)
self._zbrowser = None
self._services = []
def run(self):
if zeroconf:
# create zeroconf service browser
self._zbrowser = zeroconf.Browser()
# add the requested services
for service in self._services:
self._zbrowser.add_service( service[0], service[1] )
# run the event loop
self._zbrowser.run()
return
def stop(self):
"""
Stop the zeroconf browser
"""
try:
self._zbrowser.stop()
except:
pass
return
def add_service(self, type, handler):
"""
Add a new service to search for.
NOTE: Services must be added before the thread starts.
"""
self._services.append( [ type, handler ] )
def usage():
print """
PS3 Sixaxis / Blu-Ray Remote HID Server v0.1
Usage: ps3.py [bdaddress] [XBMC host]
bdaddress => address of local bluetooth device to use (default: auto)
(e.g. aa:bb:cc:dd:ee:ff)
ip address => IP address or hostname of the XBMC instance (default: localhost)
(e.g. 192.168.1.110)
"""
def start_hidd(bdaddr=None, ipaddr="127.0.0.1"):
devices = [ 'PLAYSTATION(R)3 Controller',
'BD Remote Control' ]
hid = HID(bdaddr)
watch = None
if sixwatch:
try:
print "Starting USB sixwatch"
watch = SixWatch(hid.get_local_address())
except Exception, e:
print "Failed to initialize sixwatch" + str(e)
pass
while True:
if hid.listen():
(csock, addr) = hid.get_control_socket()
device_name = bt_lookup_name(addr[0])
if device_name == devices[0]:
# handle PS3 controller
handle_ps3_controller(hid, ipaddr)
elif device_name == devices[1]:
# handle the PS3 remote
handle_ps3_remote(hid, ipaddr)
else:
print "Unknown Device: %s" % (device_name)
def handle_ps3_controller(hid, ipaddr):
print "Received connection from a Sixaxis PS3 Controller"
csock = hid.get_control_socket()[0]
isock = hid.get_interrupt_socket()[0]
sixaxis = PS3SixaxisThread(csock, isock, ipaddr)
add_thread(sixaxis)
sixaxis.start()
return
def handle_ps3_remote(hid, ipaddr):
print "Received connection from a PS3 Blu-Ray Remote"
csock = hid.get_control_socket()[0]
isock = hid.get_interrupt_socket()[0]
isock.settimeout(1)
remote = PS3RemoteThread(csock, isock, ipaddr)
add_thread(remote)
remote.start()
return
def add_thread(thread):
global event_threads
event_threads.append(thread)
def main():
if len(sys.argv)>3:
return usage()
bdaddr = ""
ipaddr = "127.0.0.1"
try:
for addr in sys.argv[1:]:
try:
# ensure that the addr is of the format 'aa:bb:cc:dd:ee:ff'
if "".join([ str(len(a)) for a in addr.split(":") ]) != "222222":
raise Exception("Invalid format")
bdaddr = addr
print "Connecting to Bluetooth device: %s" % bdaddr
except Exception, e:
try:
ipaddr = addr
print "Connecting to : %s" % ipaddr
except:
print str(e)
return usage()
except Exception, e:
pass
print "Starting HID daemon"
start_hidd(bdaddr, ipaddr)
if __name__=="__main__":
try:
main()
finally:
for t in event_threads:
try:
print "Waiting for thread "+str(t)+" to terminate"
t.stop_thread()
if t.isAlive():
t.join()
print "Thread "+str(t)+" terminated"
except Exception, e:
print str(e)
pass
| gpl-2.0 |
shadyueh/pyranking | env/lib/python2.7/site-packages/setuptools/sandbox.py | 259 | 13925 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import pkg_resources
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
from setuptools import compat
from setuptools.compat import builtins
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@classmethod
def dump(cls, type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
compat.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
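# Illustrative sketch (added; the module name is an assumption): imports made
# inside the context are forgotten once it exits, e.g.
#
#     with save_modules():
#         import json  # or any module not already loaded
#     # here 'json' is removed from sys.modules again, unless it had been
#     # imported before entering the block, in which case it is restored.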
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
hide_setuptools()
with save_path():
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
"""
pattern = re.compile('(setuptools|pkg_resources|distutils)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script]+list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist:dist.activate())
def runner():
ns = dict(__file__=setup_script, __name__='__main__')
_execfile(setup_script, ns)
DirectorySandbox(setup_dir).run(runner)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
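# Hedged usage sketch (added for illustration; the path and command are
# assumptions): easy_install drives a third-party setup script through this
# helper roughly as
#
#     run_setup('/tmp/example-1.0/setup.py', ['bdist_egg'])
#
# with filesystem writes confined to that directory by the DirectorySandbox
# defined below.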
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self,name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source,name))
def run(self, func):
"""Run 'func' under os sandboxing"""
try:
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
return func()
finally:
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def _mk_dual_path_wrapper(name):
original = getattr(_os,name)
def wrap(self,src,dst,*args,**kw):
if self._active:
src,dst = self._remap_pair(name,src,dst,*args,**kw)
return original(src,dst,*args,**kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return original(path,*args,**kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return self._remap_output(name, original(path,*args,**kw))
return original(path,*args,**kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os,name)
def wrap(self,*args,**kw):
retval = original(*args,**kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os,name): locals()[name] = _mk_query(name)
def _validate_path(self,path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self,operation,path,*args,**kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self,operation,path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self,operation,src,dst,*args,**kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation+'-from',src,*args,**kw),
self._remap_input(operation+'-to',dst,*args,**kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
try:
from win32com.client.gencache import GetGeneratePath
_EXCEPTIONS.append(GetGeneratePath())
del GetGeneratePath
except ImportError:
# it appears pywin32 is not installed, so no need to exclude.
pass
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox,'')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path,mode,*args,**kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path,mode,*args,**kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src,dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file,flags,mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
def __str__(self):
return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
| mit |
flavour/helios | controllers/org.py | 3 | 5103 | # -*- coding: utf-8 -*-
"""
Organization Registry - Controllers
@author: Fran Boon
@author: Michael Howden
"""
module = request.controller
resourcename = request.function
if not deployment_settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# Options Menu (available in all Functions" Views)
s3_menu(module)
# =============================================================================
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# =============================================================================
def sector():
""" RESTful CRUD controller """
#tablename = "%s_%s" % (module, resourcename)
#table = db[tablename]
return s3_rest_controller(module, resourcename)
# -----------------------------------------------------------------------------
def subsector():
""" RESTful CRUD controller """
#tablename = "%s_%s" % (module, resourcename)
#table = db[tablename]
return s3_rest_controller(module, resourcename)
# =============================================================================
def site():
""" RESTful CRUD controller """
return s3_rest_controller(module, resourcename)
# -----------------------------------------------------------------------------
def site_org_json():
table = db.org_site
otable = db.org_organisation
response.headers["Content-Type"] = "application/json"
#db.req_commit.date.represent = lambda dt: dt[:10]
query = (table.site_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(otable.id,
otable.name)
return records.json()
# =============================================================================
def organisation():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
#return response.s3.organisation_controller()
return organisation_controller()
# -----------------------------------------------------------------------------
def organisation_list_represent(l):
if l:
max = 4
if len(l) > max:
count = 1
for x in l:
if count == 1:
output = organisation_represent(x)
elif count > max:
return "%s, etc" % output
else:
output = "%s, %s" % (output, organisation_represent(x))
count += 1
else:
return ", ".join([organisation_represent(x) for x in l])
else:
return NONE
# =============================================================================
def office():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
#return response.s3.office_controller()
return office_controller()
# =============================================================================
def person():
""" Person controller for AddPersonWidget """
def prep(r):
if r.representation != "s3json":
# Do not serve other representations here
return False
else:
s3mgr.show_ids = True
return True
response.s3.prep = prep
return s3_rest_controller("pr", "person")
# =============================================================================
def room():
""" RESTful CRUD controller """
return s3_rest_controller(module, resourcename)
# =============================================================================
def incoming():
""" Incoming Shipments """
s3mgr.load("inv_inv_item")
return response.s3.inv_incoming()
# =============================================================================
def req_match():
""" Match Requests """
s3mgr.load("req_req")
return response.s3.req_match()
# =============================================================================
def donor():
""" RESTful CRUD controller """
tablename = "org_donor"
table = db[tablename]
tablename = "org_donor"
s3.crud_strings[tablename] = Storage(
title_create = ADD_DONOR,
title_display = T("Donor Details"),
title_list = T("Donors Report"),
title_update = T("Edit Donor"),
title_search = T("Search Donors"),
subtitle_create = T("Add New Donor"),
subtitle_list = T("Donors"),
label_list_button = T("List Donors"),
label_create_button = ADD_DONOR,
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered"))
s3mgr.configure(tablename, listadd=False)
output = s3_rest_controller(module, resourcename)
return output
# END =========================================================================
| mit |
Ashaba/rms | rmslocalenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
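# Hedged illustration (added; host names are assumptions chosen only to show
# the RFC 6125 rules implemented above): a certificate naming '*.example.com'
# matches 'www.example.com', but not 'example.com' (the wildcard never spans
# the missing label) and not 'a.b.example.com' (the wildcard fragment matches
# a single dotless label only).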
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| mit |
gerrit-review/gerrit | tools/js/bowerutil.py | 1 | 1488 | # Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def hash_bower_component(hash_obj, path):
"""Hash the contents of a bower component directory.
This is a stable hash of a directory downloaded with `bower install`, minus
the .bower.json file, which is autogenerated each time by bower. Used in lieu
of hashing a zipfile of the contents, since zipfiles are difficult to hash in
a stable manner.
Args:
hash_obj: an open hash object, e.g. hashlib.sha1().
path: path to the directory to hash.
Returns:
The passed-in hash_obj.
"""
if not os.path.isdir(path):
raise ValueError('Not a directory: %s' % path)
path = os.path.abspath(path)
for root, dirs, files in os.walk(path):
dirs.sort()
for f in sorted(files):
if f == '.bower.json':
continue
p = os.path.join(root, f)
hash_obj.update(p[len(path)+1:])
hash_obj.update(open(p).read())
return hash_obj
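# Illustrative usage sketch (the component path below is hypothetical; not part
# of the original file):
# import hashlib
# digest = hash_bower_component(hashlib.sha1(),
#                               'bower_components/some-component').hexdigest()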
| apache-2.0 |
felixfontein/ansible | test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py | 47 | 2617 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.connection import Connection
def get(module, path=None, content=None, fields=None, output="json"):
if path is None:
raise ValueError("path value must be provided")
if content:
path += "?" + "content=%s" % content
if fields:
path += "?" + "field=%s" % fields
accept = None
if output == "xml":
accept = "application/yang-data+xml"
connection = Connection(module._socket_path)
return connection.send_request(
None, path=path, method="GET", accept=accept
)
def edit_config(module, path=None, content=None, method="GET", format="json"):
if path is None:
raise ValueError("path value must be provided")
content_type = None
if format == "xml":
content_type = "application/yang-data+xml"
connection = Connection(module._socket_path)
return connection.send_request(
content, path=path, method=method, content_type=content_type
)
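# Illustrative usage sketch (assumes `module` is an AnsibleModule bound to a
# persistent RESTCONF connection; the path and payload below are hypothetical):
# reply = get(module, path="/data/ietf-interfaces:interfaces", output="json")
# reply = edit_config(module, path="/data/ietf-interfaces:interfaces",
#                     content=payload, method="PATCH", format="json")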
| gpl-3.0 |
yugang/web-testing-service | wts/tests/csp/csp_sandbox_empty_int-manual.py | 30 | 2669 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "sandbox "
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
      of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_sandbox_empty_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#sandbox-optional"/>
<meta name="flags" content=""/>
<meta name="assert" content="sandbox allow-scripts"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no</strong> text "FAIL" below.</p>
<div id="test" style="display:red"></div>
<script src="support/csp.js"></script>
<script>
if (X) {
document.getElementById("test").innerHTML = "FAIL";
}
</script>
</body>
</html> """
| bsd-3-clause |
sharma1nitish/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py | 638 | 9836 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""PythonHeaderParserHandler for mod_pywebsocket.
Apache HTTP Server and mod_python must be configured such that this
function is called to handle WebSocket requests.
"""
import logging
from mod_python import apache
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import util
# PythonOption to specify the handler root directory.
_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
# PythonOption to specify the handler scan directory.
# This must be a directory under the root directory.
# The default is the root directory.
_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
# PythonOption to allow handlers whose canonical path is
# not under the root directory. It's disallowed by default.
# Set this option with value of 'yes' to allow.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
'mod_pywebsocket.allow_handlers_outside_root_dir')
# Map from values to their meanings. 'Yes' and 'No' are allowed just for
# compatibility.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
'off': False, 'no': False, 'on': True, 'yes': True}
# (Obsolete option. Ignored.)
# PythonOption to specify to allow handshake defined in Hixie 75 version
# protocol. The default is None (Off)
_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
# Map from values to their meanings.
_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
class ApacheLogHandler(logging.Handler):
"""Wrapper logging.Handler to emit log message to apache's error.log."""
_LEVELS = {
logging.DEBUG: apache.APLOG_DEBUG,
logging.INFO: apache.APLOG_INFO,
logging.WARNING: apache.APLOG_WARNING,
logging.ERROR: apache.APLOG_ERR,
logging.CRITICAL: apache.APLOG_CRIT,
}
def __init__(self, request=None):
logging.Handler.__init__(self)
self._log_error = apache.log_error
if request is not None:
self._log_error = request.log_error
# Time and level will be printed by Apache.
self._formatter = logging.Formatter('%(name)s: %(message)s')
def emit(self, record):
apache_level = apache.APLOG_DEBUG
if record.levelno in ApacheLogHandler._LEVELS:
apache_level = ApacheLogHandler._LEVELS[record.levelno]
msg = self._formatter.format(record)
# "server" parameter must be passed to have "level" parameter work.
# If only "level" parameter is passed, nothing shows up on Apache's
# log. However, at this point, we cannot get the server object of the
# virtual host which will process WebSocket requests. The only server
# object we can get here is apache.main_server. But Wherever (server
# configuration context or virtual host context) we put
# PythonHeaderParserHandler directive, apache.main_server just points
# the main server instance (not any of virtual server instance). Then,
# Apache follows LogLevel directive in the server configuration context
# to filter logs. So, we need to specify LogLevel in the server
# configuration context. Even if we specify "LogLevel debug" in the
# virtual host context which actually handles WebSocket connections,
# DEBUG level logs never show up unless "LogLevel debug" is specified
# in the server configuration context.
#
# TODO(tyoshino): Provide logging methods on request object. When
# request is mp_request object (when used together with Apache), the
# methods call request.log_error indirectly. When request is
# _StandaloneRequest, the methods call Python's logging facility which
# we create in standalone.py.
self._log_error(msg, apache_level, apache.main_server)
def _configure_logging():
logger = logging.getLogger()
# Logs are filtered by Apache based on LogLevel directive in Apache
# configuration file. We must just pass logs for all levels to
# ApacheLogHandler.
logger.setLevel(logging.DEBUG)
logger.addHandler(ApacheLogHandler())
_configure_logging()
_LOGGER = logging.getLogger(__name__)
def _parse_option(name, value, definition):
if value is None:
return False
meaning = definition.get(value.lower())
if meaning is None:
raise Exception('Invalid value for PythonOption %s: %r' %
(name, value))
return meaning
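# Example (follows directly from the definitions above): passing the value 'on'
# with _PYOPT_ALLOW_DRAFT75_DEFINITION yields True, a value of None yields False,
# and any unrecognized string raises an Exception naming the offending option.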
def _create_dispatcher():
_LOGGER.info('Initializing Dispatcher')
options = apache.main_server.get_options()
handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
if not handler_root:
raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
apache.APLOG_ERR)
handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
allow_handlers_outside_root = _parse_option(
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
dispatcher = dispatch.Dispatcher(
handler_root, handler_scan, allow_handlers_outside_root)
for warning in dispatcher.source_warnings():
apache.log_error(
'mod_pywebsocket: Warning in source loading: %s' % warning,
apache.APLOG_WARNING)
return dispatcher
# Initialize
_dispatcher = _create_dispatcher()
def headerparserhandler(request):
"""Handle request.
Args:
request: mod_python request.
This function is named headerparserhandler because it is the default
name for a PythonHeaderParserHandler.
"""
handshake_is_done = False
try:
# Fallback to default http handler for request paths for which
# we don't have request handlers.
if not _dispatcher.get_handler_suite(request.uri):
request.log_error(
'mod_pywebsocket: No handler for resource: %r' % request.uri,
apache.APLOG_INFO)
request.log_error(
'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
return apache.DECLINED
except dispatch.DispatchException, e:
request.log_error(
'mod_pywebsocket: Dispatch failed for error: %s' % e,
apache.APLOG_INFO)
if not handshake_is_done:
return e.status
try:
allow_draft75 = _parse_option(
_PYOPT_ALLOW_DRAFT75,
apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
_PYOPT_ALLOW_DRAFT75_DEFINITION)
try:
handshake.do_handshake(
request, _dispatcher, allowDraft75=allow_draft75)
except handshake.VersionException, e:
request.log_error(
'mod_pywebsocket: Handshake failed for version error: %s' % e,
apache.APLOG_INFO)
request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
e.supported_versions)
return apache.HTTP_BAD_REQUEST
except handshake.HandshakeException, e:
# Handshake for ws/wss failed.
# Send http response with error status.
request.log_error(
'mod_pywebsocket: Handshake failed for error: %s' % e,
apache.APLOG_INFO)
return e.status
handshake_is_done = True
request._dispatcher = _dispatcher
_dispatcher.transfer_data(request)
except handshake.AbortedByUserException, e:
request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
except Exception, e:
# DispatchException can also be thrown if something is wrong in
# pywebsocket code. It's caught here, then.
request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
(e, util.get_stack_trace()),
apache.APLOG_ERR)
# Unknown exceptions before handshake mean Apache must handle its
# request with another handler.
if not handshake_is_done:
return apache.DECLINED
# Set assbackwards to suppress response header generation by Apache.
request.assbackwards = 1
return apache.DONE # Return DONE such that no other handlers are invoked.
# vi:sts=4 sw=4 et
| bsd-3-clause |
reminisce/mxnet | benchmark/opperf/utils/op_registry_utils.py | 2 | 13293 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities to interact with MXNet operator registry."""
from operator import itemgetter
from mxnet import runtime
import mxnet as mx
from benchmark.opperf.rules.default_params import DEFAULTS_INPUTS, MX_OP_MODULE
# Operators where parameters have special criteria that cannot be cleanly automated.
# Example: sample_multinomial operator has a parameter 'data'. It expects values to sum up to 1.
unique_ops = ("sample_multinomial",)
def _select_ops(operator_names, filters=("_contrib", "_"), merge_op_forward_backward=True):
"""From a given list of operators, filter out all operator names starting with given filters and prepares
a dictionary of operator with attributes - 'has_backward' and 'nd_op_handle = mxnet.ndarray.op'
By default, merge forward and backward operators for a given op into one operator and sets the attribute
'has_backward' for the operator.
By default, filter out all Contrib operators that starts with '_contrib' and internal operators that
starts with '_'.
Note - All deprecated operators are filtered out as well.
Parameters
----------
operator_names: List[str]
List of operator names.
filters: Tuple(str)
Tuple of filters to apply on operator names.
merge_op_forward_backward: Boolean, Default - True
Merge forward and backward operators for a given op in to one op.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle"}}
"""
mx_operators = {}
operators_with_backward = []
# Filter out deprecated operators
filters += ("normal", "uniform", "BatchNorm_v1", "Flatten", "contrib_CTCLoss", "Pad", "Cast",
"Pooling_v1", "Concat", "Reshape", "Convolution_v1", "SliceChannel", "Crop",
"crop", "onehot_encode")
if merge_op_forward_backward:
filters += ("_backward",)
for cur_op_name in operator_names:
if not cur_op_name.startswith(filters):
mx_operators[cur_op_name] = {"has_backward": False,
"nd_op_handle": getattr(MX_OP_MODULE, cur_op_name)}
if cur_op_name.startswith("_backward_"):
operators_with_backward.append(cur_op_name)
if merge_op_forward_backward:
# Identify all operators that can run backward.
for op_with_backward in operators_with_backward:
op_name = op_with_backward.split("_backward_")[1]
if op_name in mx_operators:
mx_operators[op_name]["has_backward"] = True
return mx_operators
def _set_op_arguments(mx_operators):
"""Fetch and set operator arguments - nargs, arg_names, arg_types
"""
for op_name in mx_operators:
operator_arguments = mx.operator.get_operator_arguments(op_name)
mx_operators[op_name]["params"] = {"narg": operator_arguments.narg,
"arg_names": operator_arguments.names,
"arg_types": operator_arguments.types}
def _get_all_mxnet_operators():
# Step 1 - Get all registered op names and filter it
operator_names = mx.operator.get_all_registered_operators()
mx_operators = _select_ops(operator_names)
# Step 2 - Get all parameters for the operators
_set_op_arguments(mx_operators)
return mx_operators
def prepare_op_inputs(arg_params, arg_values):
inputs = []
for arg_value in arg_values:
inp = {}
for arg_name in arg_params["params"]["arg_names"]:
if arg_name in arg_value:
inp[arg_name] = arg_value[arg_name]
inputs.append(inp)
return inputs
def prepare_op_inputs(op, arg_params):
inputs = []
# 4d tensor is needed only by following two ops
ops_4d = ['depth_to_space','space_to_depth']
# Prepare op to default input mapping
arg_values = {}
for arg_name, arg_type in zip(arg_params["params"]["arg_names"],
arg_params["params"]["arg_types"]):
if "NDArray" in arg_type and arg_name + "_nd" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_nd"]
elif "NDArray" in arg_type and op in ops_4d and arg_name + "_4d" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_4d"]
elif arg_name in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name]
elif "float" in arg_type and arg_name + "_float" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_float"]
elif "Shape" in arg_type and arg_name + "_shape" in DEFAULTS_INPUTS:
# This is for cases where in some ops 'axis' is Int in some ops a shape tuple.
# Ex: axis in sum is shape, axis in sort is int.
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_shape"]
# Number of different inputs we want to use to test
# the operator
num_input_combinations = max([len(value) for value in arg_values.values()])
# Prepare key/value args for param to input value
for idx in range(num_input_combinations):
inp = {}
for arg_name in arg_params["params"]["arg_names"]:
if arg_name in arg_values:
if len(arg_values[arg_name]) == num_input_combinations:
inp[arg_name] = arg_values[arg_name][idx]
else:
# This is required when we want to use a param same across all
# input combination. Example: keeping low and high same for random sampling
# operator for all different types of Tensor shape.
inp[arg_name] = arg_values[arg_name][0]
inputs.append(inp)
return inputs
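# Illustrative sketch of what prepare_op_inputs produces (the default values
# shown here are hypothetical, not the real DEFAULTS_INPUTS):
# for an op with arg_names ("data", "axis") and defaults like
#   {"data_nd": [nd.ones((1024, 1024)), nd.ones((10000, 1))], "axis": [0, 0]}
# the function would return
#   [{"data": nd.ones((1024, 1024)), "axis": 0},
#    {"data": nd.ones((10000, 1)), "axis": 0}]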
def get_all_unary_operators():
"""Gets all Unary operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for unary broadcast operators
unary_broadcast_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_params["params"]["narg"] == 1 and \
"data" in op_params["params"]["arg_names"]:
unary_broadcast_mx_operators[op_name] = mx_operators[op_name]
return unary_broadcast_mx_operators
def get_all_broadcast_binary_operators():
"""Gets all binary broadcast operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for binary broadcast operators
binary_broadcast_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name.startswith("broadcast_") and op_params["params"]["narg"] == 2 and \
"lhs" in op_params["params"]["arg_names"] and \
"rhs" in op_params["params"]["arg_names"]:
binary_broadcast_mx_operators[op_name] = mx_operators[op_name]
return binary_broadcast_mx_operators
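# Illustrative usage sketch (assumes an importable MXNet build; variable names
# are examples only):
# broadcast_ops = get_all_broadcast_binary_operators()
# for op_name, attrs in broadcast_ops.items():
#     input_combos = prepare_op_inputs(op_name, attrs)  # kwargs dicts to benchmark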
def get_all_elemen_wise_binary_operators():
"""Gets all binary elemen_wise operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for binary elemen_wise operators
binary_elemen_wise_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name.startswith("elemwise_") and op_params["params"]["narg"] == 2 and \
"lhs" in op_params["params"]["arg_names"] and \
"rhs" in op_params["params"]["arg_names"]:
binary_elemen_wise_mx_operators[op_name] = mx_operators[op_name]
return binary_elemen_wise_mx_operators
def get_all_random_sampling_operators():
"""Gets all Random Sampling operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Random Sampling operators
random_sampling_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name.startswith(("random_", "sample_")) and op_name not in unique_ops:
random_sampling_mx_operators[op_name] = mx_operators[op_name]
return random_sampling_mx_operators
def get_all_reduction_operators():
"""Gets all Reduction operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Reduction operators
reduction_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_params["params"]["narg"] == 4 and \
set(["data", "axis", "exclude", "keepdims"]).issubset(set(op_params["params"]["arg_names"])) \
and op_name not in unique_ops:
reduction_mx_operators[op_name] = mx_operators[op_name]
return reduction_mx_operators
def get_all_optimizer_operators():
"""Gets all Optimizer operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
optimizer_ops = ['mp_sgd_update', 'signum_update', 'rmspropalex_update', 'ftml_update', 'rmsprop_update',
'sgd_mom_update', 'signsgd_update', 'mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
'adam_update']
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Optimizer operators
optimizer_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name in optimizer_ops and op_name not in unique_ops:
optimizer_mx_operators[op_name] = mx_operators[op_name]
return optimizer_mx_operators
def get_all_sorting_searching_operators():
"""Gets all Sorting and Searching operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
sort_search_ops = ['sort', 'argsort', 'argmax', 'argmin', 'topk']
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Sort and search operators
sort_search_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name in sort_search_ops and op_name not in unique_ops:
sort_search_mx_operators[op_name] = mx_operators[op_name]
return sort_search_mx_operators
def get_all_rearrange_operators():
"""Gets all array rearrange operators registered with MXNet.
Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
rearrange_ops = ['transpose','swapaxes','flip','depth_to_space','space_to_depth']
# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()
# Filter for Array Rearrange operators
rearrange_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name in rearrange_ops and op_name not in unique_ops:
rearrange_mx_operators[op_name] = mx_operators[op_name]
return rearrange_mx_operators
def get_operators_with_no_benchmark(operators_with_benchmark):
"""Gets all MXNet operators with not benchmark.
Retrieve all operators registered with MXNet and prepares a list of operators that are not part of given
operators with benchmark list.
Parameters
----------
operators_with_benchmark: list[Str]
List of operator names that has benchmarks
Returns
-------
list[Str]
List of operator names that is registered with MXNet but has no benchmarks.
"""
all_mxnet_operators = _get_all_mxnet_operators().keys()
return list(set(all_mxnet_operators) - set(operators_with_benchmark))
def get_current_runtime_features():
"""Get all current runtime time flags/configuration for MXNet.
Returns
-------
Map of current runtime features such as compile flags used by MXNet.
Example: {'runtime_features': {'OPENCV' : '✔ OPENCV', 'CUDA': '✖ CUDA'}}
"""
features = runtime.Features()
runtime_features = {}
for feature, config in sorted(features.items(), key=itemgetter(0)):
runtime_features[feature] = config
return {'runtime_features': runtime_features}
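# Illustrative usage sketch (not part of the original module):
# print(get_current_runtime_features()['runtime_features'])
# lists the compile-time feature flags of the local MXNet build.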
| apache-2.0 |
sinhrks/seaborn | seaborn/matrix.py | 5 | 40890 | """Functions to visualize matrices of data."""
import itertools
import colorsys
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from .axisgrid import Grid
from .palettes import cubehelix_palette
from .utils import despine, axis_ticklabels_overlap
from .external.six.moves import range
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(str, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(str, i)) for i in index.values]
else:
return index.values
def _convert_colors(colors):
"""Convert either a list of colors or nested lists of colors to RGB."""
to_rgb = mpl.colors.colorConverter.to_rgb
try:
to_rgb(colors[0])
# If this works, there is only one level of colors
return list(map(to_rgb, colors))
except ValueError:
# If we get here, we have nested lists
return [list(map(to_rgb, l)) for l in colors]
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatabile and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
        if not (mask.index.equals(data.index) and
                mask.columns.equals(data.columns)):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
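# Illustrative sketch of building a mask that _matrix_mask accepts (mirrors the
# heatmap docstring example further below; `corr` is a hypothetical square array):
# mask = np.zeros_like(corr, dtype=np.bool)
# mask[np.triu_indices_from(mask)] = True   # hide the upper triangle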
class _HeatMapper(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
        # Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
# Reverse the rows so the plot looks like the matrix
plot_data = plot_data[::-1]
data = data.ix[::-1]
mask = mask.ix[::-1]
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int) and xticklabels > 1:
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif isinstance(xticklabels, bool) and xticklabels:
xticklabels = _index_to_ticklabels(data.columns)
elif isinstance(xticklabels, bool) and not xticklabels:
xticklabels = ['' for _ in range(data.shape[1])]
ytickevery = 1
if isinstance(yticklabels, int) and yticklabels > 1:
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif isinstance(yticklabels, bool) and yticklabels:
yticklabels = _index_to_ticklabels(data.index)
elif isinstance(yticklabels, bool) and not yticklabels:
yticklabels = ['' for _ in range(data.shape[0])]
else:
yticklabels = yticklabels[::-1]
# Get the positions and used label for the ticks
nx, ny = data.T.shape
xstart, xend, xstep = 0, nx, xtickevery
self.xticks = np.arange(xstart, xend, xstep) + .5
self.xticklabels = xticklabels[xstart:xend:xstep]
ystart, yend, ystep = (ny - 1) % ytickevery, ny, ytickevery
self.yticks = np.arange(ystart, yend, ystep) + .5
self.yticklabels = yticklabels[ystart:yend:ystep]
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
# Simple heuristics for whether these data should have a divergent map
divergent = ((vmin < 0) and (vmax > 0)) or center is not None
# Now set center to 0 so math below makes sense
if center is None:
center = 0
# A divergent map should be symmetric around the center value
if divergent:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
self.divergent = divergent
# Now add in the centering value and set the limits
vmin += center
vmax += center
self.vmin = vmin
self.vmax = vmax
# Choose default colormaps if not provided
if cmap is None:
if divergent:
self.cmap = "RdBu_r"
else:
self.cmap = cubehelix_palette(light=.95, as_cmap=True)
else:
self.cmap = cmap
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
xpos, ypos = np.meshgrid(ax.get_xticks(), ax.get_yticks())
for x, y, val, color in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors()):
if val is not np.ma.masked:
_, l, _ = colorsys.rgb_to_hls(*color[:3])
text_color = ".15" if l > .5 else "w"
val = ("{:" + self.fmt + "}").format(val)
ax.text(x, y, val, color=text_color,
ha="center", va="center", **self.annot_kws)
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Add row and column labels
ax.set(xticks=self.xticks, yticks=self.yticks)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
plt.draw()
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
# Possibly add a colorbar
if self.cbar:
ticker = mpl.ticker.MaxNLocator(6)
cb = ax.figure.colorbar(mesh, cax, ax,
ticks=ticker, **self.cbar_kws)
cb.outline.set_linewidth(0)
def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=False, fmt=".2g", annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, ax=None, xticklabels=True, yticklabels=True,
mask=None,
**kwargs):
"""Plot rectangular data as a color-encoded matrix.
This function tries to infer a good colormap to use from the data, but
this is not guaranteed to work, so take care to make sure the kind of
colormap (sequential or diverging) and its limits are appropriate.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
one of these values may be ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
will be either a cubehelix map (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool, optional
If True, write the data value in each cell.
fmt : string, optional
String formatting code to use when ``annot`` is True.
annot_kws : dict of key, value mappings, optional
Keyword arguments for ``ax.text`` when ``annot`` is True.
linewidths : float, optional
Width of the lines that will divide each cell.
linecolor : color, optional
Color of the lines that will divide each cell.
cbar : boolean, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for `fig.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : boolean, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
xticklabels : list-like, int, or bool, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
xticklabels. If an integer, use the column names but plot only every
n label.
yticklabels : list-like, int, or bool, optional
If True, plot the row names of the dataframe. If False, don't plot
the row names. If list-like, plot these alternate labels as the
yticklabels. If an integer, use the index names but plot only every
n label.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked.
kwargs : other keyword arguments
All other keyword arguments are passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
Examples
--------
Plot a heatmap for a numpy array:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(0)
>>> import seaborn as sns; sns.set()
>>> uniform_data = np.random.rand(10, 12)
>>> ax = sns.heatmap(uniform_data)
Change the limits of the colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
Plot a heatmap for data centered on 0:
.. plot::
:context: close-figs
>>> normal_data = np.random.randn(10, 12)
>>> ax = sns.heatmap(normal_data)
Plot a dataframe with meaningful row and column labels:
.. plot::
:context: close-figs
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> ax = sns.heatmap(flights)
Annotate each cell with the numeric value using integer formatting:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, annot=True, fmt="d")
Add lines between each cell:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, linewidths=.5)
Use a different colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cmap="YlGnBu")
Center the colormap at a specific value:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, center=flights.loc["January", 1955])
Plot every other column label and don't plot row labels:
.. plot::
:context: close-figs
>>> data = np.random.randn(50, 20)
>>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
Don't draw a colorbar:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cbar=False)
Use different axes for the colorbar:
.. plot::
:context: close-figs
>>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
>>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
>>> ax = sns.heatmap(flights, ax=ax,
... cbar_ax=cbar_ax,
... cbar_kws={"orientation": "horizontal"})
Use a mask to plot only part of a matrix
.. plot::
:context: close-figs
>>> corr = np.corrcoef(np.random.randn(10, 200))
>>> mask = np.zeros_like(corr)
>>> mask[np.triu_indices_from(mask)] = True
>>> with sns.axes_style("white"):
... ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels, yticklabels,
mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
class _DendrogramPlotter(object):
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
if self.rotate:
self.X = self.dendrogram['dcoord']
self.Y = self.dendrogram['icoord']
else:
self.X = self.dendrogram['icoord']
self.Y = self.dendrogram['dcoord']
def _calculate_linkage_scipy(self):
if np.product(self.shape) >= 10000:
            import warnings
            warnings.warn('This will be slow... (gentle suggestion: '
                          '"pip install fastcluster")')
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = hierarchy.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = fastcluster.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
        This is a separate function, not a property, because we don't want to
        recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_list=['k'], color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
for x, y in zip(self.X, self.Y):
ax.plot(x, y, color='k', linewidth=.5)
if self.rotate and self.axis == 0:
ax.invert_xaxis()
ax.yaxis.set_ticks_position('right')
ymax = min(map(min, self.Y)) + max(map(max, self.Y))
ax.set_ylim(0, ymax)
ax.invert_yaxis()
else:
xmax = min(map(min, self.X)) + max(map(max, self.X))
ax.set_xlim(0, xmax)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
plt.draw()
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean',
method='average', rotate=False, ax=None):
"""Draw a tree diagram of relationships within a matrix
Parameters
----------
data : pandas.DataFrame
Rectangular data
linkage : numpy.array, optional
Linkage matrix
axis : int, optional
Which axis to use to calculate linkage. 0 is rows, 1 is columns.
label : bool, optional
If True, label the dendrogram at leaves with column or row names
metric : str, optional
Distance metric. Anything valid for scipy.spatial.distance.pdist
method : str, optional
Linkage method to use. Anything valid for
scipy.cluster.hierarchy.linkage
rotate : bool, optional
When plotting the matrix, whether to rotate it 90 degrees
counter-clockwise, so the leaves face right
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis
Returns
-------
dendrogramplotter : _DendrogramPlotter
A Dendrogram plotter object.
Notes
-----
Access the reordered dendrogram indices with
dendrogramplotter.reordered_ind
"""
plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
metric=metric, method=method,
label=label, rotate=rotate)
if ax is None:
ax = plt.gca()
return plotter.plot(ax=ax)
class ClusterGrid(Grid):
def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None, mask=None):
"""Grid object for organizing clustered heatmap input on to axes"""
if isinstance(data, pd.DataFrame):
self.data = data
else:
self.data = pd.DataFrame(data)
self.data2d = self.format_data(self.data, pivot_kws, z_score,
standard_scale)
self.mask = _matrix_mask(self.data2d, mask)
if figsize is None:
width, height = 10, 10
figsize = (width, height)
self.fig = plt.figure(figsize=figsize)
if row_colors is not None:
row_colors = _convert_colors(row_colors)
self.row_colors = row_colors
if col_colors is not None:
col_colors = _convert_colors(col_colors)
self.col_colors = col_colors
width_ratios = self.dim_ratios(self.row_colors,
figsize=figsize,
axis=1)
height_ratios = self.dim_ratios(self.col_colors,
figsize=figsize,
axis=0)
nrows = 3 if self.col_colors is None else 4
ncols = 3 if self.row_colors is None else 4
self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01,
width_ratios=width_ratios,
height_ratios=height_ratios)
self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2],
axisbg="white")
self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1],
axisbg="white")
self.ax_row_colors = None
self.ax_col_colors = None
if self.row_colors is not None:
self.ax_row_colors = self.fig.add_subplot(
self.gs[nrows - 1, ncols - 2])
if self.col_colors is not None:
self.ax_col_colors = self.fig.add_subplot(
self.gs[nrows - 2, ncols - 1])
self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1])
# colorbar for scale to left corner
self.cax = self.fig.add_subplot(self.gs[0, 0])
self.dendrogram_row = None
self.dendrogram_col = None
def format_data(self, data, pivot_kws, z_score=None,
standard_scale=None):
"""Extract variables from data or use directly."""
# Either the data is already in 2d matrix format, or need to do a pivot
if pivot_kws is not None:
data2d = data.pivot(**pivot_kws)
else:
data2d = data
if z_score is not None and standard_scale is not None:
raise ValueError(
'Cannot perform both z-scoring and standard-scaling on data')
if z_score is not None:
data2d = self.z_score(data2d, z_score)
if standard_scale is not None:
data2d = self.standard_scale(data2d, standard_scale)
return data2d
@staticmethod
def z_score(data2d, axis=1):
"""Standarize the mean and variance of the data axis
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
normalized : pandas.DataFrame
            Normalized data with a mean of 0 and variance of 1 across the
specified axis.
"""
if axis == 1:
z_scored = data2d
else:
z_scored = data2d.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
@staticmethod
def standard_scale(data2d, axis=1):
"""Divide the data by the difference between the max and min
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
vmin : int
If 0, then subtract the minimum of the data before dividing by
the range.
Returns
-------
standardized : pandas.DataFrame
            Data rescaled so that values range from 0 to 1 across the
            specified axis.
>>> import numpy as np
>>> d = np.arange(5, 8, 0.5)
>>> ClusterGrid.standard_scale(d)
array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
# Normalize these values to range from 0 to 1
if axis == 1:
standardized = data2d
else:
standardized = data2d.T
subtract = standardized.min()
standardized = (standardized - subtract) / (
standardized.max() - standardized.min())
if axis == 1:
return standardized
else:
return standardized.T
def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05):
"""Get the proportions of the figure taken up by each axes
"""
figdim = figsize[axis]
# Get resizing proportion of this figure for the dendrogram and
# colorbar, so only the heatmap gets bigger but the dendrogram stays
# the same size.
dendrogram = min(2. / figdim, .2)
# add the colorbar
colorbar_width = .8 * dendrogram
colorbar_height = .2 * dendrogram
if axis == 0:
ratios = [colorbar_width, colorbar_height]
else:
ratios = [colorbar_height, colorbar_width]
if side_colors is not None:
# Add room for the colors
ratios += [side_colors_ratio]
# Add the ratio for the heatmap itself
ratios += [.8]
return ratios
@staticmethod
def color_list_to_matrix_and_cmap(colors, ind, axis=0):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
These arguments can now be plotted using heatmap(matrix, cmap)
and the provided colors will be plotted.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
axis : int
Which axis this is labeling
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
cmap : matplotlib.colors.ListedColormap
"""
# check for nested lists/color palettes.
# Will fail if matplotlib color is list not tuple
if any(issubclass(type(x), list) for x in colors):
all_colors = set(itertools.chain(*colors))
n = len(colors)
m = len(colors[0])
else:
all_colors = set(colors)
n = 1
m = len(colors)
colors = [colors]
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix = np.array([color_to_value[c]
for color in colors for c in color])
shape = (n, m)
matrix = matrix.reshape(shape)
matrix = matrix[:, ind]
if axis == 0:
# row-side:
matrix = matrix.T
cmap = mpl.colors.ListedColormap(all_colors)
return matrix, cmap
def savefig(self, *args, **kwargs):
if 'bbox_inches' not in kwargs:
kwargs['bbox_inches'] = 'tight'
self.fig.savefig(*args, **kwargs)
def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
row_linkage, col_linkage):
# Plot the row dendrogram
if row_cluster:
self.dendrogram_row = dendrogram(
self.data2d, metric=metric, method=method, label=False, axis=0,
ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage)
else:
self.ax_row_dendrogram.set_xticks([])
self.ax_row_dendrogram.set_yticks([])
        # Plot the column dendrogram
if col_cluster:
self.dendrogram_col = dendrogram(
self.data2d, metric=metric, method=method, label=False,
axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage)
else:
self.ax_col_dendrogram.set_xticks([])
self.ax_col_dendrogram.set_yticks([])
despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
def plot_colors(self, xind, yind, **kws):
"""Plots color labels between the dendrogram and the heatmap
Parameters
----------
heatmap_kws : dict
Keyword arguments heatmap
"""
# Remove any custom colormap and centering
kws = kws.copy()
kws.pop('cmap', None)
kws.pop('center', None)
kws.pop('vmin', None)
kws.pop('vmax', None)
kws.pop('xticklabels', None)
kws.pop('yticklabels', None)
if self.row_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.row_colors, yind, axis=0)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(self.ax_row_colors, left=True, bottom=True)
if self.col_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.col_colors, xind, axis=1)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(self.ax_col_colors, left=True, bottom=True)
def plot_matrix(self, colorbar_kws, xind, yind, **kws):
self.data2d = self.data2d.iloc[yind, xind]
self.mask = self.mask.iloc[yind, xind]
# Try to reorganize specified tick labels, if provided
xtl = kws.pop("xticklabels", True)
try:
xtl = np.asarray(xtl)[xind]
except (TypeError, IndexError):
pass
ytl = kws.pop("yticklabels", True)
try:
ytl = np.asarray(ytl)[yind]
except (TypeError, IndexError):
pass
heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax,
cbar_kws=colorbar_kws, mask=self.mask,
xticklabels=xtl, yticklabels=ytl, **kws)
self.ax_heatmap.yaxis.set_ticks_position('right')
self.ax_heatmap.yaxis.set_label_position('right')
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
row_linkage, col_linkage, **kws):
colorbar_kws = {} if colorbar_kws is None else colorbar_kws
self.plot_dendrograms(row_cluster, col_cluster, metric, method,
row_linkage=row_linkage, col_linkage=col_linkage)
try:
xind = self.dendrogram_col.reordered_ind
except AttributeError:
xind = np.arange(self.data2d.shape[1])
try:
yind = self.dendrogram_row.reordered_ind
except AttributeError:
yind = np.arange(self.data2d.shape[0])
self.plot_colors(xind, yind, **kws)
self.plot_matrix(colorbar_kws, xind, yind, **kws)
return self
def clustermap(data, pivot_kws=None, method='average', metric='euclidean',
z_score=None, standard_scale=None, figsize=None, cbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None,
row_colors=None, col_colors=None, mask=None, **kwargs):
"""Plot a hierarchically clustered heatmap of a pandas DataFrame
Parameters
----------
data: pandas.DataFrame
Rectangular data for clustering. Cannot contain NAs.
pivot_kws : dict, optional
If `data` is a tidy dataframe, can provide keyword arguments for
pivot to create a rectangular dataframe.
method : str, optional
Linkage method to use for calculating clusters.
See scipy.cluster.hierarchy.linkage documentation for more information:
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
metric : str, optional
Distance metric to use for the data. See
scipy.spatial.distance.pdist documentation for more options
http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
z_score : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores
for the rows or the columns. Z scores are: z = (x - mean)/std, so
values in each row (column) will get the mean of the row (column)
subtracted, then divided by the standard deviation of the row (column).
This ensures that each row (column) has mean of 0 and variance of 1.
standard_scale : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to standardize that
dimension, meaning for each row or column, subtract the minimum and
divide each by its maximum.
figsize: tuple of two ints, optional
Size of the figure to create.
cbar_kws : dict, optional
Keyword arguments to pass to ``cbar_kws`` in ``heatmap``, e.g. to
add a label to the colorbar.
{row,col}_cluster : bool, optional
If True, cluster the {rows, columns}.
{row,col}_linkage : numpy.array, optional
Precomputed linkage matrix for the rows or columns. See
scipy.cluster.hierarchy.linkage for specific formats.
{row,col}_colors : list-like, optional
List of colors to label for either the rows or columns. Useful to
evaluate whether samples within a group are clustered together. Can
use nested lists for multiple color levels of labeling.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked. Only used for
visualizing, not for calculating.
kwargs : other keyword arguments
All other keyword arguments are passed to ``sns.heatmap``
Returns
-------
clustergrid : ClusterGrid
A ClusterGrid instance.
Notes
-----
The returned object has a ``savefig`` method that should be used if you
want to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
``clustergrid.dendrogram_row.reordered_ind``
    To access the reordered column indices, use:
``clustergrid.dendrogram_col.reordered_ind``
Examples
--------
Plot a clustered heatmap:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> g = sns.clustermap(flights)
Don't cluster one of the axes:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, col_cluster=False)
Use a different colormap and add lines to separate the cells:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(as_cmap=True, rot=-.3, light=1)
>>> g = sns.clustermap(flights, cmap=cmap, linewidths=.5)
Use a different figure size:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, cmap=cmap, figsize=(7, 5))
Standardize the data across the columns:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, standard_scale=1)
Normalize the data across the rows:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, z_score=0)
Use a different clustering method:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, method="single", metric="cosine")
Add colored labels on one of the axes:
.. plot::
:context: close-figs
>>> season_colors = (sns.color_palette("BuPu", 3) +
... sns.color_palette("RdPu", 3) +
... sns.color_palette("YlGn", 3) +
... sns.color_palette("OrRd", 3))
>>> g = sns.clustermap(flights, row_colors=season_colors)
"""
plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
row_colors=row_colors, col_colors=col_colors,
z_score=z_score, standard_scale=standard_scale,
mask=mask)
return plotter.plot(metric=metric, method=method,
colorbar_kws=cbar_kws,
row_cluster=row_cluster, col_cluster=col_cluster,
row_linkage=row_linkage, col_linkage=col_linkage,
**kwargs)
| bsd-3-clause |
lepinkainen/pyfibot | pyfibot/modules/module_geoip.py | 1 | 1389 | from __future__ import unicode_literals, print_function, division
import pygeoip
import os.path
import sys
import socket
try:
from modules.module_usertrack import get_table
user_track_available = True
except ImportError:
user_track_available = False
# http://dev.maxmind.com/geoip/legacy/geolite/
DATAFILE = os.path.join(sys.path[0], "GeoIP.dat")
# STANDARD = reload from disk
# MEMORY_CACHE = load to memory
# MMAP_CACHE = memory using mmap
gi4 = pygeoip.GeoIP(DATAFILE, pygeoip.MEMORY_CACHE)
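# A quick sketch of direct lookups against the loaded database (the hostname
# and address below are placeholders, not part of this module):
#   gi4.country_name_by_name('example.org')
#   gi4.country_name_by_addr('8.8.8.8')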
def command_geoip(bot, user, channel, args):
"""Determine the user's country based on host or nick, if module_usertrack is used."""
if not args:
return bot.say(channel, "usage: .geoip HOST/NICK")
host = args
nick = None
if user_track_available:
table = get_table(bot, channel)
user = table.find_one(nick=args)
if user:
nick = user["nick"]
host = user["host"]
try:
country = gi4.country_name_by_name(host)
except socket.gaierror:
country = None
if country:
if nick:
return bot.say(channel, "%s (%s) is in %s" % (nick, host, country))
return bot.say(channel, "%s is in %s" % (host, country))
if nick:
return bot.say(channel, "Host not found for %s (%s)" % (nick, host))
return bot.say(channel, "Host not found for %s" % host)
| bsd-3-clause |
valurhrafn/chrome-sync-server | google/protobuf/protobuf_lite_java_parse_pom.py | 88 | 1834 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses the Maven pom.xml file for which files to include in a lite build.
Usage:
protobuf_lite_java_parse_pom.py {path to pom.xml}
This is a helper file for the protobuf_lite_java target in protobuf.gyp.
It parses the pom.xml file, and looks for all the includes specified in the
'lite' profile. It does not return any test includes.
The result is printed as one line per entry.
"""
import sys
from xml.etree import ElementTree
def main(argv):
if (len(argv) < 2):
usage()
return 1
# Setup all file and XML query paths.
pom_path = argv[1]
namespace = "{http://maven.apache.org/POM/4.0.0}"
profile_path = '{0}profiles/{0}profile'.format(namespace)
id_path = '{0}id'.format(namespace)
plugin_path = \
'{0}build/{0}plugins/{0}plugin'.format(namespace)
artifact_path = '{0}artifactId'.format(namespace)
include_path = '{0}configuration/{0}includes/{0}include'.format(namespace)
# Parse XML file and store result in includes list.
includes = []
for profile in ElementTree.parse(pom_path).getroot().findall(profile_path):
id_element = profile.find(id_path)
if (id_element is not None and id_element.text == 'lite'):
for plugin in profile.findall(plugin_path):
artifact_element = plugin.find(artifact_path)
if (artifact_element is not None and
artifact_element.text == 'maven-compiler-plugin'):
for include in plugin.findall(include_path):
includes.append(include.text)
# Print result to stdout, one item on each line.
print '\n'.join(includes)
def usage():
  print(__doc__)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
jspargo/AneMo | django/lib/python2.7/site-packages/django/core/context_processors.py | 80 | 2274 | """
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by
RequestContext.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.functional import lazy
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
return smart_text(token)
_get_val = lazy(_get_val, six.text_type)
return {'csrf_token': _get_val()}
def debug(request):
"Returns context variables helpful for debugging."
context_extras = {}
if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
context_extras['debug'] = True
from django.db import connection
context_extras['sql_queries'] = connection.queries
return context_extras
def i18n(request):
from django.utils import translation
context_extras = {}
context_extras['LANGUAGES'] = settings.LANGUAGES
context_extras['LANGUAGE_CODE'] = translation.get_language()
context_extras['LANGUAGE_BIDI'] = translation.get_language_bidi()
return context_extras
def tz(request):
from django.utils import timezone
return {'TIME_ZONE': timezone.get_current_timezone_name()}
def static(request):
"""
Adds static-related context variables to the context.
"""
return {'STATIC_URL': settings.STATIC_URL}
def media(request):
"""
Adds media-related context variables to the context.
"""
return {'MEDIA_URL': settings.MEDIA_URL}
def request(request):
return {'request': request}
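# A minimal sketch of a custom processor following the same pattern (the name
# and the SITE_NAME setting below are illustrative, not part of Django); it
# would be enabled by listing its dotted path in TEMPLATE_CONTEXT_PROCESSORS.
def example_site_name(request):
    return {'SITE_NAME': getattr(settings, 'SITE_NAME', '')}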
| gpl-2.0 |
leighpauls/k2cro4 | tools/gyp/pylib/gyp/MSVSVersion.py | 122 | 13527 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
if (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# Use the 64-on-64 compiler if we can.
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
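# Illustrative usage (machine dependent; returns None when the key or value is
# missing):
#   install_dir = _RegistryGetValue(
#       r'HKLM\Software\Microsoft\VisualStudio\10.0', 'InstallDir')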
def _RegistryKeyExists(key):
"""Use reg.exe to see if a key exists.
Args:
key: The registry key to check.
Return:
True if the key exists
"""
if not _RegistryQuery(key):
return False
return True
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-11 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005', '9.0': '2008', '10.0': '2010', '11.0': '2012'}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, 'vcexpress.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif os.path.exists(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto'):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
}
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
| bsd-3-clause |
fanquake/bitcoin | test/functional/feature_maxuploadtarget.py | 35 | 6653 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-maxuploadtarget=800",
"-acceptnonstdtxn=1",
"-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
]]
self.supports_cli = False
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
# p2p_conns[0] will test what happens if we just keep requesting the
        # same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
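        # (Sketch of the arithmetic above: 144 blocks/day * 4,000,000 bytes
        # = 576 MB buffer; the 800 MiB target minus that buffer leaves roughly
        # 262 MB, and dividing by old_block_size gives success_count.)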
for i in range(success_count):
p2p_conns[0].send_and_ping(getdata_request)
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for _ in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(800):
p2p_conns[1].send_and_ping(getdata_request)
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_and_ping(getdata_request)
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget")
self.restart_node(0, ["[email protected]", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
peer = self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(20):
peer.send_and_ping(getdata_request)
assert_equal(peer.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
peer.send_and_ping(getdata_request)
self.log.info("Peer still connected after trying to download old block (download permission)")
peer_info = self.nodes[0].getpeerinfo()
assert_equal(len(peer_info), 1) # node is still connected
assert_equal(peer_info[0]['permissions'], ['download'])
if __name__ == '__main__':
MaxUploadTest().main()
| mit |
llou/panopticon | panopticon/core/database.py | 1 | 7145 | # database.py is part of Panopticon.
# Panopticon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Panopticon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Panopticon. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from paramiko import RSAKey as pRSAKey, DSSKey
from sqlalchemy import create_engine, Column, DateTime, String, Integer, Text, Boolean
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy.sql import not_
from sqlalchemy.schema import ForeignKey
from sqlalchemy.pool import NullPool
from sqlalchemy.ext.declarative import declarative_base
from panopticon.core.util.database import key_value_property
Base = declarative_base()
class Value(Base):
__tablename__ = "values"
id = Column(Integer(), primary_key=True)
name = Column(String(1000))
value = Column(String(1000), nullable=True)
parent_id = Column(Integer, ForeignKey("values.id"), nullable=True)
values = relationship("Value", backref=backref('parent', remote_side=[id],
cascade="all"))
type = Column(String(20))
def __init__(self, name, _type, value="", parent_id=None):
self.name = name
self.type = _type
self.value = value
self.parent_id = parent_id
@property
def root(self):
        return self.id == self.parent_id
class Service(Base):
__tablename__ = "services"
name = Column(String(50), primary_key=True)
class Computer(Base):
__tablename__ = "computers"
__table_args__ = {'sqlite_autoincrement':True}
name = Column(String(255), primary_key=True)
key_name = Column(String(100), ForeignKey('keys.name', onupdate="CASCADE"))
active = Column(Boolean(), default=True)
key = relationship("Key", backref=backref('computers'))
logs = relationship("Log", backref="computer", order_by="Log.time")
def __init__(self, name, key_name="", active=True):
self.name = name
self.active = active
self.key_name = key_name
class Log(Base):
__tablename__ = "logs"
id = Column('id', Integer, primary_key=True)
time = Column(DateTime())
level = Column(String(10))
message = Column(Text())
computer_name = Column(String(255), ForeignKey('computers.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
service_name = Column(String(255), ForeignKey('services.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
role_name = Column(String(255), index=True)
action_name = Column(String(255), index=True)
def __init__(self, time, level, message, computer_name="",
service_name="", role_name="", action_name=""):
self.time = time
self.level = level
self.message = message
        self.computer_name = computer_name
        self.service_name = service_name
        self.role_name = role_name
        self.action_name = action_name
class FileTrack(Base):
__tablename__ = "filetracks"
uid = Column("uid", String(32), primary_key=True)
_computer_name = Column("computer_name", String(255),ForeignKey('computers.name'))
_path = Column("path", Text())
modification_time = Column("modification_time", DateTime())
md5 = Column("md5", String(32))
def __init__(self, computer_name, path, modification_time, md5=""):
self.computer_name = computer_name
self.path = path
self.modification_time = modification_time
self.md5 = md5
self.update_uid()
@property
def computer_name(self):
return self._computer_name
@computer_name.setter
def computer_name(self, value):
self._computer_name = value
self.update_uid()
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
self.update_uid()
def update_uid(self):
if self.computer_name and self.path:
self.uid = "%s:%s" % (self.computer_name, self.path)
else:
self.uid = ""
class Key(Base):
__tablename__ = "keys"
name = Column(String(100), primary_key=True)
algorithm = Column(String(20))
v1 = Column(String(2048))
v2 = Column(String(2048))
v3 = Column(String(2048))
v4 = Column(String(2048))
key_class = None
key_vals = []
__mapper_args__ = {'polymorphic_on' : algorithm}
@classmethod
def build_from_paramiko_key(cls, name, p_key):
if isinstance(p_key, pRSAKey):
return RSAKey(name, p_key.e, p_key.n)
elif isinstance(p_key, DSSKey):
return DSAKey(name, p_key.p, p_key.q, p_key.g, p_key.y)
else:
raise Exception("Not valid key")
def __init__(self, name, algorithm, v1, v2, v3, v4):
self.name = name
self.algorithm = algorithm
self.v1 = v1
self.v2 = v2
self.v3 = v3
self.v4 = v4
def get_paramiko_key(self):
vals = [ getattr(self, x) for x in self.key_vals ]
return self.key_class(vals=vals)
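# Round-trip sketch (illustrative names/paths only): store a paramiko key and
# recover it later.
#   stored = Key.build_from_paramiko_key('host1', pRSAKey.from_private_key_file('/tmp/id_rsa'))
#   pkey = stored.get_paramiko_key()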
class RSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'rsa'}
key_class = pRSAKey
key_vals = [ 'e', 'n' ]
def __init__(self, name, e, n):
self.name = name
self.algorithm = "rsa"
self.e = e
self.n = n
e = key_value_property("v1")
n = key_value_property("v2")
class DSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'dsa'}
key_class = DSSKey
key_vals = [ 'p', 'q', 'g', 'y' ]
def __init__(self, name, p, q, g, y):
self.name = name
self.algorithm = "dsa"
self.p = p
self.q = q
self.g = g
self.y = y
p = key_value_property("v1")
q = key_value_property("v2")
g = key_value_property("v3")
y = key_value_property("v4")
class PanopticonDB(object):
def __init__(self, panopticon, engine=None):
self.panopticon = panopticon
self.engine = engine if engine is not None else create_engine(panopticon.db_url, poolclass=NullPool)
Base.metadata.create_all(self.engine)
self.Session = sessionmaker(bind=self.engine)
self.sync()
@contextmanager
def get_session(self):
session = self.Session()
yield session
session.commit()
session.close()
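    # Illustrative use of the context manager above (a sketch, names are made up):
    #   db = PanopticonDB(panopticon)
    #   with db.get_session() as session:
    #       session.add(Computer('host1', key_name='lab-key'))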
def purge(self,sure=False):
if sure:
Base.metadata.drop_all(self.engine)
Base.metadata.create_all(self.engine)
def sync(self):
computer_names = [ x[0] for x in self.panopticon.computers ]
with self.get_session() as session:
session.execute(Computer.__table__.update().where(Computer.name.in_(computer_names)).values(active=True))
            # computers no longer listed in the configuration are marked inactive
            session.execute(Computer.__table__.update().where(not_(Computer.name.in_(computer_names))).values(active=False))
| gpl-3.0 |
i-namekawa/TopSideMonitor | plotting.py | 1 | 37323 | import os, sys, time
from glob import glob
import cv2
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
matplotlib.rcParams['figure.facecolor'] = 'w'
from scipy.signal import argrelextrema
import scipy.stats as stats
import scipy.io as sio
from scipy import signal
from xlwt import Workbook
# specify these in mm to match your behavior chamber.
CHMAMBER_LENGTH=235
WATER_HIGHT=40
# TODO: quick plot should also show xy_within and location_one_third etc.
# TODO: summary PDF should handle the case where a pickle file is missing a fish
#       that is present in the other pickle files.
## these three taken from http://stackoverflow.com/a/18420730/566035
def strided_sliding_std_dev(data, radius=5):
windowed = rolling_window(data, (2*radius, 2*radius))
shape = windowed.shape
windowed = windowed.reshape(shape[0], shape[1], -1)
return windowed.std(axis=-1)
def rolling_window(a, window):
"""Takes a numpy array *a* and a sequence of (or single) *window* lengths
and returns a view of *a* that represents a moving window."""
if not hasattr(window, '__iter__'):
return rolling_window_lastaxis(a, window)
for i, win in enumerate(window):
if win > 1:
a = a.swapaxes(i, -1)
a = rolling_window_lastaxis(a, win)
a = a.swapaxes(-2, i)
return a
def rolling_window_lastaxis(a, window):
"""Directly taken from Erik Rigtorp's post to numpy-discussion.
<http://www.mail-archive.com/[email protected]/msg29450.html>"""
if window < 1:
raise ValueError, "`window` must be at least 1."
if window > a.shape[-1]:
raise ValueError, "`window` is too long."
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
## stealing ends here... //
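# Illustrative use of the three helpers above (a sketch): a 100x100 array with
# the default radius of 5 gives a 91x91 map of local standard deviations.
#   img = np.random.rand(100, 100)
#   stdmap = strided_sliding_std_dev(img, radius=5)   # stdmap.shape == (91, 91)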
def filterheadxy(headx,heady,thrs_denom=10):
b, a = signal.butter(8, 0.125)
dhy = np.abs(np.hstack((0, np.diff(heady,1))))
thrs = np.nanstd(dhy)/thrs_denom
ind2remove = dhy>thrs
headx[ind2remove] = np.nan
heady[ind2remove] = np.nan
headx = interp_nan(headx)
heady = interp_nan(heady)
headx = signal.filtfilt(b, a, headx, padlen=150)
heady = signal.filtfilt(b, a, heady, padlen=150)
return headx,heady
def smoothRad(theta, thrs=np.pi/4*3):
jumps = (np.diff(theta) > thrs).nonzero()[0]
print 'jumps.size', jumps.size
while jumps.size:
# print '%d/%d' % (jumps[0], theta.size)
theta[jumps+1] -= np.pi
jumps = (np.diff(theta) > thrs).nonzero()[0]
return theta
def datadct2array(data, key1, key2):
# put these in a MATLAB CELL
trialN = len(data[key1][key2])
matchedUSnameP = np.zeros((trialN,), dtype=np.object)
fnameP = np.zeros((trialN,), dtype=np.object)
# others to append to a list
eventsP = []
speed3DP = []
movingSTDP = []
d2inflowP = []
xP, yP, zP = [], [], []
XP, YP, ZP = [], [], []
ringpixelsP = []
peaks_withinP = []
swimdir_withinP = []
xy_withinP = []
location_one_thirdP = []
dtheta_shapeP = []
dtheta_velP = []
turns_shapeP = []
turns_velP = []
for n, dct in enumerate(data[key1][key2]):
# MATLAB CELL
matchedUSnameP[n] = dct['matchedUSname']
fnameP[n] = dct['fname']
# 2D array
eventsP.append([ele if type(ele) is not list else ele[0] for ele in dct['events']])
speed3DP.append(dct['speed3D'])
movingSTDP.append(dct['movingSTD'])
d2inflowP.append(dct['d2inflow'])
xP.append(dct['x'])
yP.append(dct['y'])
zP.append(dct['z'])
XP.append(dct['X'])
YP.append(dct['Y'])
ZP.append(dct['Z'])
ringpixelsP.append(dct['ringpixels'])
peaks_withinP.append(dct['peaks_within'])
swimdir_withinP.append(dct['swimdir_within'])
xy_withinP.append(dct['xy_within'])
location_one_thirdP.append(dct['location_one_third'])
dtheta_shapeP.append(dct['dtheta_shape'])
dtheta_velP.append(dct['dtheta_vel'])
turns_shapeP.append(dct['turns_shape'])
turns_velP.append(dct['turns_vel'])
TVroi = np.array(dct['TVroi'])
SVroi = np.array(dct['SVroi'])
return matchedUSnameP, fnameP, np.array(eventsP), np.array(speed3DP), np.array(d2inflowP), \
np.array(xP), np.array(yP), np.array(zP), np.array(XP), np.array(YP), np.array(ZP), \
np.array(ringpixelsP), np.array(peaks_withinP), np.array(swimdir_withinP), \
np.array(xy_withinP), np.array(dtheta_shapeP), np.array(dtheta_velP), \
np.array(turns_shapeP), np.array(turns_velP), TVroi, SVroi
def pickle2mat(fp, data=None):
# fp : full path to pickle file
# data : option to provide data to skip np.load(fp)
if not data:
data = np.load(fp)
for key1 in data.keys():
for key2 in data[key1].keys():
matchedUSname, fname, events, speed3D, d2inflow, x, y, z, X, Y, Z, \
ringpixels, peaks_within, swimdir_within, xy_within, dtheta_shape, dtheta_vel, \
turns_shape, turns_vel, TVroi, SVroi = datadct2array(data, key1, key2)
datadict = {
'matchedUSname' : matchedUSname,
'fname' : fname,
'events' : events,
'speed3D' : speed3D,
'd2inflow' : d2inflow,
'x' : x,
'y' : y,
'z' : z,
'X' : X,
'Y' : Y,
'Z' : Z,
'ringpixels' : ringpixels,
'peaks_within' : peaks_within,
'swimdir_within' : swimdir_within,
'xy_within' : xy_within,
'dtheta_shape' : dtheta_shape,
'dtheta_vel' : dtheta_vel,
'turns_shape' : turns_shape,
'turns_vel' : turns_vel,
'TVroi' : TVroi,
'SVroi' : SVroi,
}
outfp = '%s_%s_%s.mat' % (fp[:-7],key1,key2)
sio.savemat(outfp, datadict, oned_as='row', do_compression=True)
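# Illustrative call (the path is a placeholder): writes one .mat file per
# (fish, CS-name) entry found in the loaded data, next to the input file.
#   pickle2mat(r'C:\data\session\Jan-01-2015_00_00_00.pickle')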
def interp_nan(x):
'''
    Replace nan by interpolation
http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
'''
ok = -np.isnan(x)
if (ok == False).all():
return x
else:
xp = ok.ravel().nonzero()[0]
fp = x[ok]
_x = np.isnan(x).ravel().nonzero()[0]
x[-ok] = np.interp(_x, xp, fp)
return x
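# Example (a sketch): interp_nan(np.array([1., np.nan, 3.])) gives [1., 2., 3.];
# an all-NaN array is returned unchanged.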
def polytest(x,y,rx,ry,rw,rh,rang):
points=cv2.ellipse2Poly(
(rx,ry),
axes=(rw/2,rh/2),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
return cv2.pointPolygonTest(np.array(points), (x,y), measureDist=1)
def depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3):
z0 = z - SVy1
x0 = x - TVx1
mid = (SVy2-SVy1)/2
adj = (z0 - mid) / (SVy2-SVy1) * (SVy2-SVy3) * (1-(x0)/float(TVx2-TVx1))
return z0 + adj + SVy1 # back to abs coord
def putNp2xls(array, ws):
for r, row in enumerate(array):
for c, val in enumerate(row):
ws.write(r, c, val)
def drawLines(mi, ma, events, fps=30.0):
CS, USs, preRange = events
plot([CS-preRange, CS-preRange], [mi,ma], '--c') # 2 min prior odor
plot([CS , CS ], [mi,ma], '--g', linewidth=2) # CS onset
if USs:
if len(USs) > 3:
colors = 'r' * len(USs)
else:
colors = [_ for _ in ['r','b','c'][:len(USs)]]
for c,us in zip(colors, USs):
plot([us, us],[mi,ma], linestyle='--', color=c, linewidth=2) # US onset
plot([USs[0]+preRange/2,USs[0]+preRange/2], [mi,ma], linestyle='--', color=c, linewidth=2) # end of US window
xtck = np.arange(0, max(CS+preRange, max(USs)), 0.5*60*fps) # every 0.5 min tick
else:
xtck = np.arange(0, CS+preRange, 0.5*60*fps) # every 0.5 min tick
xticks(xtck, xtck/fps/60)
gca().xaxis.set_minor_locator(MultipleLocator(5*fps)) # 5 s minor ticks
def approachevents(x,y,z, ringpolyTVArray, ringpolySVArray, fishlength=134, thrs=None):
'''
    fishlength: some old scripts may call this with fishlength
    thrs: the multitrack GUI provides this via the ringAppearochLevel spin control.
          It can be a numpy array (to track water level change etc.)
'''
smoothedz = np.convolve(np.hanning(10)/np.hanning(10).sum(), z, 'same')
peaks = argrelextrema(smoothedz, np.less)[0] # less because 0 is top in image.
# now filter peaks by height.
ringLevel = ringpolySVArray[:,1]
if thrs is None:
thrs = ringLevel+fishlength/2
if type(thrs) == int: # can be numpy array or int
thrs = ringLevel.mean() + thrs
peaks = peaks[ z[peaks] < thrs ]
else: # numpy array should be ready to use
peaks = peaks[ z[peaks] < thrs[peaks] ]
# now filter out by TVringCenter
peaks_within = get_withinring(ringpolyTVArray, peaks, x, y)
return smoothedz, peaks_within
def get_withinring(ringpolyTVArray, timepoints, x, y):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
# poly test
peaks_within = []
for p in timepoints:
points=cv2.ellipse2Poly(
(rx[p],ry[p]),
axes=(rw[p]/2,rh[p]/2),
angle=rang[p],
arcStart=0,
arcEnd=360,
delta=3
)
inout = cv2.pointPolygonTest(np.array(points), (x[p],y[p]), measureDist=1)
if inout > 0:
peaks_within.append(p)
return peaks_within
def location_ring(x,y,ringpolyTVArray):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
d2ringcenter = np.sqrt((x-rx)**2 + (y-ry)**2)
# filter by radius 20% buffer in case the ring moves around
indices = (d2ringcenter < 1.2*max(rw.max(), rh.max())).nonzero()[0]
xy_within = get_withinring(ringpolyTVArray, indices, x, y)
return xy_within
def swimdir_analysis(x,y,z,ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps=30.0):
# smoothing
# z = np.convolve(np.hanning(16)/np.hanning(16).sum(), z, 'same')
# two cameras have different zoom settings. So, distance per pixel is different. But, for
# swim direction, it does not matter how much x,y are compressed relative to z.
# ring z level from SV
rz = ringpolySVArray[:,1].astype(np.int)
# ring all other params from TV
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
speed3D = np.hstack(([0], speed3D))
# line in 3D http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
# x-x0 y-y0 z-z0
# ---- = ---- = ----
# a b c
# solve them for z = rz. x0,y0,z0 are tvx, tvy, svy
# x = (a * (rz-z)) / c + x0
dt = 3 # define slope as diff between current and dt frame before
a = np.hstack( (np.ones(dt), x[dt:]-x[:-dt]) )
b = np.hstack( (np.ones(dt), y[dt:]-y[:-dt]) )
c = np.hstack( (np.ones(dt), z[dt:]-z[:-dt]) )
c[c==0] = np.nan # avoid zero division
water_x = (a * (rz-z) / c) + x
water_y = (b * (rz-z) / c) + y
upwards = c<-2/30.0*fps # not accurate when c is small or negative
xok = (TVx1 < water_x) & (water_x < TVx2)
yok = (TVy1 < water_y) & (water_y < TVy2)
filtered = upwards & xok & yok# & -np.isinf(water_x) & -np.isinf(water_y)
water_x[-filtered] = np.nan
water_y[-filtered] = np.nan
# figure()
# ax = subplot(111)
# ax.imshow(npData['TVbg'], cmap=cm.gray) # clip out from TVx1,TVy1
# ax.plot(x-TVx1, y-TVy1, 'c')
# ax.plot(water_x-TVx1, water_y-TVy1, 'r.')
# xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
# draw(); show()
SwimDir = []
for n in filtered.nonzero()[0]:
inout = polytest(water_x[n],water_y[n],rx[n],ry[n],rw[n],rh[n],rang[n])
SwimDir.append((n, inout, speed3D[n])) # inout>0 are inside
return SwimDir, water_x, water_y
def plot_eachTr(events, x, y, z, inflowpos, ringpixels, peaks_within, swimdir_within=None,
pp=None, _title=None, fps=30.0, inmm=False):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(511) # Swimming speed
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
drawLines(np.nanmin(speed3D), np.nanmax(speed3D), events, fps) # go behind
plot(speed3D)
movingSTD = np.append( np.zeros(fps*10), strided_sliding_std_dev(speed3D, fps*10) )
plot(movingSTD, linewidth=2)
plot(np.ones_like(speed3D) * speed3D.std()*6, '-.', color='gray')
ylim([-5, speed3D[xmin:xmax].max()])
xlim([xmin,xmax]); title(_title)
if inmm:
ylabel('Speed 3D (mm),\n6SD thr');
else:
ylabel('Speed 3D, 6SD thr');
ax = subplot(512) # z level
drawLines(z.min(), z.max(), events)
plot(z, 'b')
pkx = peaks_within.nonzero()[0]
if inmm:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].max()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].max()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('Z (mm)')
else:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].min()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].min()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
ax.invert_yaxis(); xlim([xmin,xmax]); ylabel('z')
subplot(513) # x
drawLines(x.min(), x.max(), events)
plot(x, 'b')
plot(y, 'g')
xlim([xmin,xmax]); ylabel('x,y')
subplot(514) # Distance to the inflow tube
xin, yin, zin = inflowpos
d2inflow = np.sqrt((x-xin) ** 2 + (y-yin) ** 2 + (z-zin) ** 2 )
drawLines(d2inflow.min(), d2inflow.max(), events)
plot(d2inflow)
ylim([d2inflow[xmin:xmax].min(), d2inflow[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('distance to\ninflow tube')
subplot(515) # ringpixels: it seems i never considered TV x,y for this
rpmax, rpmin = np.nanmax(ringpixels[xmin:xmax]), np.nanmin(ringpixels[xmin:xmax])
drawLines(rpmin, rpmax, events)
plot(ringpixels)
plot(pkx, peaks_within[pkx]*rpmax*1.06, 'mo')
if swimdir_within is not None:
plot(___x, swimdir_within[___x]*rpmax*1.15, 'g+')
ylim([-100, rpmax*1.2])
xlim([xmin,xmax]); ylabel('ringpixels')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
rng = np.arange(CS-preRange, CS+preRange, dtype=np.int)
return speed3D[rng], movingSTD[rng], d2inflow[rng], ringpixels[rng]
def plot_turnrates(events, dthetasum_shape,dthetasum_vel,turns_shape,turns_vel,
pp=None, _title=None, thrs=np.pi/4*(133.33333333333334/120), fps=30.0):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(211)
drawLines(dthetasum_shape.min(), dthetasum_shape.max(), events)
plot(np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(dthetasum_shape)
dmax = dthetasum_shape[xmin:xmax].max()
plot(turns_shape, (0.5+dmax)*np.ones_like(turns_shape), 'o')
temp = np.zeros_like(dthetasum_shape)
temp[turns_shape] = 1
shape_cumsum = np.cumsum(temp)
shape_cumsum -= shape_cumsum[xmin]
plot( shape_cumsum / shape_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
xlim([xmin,xmax]); ylabel('Shape based'); title('Orientation change per 4 frames: ' + _title)
ylim([dthetasum_shape[xmin:xmax].min()-1, dmax+1])
subplot(212)
drawLines(dthetasum_vel.min(), dthetasum_vel.max(), events)
plot(np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(dthetasum_vel)
dmax = dthetasum_vel[xmin:xmax].max()
plot(turns_vel, (0.5+dmax)*np.ones_like(turns_vel), 'o')
temp = np.zeros_like(dthetasum_vel)
temp[turns_vel] = 1
vel_cumsum = np.cumsum(temp)
vel_cumsum -= vel_cumsum[xmin]
plot( vel_cumsum / vel_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
ylim([dthetasum_vel[xmin:xmax].min()-1, dmax+1])
xlim([xmin,xmax]); ylabel('Velocity based')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
def trajectory(x, y, z, rng, ax, _xlim=[0,640], _ylim=[480,480+300], _zlim=[150,340],
color='b', fps=30.0, ringpolygon=None):
ax.plot(x[rng],y[rng],z[rng], color=color)
ax.view_init(azim=-75, elev=-180+15)
if ringpolygon:
rx, ry, rz = ringpolygon
ax.plot(rx, ry, rz, color='gray')
ax.set_xlim(_xlim[0],_xlim[1])
ax.set_ylim(_ylim[0],_ylim[1])
ax.set_zlim(_zlim[0],_zlim[1])
title(("(%2.1f min to %2.1f min)" % (rng[0]/fps/60.0,(rng[-1]+1)/60.0/fps)))
draw()
def plotTrajectory(x, y, z, events, _xlim=None, _ylim=None, _zlim=None, fps=30.0, pp=None, ringpolygon=None):
CS, USs, preRange = events
rng1 = np.arange(CS-preRange, CS-preRange/2, dtype=int)
rng2 = np.arange(CS-preRange/2, CS, dtype=int)
if USs:
rng3 = np.arange(CS, min(USs), dtype=int)
rng4 = np.arange(min(USs), min(USs)+preRange/2, dtype=int)
combined = np.hstack((rng1,rng2,rng3,rng4))
else:
combined = np.hstack((rng1,rng2))
if _xlim is None:
_xlim = map( int, ( x[combined].min(), x[combined].max() ) )
if _ylim is None:
_ylim = map( int, ( y[combined].min(), y[combined].max() ) )
if _zlim is None:
_zlim = map( int, ( z[combined].min(), z[combined].max() ) )
if ringpolygon:
_zlim[0] = min( _zlim[0], int(ringpolygon[2][0]) )
fig3D = plt.figure(figsize=(12,8), facecolor='w')
ax = fig3D.add_subplot(221, projection='3d'); trajectory(x,y,z,rng1,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
ax = fig3D.add_subplot(222, projection='3d'); trajectory(x,y,z,rng2,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
if USs:
ax = fig3D.add_subplot(223, projection='3d'); trajectory(x,y,z,rng3,ax,_xlim,_ylim,_zlim,'g',fps,ringpolygon)
ax = fig3D.add_subplot(224, projection='3d'); trajectory(x,y,z,rng4,ax,_xlim,_ylim,_zlim,'r',fps,ringpolygon)
tight_layout()
if pp:
fig3D.savefig(pp, format='pdf')
def add2DataAndPlot(fp, fish, data, createPDF):
if createPDF:
pp = PdfPages(fp[:-7]+'_'+fish+'.pdf')
else:
pp = None
params = np.load(fp)
fname = os.path.basename(fp).split('.')[0] + '.avi'
dirname = os.path.dirname(fp)
preRange = params[(fname, 'mog')]['preRange']
fps = params[(fname, 'mog')]['fps']
TVx1 = params[(fname, fish)]['TVx1']
TVy1 = params[(fname, fish)]['TVy1']
TVx2 = params[(fname, fish)]['TVx2']
TVy2 = params[(fname, fish)]['TVy2']
SVx1 = params[(fname, fish)]['SVx1']
SVx2 = params[(fname, fish)]['SVx2']
SVx3 = params[(fname, fish)]['SVx3']
SVy1 = params[(fname, fish)]['SVy1']
SVy2 = params[(fname, fish)]['SVy2']
SVy3 = params[(fname, fish)]['SVy3']
ringAppearochLevel = params[(fname, fish)]['ringAppearochLevel']
_npz = os.path.join(dirname, os.path.join('%s_%s.npz' % (fname[:-4], fish)))
# if os.path.exists(_npz):
npData = np.load(_npz)
tvx = npData['TVtracking'][:,0] # x with nan
tvy = npData['TVtracking'][:,1] # y
headx = npData['TVtracking'][:,3] # headx
heady = npData['TVtracking'][:,4] # heady
svy = npData['SVtracking'][:,1] # z
InflowTubeTVArray = npData['InflowTubeTVArray']
InflowTubeSVArray = npData['InflowTubeSVArray']
inflowpos = InflowTubeTVArray[:,0], InflowTubeTVArray[:,1], InflowTubeSVArray[:,1]
ringpixels = npData['ringpixel']
ringpolyTVArray = npData['ringpolyTVArray']
ringpolySVArray = npData['ringpolySVArray']
TVbg = npData['TVbg']
print os.path.basename(_npz), 'loaded.'
x,y,z = map(interp_nan, [tvx,tvy,svy])
# z level correction by depth (x)
z = depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3)
smoothedz, peaks_within = approachevents(x, y, z,
ringpolyTVArray, ringpolySVArray, thrs=ringAppearochLevel)
# convert to numpy array from list
temp = np.zeros_like(x)
temp[peaks_within] = 1
peaks_within = temp
# normalize to mm
longaxis = float(max((TVx2-TVx1), (TVy2-TVy1))) # before rotation H is applied they are orthogonal
waterlevel = float(SVy2-SVy1)
X = (x-TVx1) / longaxis * CHMAMBER_LENGTH
Y = (TVy2-y) / longaxis * CHMAMBER_LENGTH
Z = (SVy2-z) / waterlevel * WATER_HIGHT # bottom of chamber = 0, higher more positive
inflowpos_mm = ((inflowpos[0]-TVx1) / longaxis * CHMAMBER_LENGTH,
(TVy2-inflowpos[1]) / longaxis * CHMAMBER_LENGTH,
(SVy2-inflowpos[2]) / waterlevel * WATER_HIGHT )
# do the swim direction analysis here
swimdir, water_x, water_y = swimdir_analysis(x,y,z,
ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps)
# all of swimdir are within ROI (frame#, inout, speed) but not necessary within ring
sdir = np.array(swimdir)
withinRing = sdir[:,1]>0 # inout>0 are inside ring
temp = np.zeros_like(x)
temp[ sdir[withinRing,0].astype(int) ] = 1
swimdir_within = temp
# location_ring
xy_within = location_ring(x,y, ringpolyTVArray)
temp = np.zeros_like(x)
temp[xy_within] = 1
xy_within = temp
# location_one_third
if (TVx2-TVx1) > (TVy2-TVy1):
if np.abs(np.arange(TVx1, longaxis+TVx1, longaxis/3) + longaxis/6 - inflowpos[0].mean()).argmin() == 2:
location_one_third = x-TVx1 > longaxis/3*2
else:
location_one_third = x < longaxis/3
else:
if np.abs(np.arange(TVy1, longaxis+TVy1, longaxis/3) + longaxis/6 - inflowpos[1].mean()).argmin() == 2:
location_one_third = y-TVy1 > longaxis/3*2
else:
location_one_third = y < longaxis/3
# turn rate analysis (shape based)
heady, headx = map(interp_nan, [heady, headx])
headx, heady = filterheadxy(headx, heady)
dy = heady - y
dx = headx - x
theta_shape = np.arctan2(dy, dx)
# velocity based
cx, cy = filterheadxy(x.copy(), y.copy()) # centroid x,y
vx = np.append(0, np.diff(cx))
vy = np.append(0, np.diff(cy))
theta_vel = np.arctan2(vy, vx)
# prepare ringpolygon for trajectory plot
rx, ry, rw, rh, rang = ringpolyTVArray.mean(axis=0).astype(int) # use mm ver above
rz = ringpolySVArray.mean(axis=0)[1].astype(int)
RX = (rx-TVx1) / longaxis * CHMAMBER_LENGTH
RY = (TVy2-ry) / longaxis * CHMAMBER_LENGTH
RW = rw / longaxis * CHMAMBER_LENGTH / 2
RH = rh / longaxis * CHMAMBER_LENGTH / 2
RZ = (SVy2-rz) / waterlevel * WATER_HIGHT
points = cv2.ellipse2Poly(
(RX.astype(int),RY.astype(int)),
axes=(RW.astype(int),RH.astype(int)),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
ringpolygon = [points[:,0], points[:,1], np.ones(points.shape[0]) * RZ]
eventTypeKeys = params[(fname, fish)]['EventData'].keys()
CSs = [_ for _ in eventTypeKeys if _.startswith('CS')]
USs = [_ for _ in eventTypeKeys if _.startswith('US')]
# print CSs, USs
# events
for CS in CSs:
CS_Timings = params[(fname, fish)]['EventData'][CS]
CS_Timings.sort()
# initialize when needed
if CS not in data[fish].keys():
data[fish][CS] = []
# now look around for US after it within preRange
for t in CS_Timings:
tr = len(data[fish][CS])+1
rng = np.arange(t-preRange, t+preRange, dtype=np.int)
matchedUSname = None
for us in USs:
us_Timings = params[(fname, fish)]['EventData'][us]
matched = [_ for _ in us_Timings if t-preRange < _ < t+preRange]
if matched:
events = [t, matched, preRange] # ex. CS+
matchedUSname = us
break
else:
continue
_title = '(%s, %s) trial#%02d %s (%s)' % (CS, matchedUSname[0], tr, fname, fish)
print _title, events
_speed3D, _movingSTD, _d2inflow, _ringpixels = plot_eachTr(events, X, Y, Z, inflowpos_mm,
ringpixels, peaks_within, swimdir_within, pp, _title, fps, inmm=True)
# 3d trajectory
_xlim = (0, CHMAMBER_LENGTH)
_zlim = (RZ.max(),0)
plotTrajectory(X, Y, Z, events, _xlim=_xlim, _zlim=_zlim, fps=fps, pp=pp, ringpolygon=ringpolygon)
# turn rate analysis
# shape based
theta_shape[rng] = smoothRad(theta_shape[rng].copy(), thrs=np.pi/2)
dtheta_shape = np.append(0, np.diff(theta_shape)) # full length
kernel = np.ones(4)
dthetasum_shape = np.convolve(dtheta_shape, kernel, 'same')
# 4 frames = 1000/30.0*4 = 133.3 ms
thrs = (np.pi / 2) * (133.33333333333334/120) # Braubach et al 2009 90 degree in 120 ms
peaks_shape = argrelextrema(abs(dthetasum_shape), np.greater)[0]
turns_shape = peaks_shape[ (abs(dthetasum_shape[peaks_shape]) > thrs).nonzero()[0] ]
# velocity based
theta_vel[rng] = smoothRad(theta_vel[rng].copy(), thrs=np.pi/2)
dtheta_vel = np.append(0, np.diff(theta_vel))
dthetasum_vel = np.convolve(dtheta_vel, kernel, 'same')
peaks_vel = argrelextrema(abs(dthetasum_vel), np.greater)[0]
turns_vel = peaks_vel[ (abs(dthetasum_vel[peaks_vel]) > thrs).nonzero()[0] ]
plot_turnrates(events, dthetasum_shape, dthetasum_vel, turns_shape, turns_vel, pp, _title, fps=fps)
_temp = np.zeros_like(dtheta_shape)
_temp[turns_shape] = 1
turns_shape_array = _temp
_temp = np.zeros_like(dtheta_vel)
_temp[turns_vel] = 1
turns_vel_array = _temp
# plot swim direction analysis
fig = figure(figsize=(12,8), facecolor='w')
ax1 = subplot(211)
ax1.imshow(TVbg, cmap=cm.gray) # TVbg is clip out of ROI
ax1.plot(x[rng]-TVx1, y[rng]-TVy1, 'gray')
ax1.plot(water_x[t-preRange:t]-TVx1, water_y[t-preRange:t]-TVy1, 'c.')
if matched:
ax1.plot( water_x[t:matched[0]]-TVx1,
water_y[t:matched[0]]-TVy1, 'g.')
ax1.plot( water_x[matched[0]:matched[0]+preRange/4]-TVx1,
water_y[matched[0]:matched[0]+preRange/4]-TVy1, 'r.')
xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
title(_title)
ax2 = subplot(212)
ax2.plot( swimdir_within )
ax2.plot( peaks_within*1.15-0.1, 'mo' )
if matched:
xmin, xmax = t-preRange-10*fps, matched[0]+preRange/4
else:
xmin, xmax = t-preRange-10*fps, t+preRange/2+10*fps
gzcs = np.cumsum(swimdir_within)
gzcs -= gzcs[xmin]
ax2.plot( gzcs/gzcs[xmax] )
drawLines(0,1.2, events)
ylim([0,1.2])
xlim([xmin, xmax])
ylabel('|: SwimDirection\no: approach events')
data[fish][CS].append( {
'fname' : fname,
'x': x[rng], 'y': y[rng], 'z': z[rng],
'X': X[rng], 'Y': Y[rng], 'Z': Z[rng], # calibrate space (mm)
'speed3D': _speed3D, # calibrate space (mm)
'movingSTD' : _movingSTD, # calibrate space (mm)
'd2inflow': _d2inflow, # calibrate space (mm)
'ringpixels': _ringpixels,
'peaks_within': peaks_within[rng],
'xy_within': xy_within[rng],
'location_one_third' : location_one_third[rng],
'swimdir_within' : swimdir_within[rng],
'dtheta_shape': dtheta_shape[rng],
'dtheta_vel': dtheta_vel[rng],
'turns_shape': turns_shape_array[rng], # already +/- preRange
'turns_vel': turns_vel_array[rng],
'events' : events,
'matchedUSname' : matchedUSname,
'TVroi' : (TVx1,TVy1,TVx2,TVy2),
'SVroi' : (SVx1,SVy1,SVx2,SVy2),
} )
if pp:
fig.savefig(pp, format='pdf')
close('all') # release memory ASAP!
if pp:
pp.close()
def getPDFs(pickle_files, fishnames=None, createPDF=True):
# type checking args
if type(pickle_files) is str:
pickle_files = [pickle_files]
# convert to a list or set of fish names
if type(fishnames) is str:
fishnames = [fishnames]
elif not fishnames:
fishnames = set()
# re-organize trials into a dict "data"
data = {}
# figure out trial number (sometime many trials in one files) for each fish
# go through all pickle_files and use timestamps of file to sort events.
timestamps = []
for fp in pickle_files:
# collect ctime of pickled files
fname = os.path.basename(fp).split('.')[0] + '.avi'
timestamps.append( time.strptime(fname, "%b-%d-%Y_%H_%M_%S.avi") )
# look into the pickle and collect fish analyzed
params = np.load(fp) # loading pickled file!
if type(fishnames) is set:
for fish in [fs for fl,fs in params.keys() if fl == fname and fs != 'mog']:
fishnames.add(fish)
timestamps = sorted(range(len(timestamps)), key=timestamps.__getitem__)
# For each fish, go thru all pickled files
for fish in fishnames:
data[fish] = {}
        # now go through the pickled files in chronological order
for ind in timestamps:
fp = pickle_files[ind]
print 'processing #%d\n%s' % (ind, fp)
add2DataAndPlot(fp, fish, data, createPDF)
return data
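# Example usage (illustrative; the file name and fish ID are placeholders):
#   data = getPDFs(['Jun-01-2015_10_00_00.pickle'], fishnames='F01', createPDF=False)
#   data['F01'] is then a dict mapping each CS name to a list of per-trial dicts.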
def plotTrials(data, fish, CSname, key, step, offset=0, pp=None):
fig = figure(figsize=(12,8), facecolor='w')
ax1 = fig.add_subplot(121) # raw trace
ax2 = fig.add_subplot(222) # learning curve
ax3 = fig.add_subplot(224) # bar plot
preP, postP, postP2 = [], [], []
longestUS = 0
for n, measurement in enumerate(data[fish][CSname]):
tr = n+1
CS, USs, preRange = measurement['events']
subplot(ax1)
mi = -step*(tr-1)
ma = mi + step
drawLines(mi, ma, (preRange, [preRange+(USs[0]-CS)], preRange))
longestUS = max([us-CS+preRange*3/2 for us in USs]+[longestUS])
# 'measurement[key]': vector around the CS timing (+/-) preRange. i.e., preRange is the center
ax1.plot(measurement[key]-step*(tr-1)+offset)
title(CSname+': '+key) # cf. preRange = 3600 frames
pre = measurement[key][:preRange].mean()+offset # 2 min window
post = measurement[key][preRange:preRange+(USs[0]-CS)].mean()+offset # 23 s window
post2 = measurement[key][preRange+(USs[0]-CS):preRange*3/2+(USs[0]-CS)].mean()+offset # 1 min window after US
preP.append(pre)
postP.append(post)
postP2.append(post2)
ax3.plot([1, 2, 3], [pre, post, post2],'o-')
ax1.set_xlim([0,longestUS])
ax1.axis('off')
subplot(ax2)
x = range(1, tr+1)
y = np.diff((preP,postP), axis=0).ravel()
ax2.plot( x, y, 'ko-', linewidth=2 )
ax2.plot( x, np.zeros_like(x), '-.', linewidth=1, color='gray' )
# grid()
slope, intercept, rvalue, pval, stderr = stats.stats.linregress(x,y)
title('slope = zero? p-value = %f' % pval)
ax2.set_xlabel("Trial#")
ax2.set_xlim([0.5,tr+0.5])
ax2.set_ylabel('CS - pre')
subplot(ax3)
ax3.bar([0.6, 1.6, 2.6], [np.nanmean(preP), np.nanmean(postP), np.nanmean(postP2)], facecolor='none')
t, pval = stats.ttest_rel(postP, preP)
title('paired t p-value = %f' % pval)
ax3.set_xticks([1,2,3])
ax3.set_xticklabels(['pre', CSname, measurement['matchedUSname']])
ax3.set_xlim([0.5,3.5])
ax3.set_ylabel('Raw mean values')
tight_layout(2, h_pad=1, w_pad=1)
if pp:
fig.savefig(pp, format='pdf')
close('all')
return np.vstack((preP, postP, postP2))
def getSummary(data, dirname=None):
for fish in data.keys():
for CSname in data[fish].keys():
            pp = None
            if dirname:
                pp = PdfPages(os.path.join(dirname, '%s_for_%s.pdf' % (CSname,fish)))
                print 'generating %s_for_%s.pdf' % (CSname,fish)
book = Workbook()
sheet1 = book.add_sheet('speed3D')
avgs = plotTrials(data, fish, CSname, 'speed3D', 30, pp=pp)
putNp2xls(avgs, sheet1)
sheet2 = book.add_sheet('d2inflow')
avgs = plotTrials(data, fish, CSname, 'd2inflow', 200, pp=pp)
putNp2xls(avgs, sheet2)
# sheet3 = book.add_sheet('smoothedz')
sheet3 = book.add_sheet('Z')
# avgs = plotTrials(data, fish, CSname, 'smoothedz', 100, pp=pp)
avgs = plotTrials(data, fish, CSname, 'Z', 30, pp=pp)
putNp2xls(avgs, sheet3)
sheet4 = book.add_sheet('ringpixels')
avgs = plotTrials(data, fish, CSname, 'ringpixels', 1200, pp=pp)
putNp2xls(avgs, sheet4)
sheet5 = book.add_sheet('peaks_within')
avgs = plotTrials(data, fish, CSname, 'peaks_within', 1.5, pp=pp)
putNp2xls(avgs, sheet5)
sheet6 = book.add_sheet('swimdir_within')
avgs = plotTrials(data, fish, CSname, 'swimdir_within', 1.5, pp=pp)
putNp2xls(avgs, sheet6)
sheet7 = book.add_sheet('xy_within')
avgs = plotTrials(data, fish, CSname, 'xy_within', 1.5, pp=pp)
putNp2xls(avgs, sheet7)
sheet8 = book.add_sheet('turns_shape')
avgs = plotTrials(data, fish, CSname, 'turns_shape', 1.5, pp=pp)
putNp2xls(avgs, sheet8)
sheet9 = book.add_sheet('turns_vel')
avgs = plotTrials(data, fish, CSname, 'turns_vel', 1.5, pp=pp)
putNp2xls(avgs, sheet9)
if dirname:
pp.close()
book.save(os.path.join(dirname, '%s_for_%s.xls' % (CSname,fish)))
close('all')
else:
show()
def add2Pickles(dirname, pickle_files):
# dirname : folder to look for pickle files
# pickle_files : output, a list to be concatenated.
pattern = os.path.join(dirname, '*.pickle')
temp = [_ for _ in glob(pattern) if not _.endswith('- Copy.pickle') and
not os.path.basename(_).startswith('Summary')]
pickle_files += temp
if __name__ == '__main__':
pickle_files = []
# small test data
# add2Pickles('R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test', pickle_files)
# outputdir = 'R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test'
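    # NOTE: populate pickle_files (e.g. via add2Pickles) and define outputdir
    # before running the analysis below; the commented paths above are examples.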
# show me what you got
for pf in pickle_files:
print pf
fp = os.path.join(outputdir, 'Summary.pickle')
createPDF = True # useful when plotting etc code updated
if 1: # refresh analysis
data = getPDFs(pickle_files, createPDF=createPDF)
import cPickle as pickle
with open(os.path.join(outputdir, 'Summary.pickle'), 'wb') as f:
pickle.dump(data, f)
else: # or reuse previous
data = np.load(fp)
getSummary(data, outputdir)
pickle2mat(fp, data)
| bsd-3-clause |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 12 | 9744 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import histogram_ops
class Strict1dCumsumTest(tf.test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = tf.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = tf.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = tf.constant([3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = tf.constant([3], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = tf.constant([1, 2, 3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = tf.constant([1, 3, 6], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(tf.test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = tf.constant([], shape=[0], dtype=tf.bool)
scores = tf.constant([], shape=[0], dtype=tf.float32)
score_range = [0, 1.]
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels, scores,
score_range)
tf.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = tf.placeholder(tf.bool, shape=[num_records])
scores = tf.placeholder(tf.float32, shape=[num_records])
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels,
scores,
score_range,
nbins=nbins)
tf.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
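  # Quick sanity check of the formula (illustrative): desired_auc = 0.75 gives
  # x = 0.5, so T ~ U[0.5, 1] and P[T > F] = 1*0.5 + 0.5*(1 - 0.5) = 0.75.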
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
DavidNorman/tensorflow | tensorflow/python/ops/weights_broadcast_ops.py | 133 | 7197 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weight broadcasting operations.
In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. This
file includes operations for those broadcasting rules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sets
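# Illustrative example of the supported broadcasting (not part of the module's
# public API): for values of shape [2, 3, 4], acceptable weights shapes are a
# scalar, [2, 3, 4], [2, 1, 4], [1, 3, 1], etc. -- the same rank with every
# dimension either 1 or equal to the corresponding values dimension.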
def _has_valid_dims(weights_shape, values_shape):
with ops.name_scope(
None, "has_invalid_dims", (weights_shape, values_shape)) as scope:
values_shape_2d = array_ops.expand_dims(values_shape, -1)
valid_dims = array_ops.concat(
(values_shape_2d, array_ops.ones_like(values_shape_2d)), axis=1)
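    # A weights dim is valid iff it equals the matching values dim or is 1; the
    # set difference below keeps only offending dims, so an empty result means
    # the weights shape is broadcastable.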
weights_shape_2d = array_ops.expand_dims(weights_shape, -1)
invalid_dims = sets.set_difference(weights_shape_2d, valid_dims)
num_invalid_dims = array_ops.size(
invalid_dims.values, name="num_invalid_dims")
return math_ops.equal(0, num_invalid_dims, name=scope)
def _has_valid_nonscalar_shape(
weights_rank, weights_shape, values_rank, values_shape):
with ops.name_scope(
None, "has_valid_nonscalar_shape",
(weights_rank, weights_shape, values_rank, values_shape)) as scope:
is_same_rank = math_ops.equal(
values_rank, weights_rank, name="is_same_rank")
return control_flow_ops.cond(
is_same_rank,
lambda: _has_valid_dims(weights_shape, values_shape),
lambda: is_same_rank,
name=scope)
_ASSERT_BROADCASTABLE_ERROR_PREFIX = "weights can not be broadcast to values."
def assert_broadcastable(weights, values):
"""Asserts `weights` can be broadcast to `values`.
In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We
let weights be either scalar, or the same rank as the target values, with each
dimension either 1, or the same as the corresponding values dimension.
Args:
weights: `Tensor` of weights.
values: `Tensor` of values to which weights are applied.
Returns:
`Operation` raising `InvalidArgumentError` if `weights` has incorrect shape.
`no_op` if static checks determine `weights` has correct shape.
Raises:
ValueError: If static checks determine `weights` has incorrect shape.
"""
with ops.name_scope(None, "assert_broadcastable", (weights, values)) as scope:
with ops.name_scope(None, "weights", (weights,)) as weights_scope:
weights = ops.convert_to_tensor(weights, name=weights_scope)
weights_shape = array_ops.shape(weights, name="shape")
weights_rank = array_ops.rank(weights, name="rank")
weights_rank_static = tensor_util.constant_value(weights_rank)
with ops.name_scope(None, "values", (values,)) as values_scope:
values = ops.convert_to_tensor(values, name=values_scope)
values_shape = array_ops.shape(values, name="shape")
values_rank = array_ops.rank(values, name="rank")
values_rank_static = tensor_util.constant_value(values_rank)
# Try static checks.
if weights_rank_static is not None and values_rank_static is not None:
if weights_rank_static == 0:
return control_flow_ops.no_op(name="static_scalar_check_success")
if weights_rank_static != values_rank_static:
raise ValueError(
"%s values.rank=%s. weights.rank=%s."
" values.shape=%s. weights.shape=%s." % (
_ASSERT_BROADCASTABLE_ERROR_PREFIX, values_rank_static,
weights_rank_static, values.shape, weights.shape))
weights_shape_static = tensor_util.constant_value(weights_shape)
values_shape_static = tensor_util.constant_value(values_shape)
if weights_shape_static is not None and values_shape_static is not None:
# Sanity check, this should always be true since we checked rank above.
ndims = len(values_shape_static)
assert ndims == len(weights_shape_static)
for i in range(ndims):
if weights_shape_static[i] not in (1, values_shape_static[i]):
raise ValueError(
"%s Mismatch at dim %s. values.shape=%s weights.shape=%s." % (
_ASSERT_BROADCASTABLE_ERROR_PREFIX, i, values_shape_static,
weights_shape_static))
return control_flow_ops.no_op(name="static_dims_check_success")
# Dynamic checks.
is_scalar = math_ops.equal(0, weights_rank, name="is_scalar")
data = (
_ASSERT_BROADCASTABLE_ERROR_PREFIX,
"weights.shape=", weights.name, weights_shape,
"values.shape=", values.name, values_shape,
"is_scalar=", is_scalar,
)
is_valid_shape = control_flow_ops.cond(
is_scalar,
lambda: is_scalar,
lambda: _has_valid_nonscalar_shape( # pylint: disable=g-long-lambda
weights_rank, weights_shape, values_rank, values_shape),
name="is_valid_shape")
return control_flow_ops.Assert(is_valid_shape, data, name=scope)
def broadcast_weights(weights, values):
"""Broadcast `weights` to the same shape as `values`.
This returns a version of `weights` following the same broadcast rules as
`mul(weights, values)`, but limited to the weights shapes allowed by
`assert_broadcastable`. When computing a weighted average, use this function
to broadcast `weights` before summing them; e.g.,
`reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
Args:
weights: `Tensor` whose shape is broadcastable to `values` according to the
rules of `assert_broadcastable`.
values: `Tensor` of any shape.
Returns:
`weights` broadcast to `values` shape according to the rules of
`assert_broadcastable`.
"""
with ops.name_scope(None, "broadcast_weights", (weights, values)) as scope:
values = ops.convert_to_tensor(values, name="values")
weights = ops.convert_to_tensor(
weights, dtype=values.dtype.base_dtype, name="weights")
# Try static check for exact match.
weights_shape = weights.get_shape()
values_shape = values.get_shape()
if (weights_shape.is_fully_defined() and
values_shape.is_fully_defined() and
weights_shape.is_compatible_with(values_shape)):
return weights
with ops.control_dependencies((assert_broadcastable(weights, values),)):
return math_ops.multiply(
weights, array_ops.ones_like(values), name=scope)
| apache-2.0 |
ecederstrand/django | django/db/backends/base/features.py | 193 | 9883 | from django.db.models.aggregates import StdDev
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures(object):
gis_enabled = False
allows_group_by_pk = False
allows_group_by_selected_pks = False
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_savepoints = False
can_release_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver support timedeltas as arguments?
# This is only relevant when there is a native duration field.
# Specifically, there is a bug with cx_Oracle:
# https://bitbucket.org/anthony_tuininga/cx_oracle/issue/7/
driver_supports_timedelta_args = False
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have an autoincrement primary key of 0? MySQL says No.
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend determine reliably the length of a CharField?
can_introspect_max_length = True
# Can the backend determine reliably if a field is nullable?
# Note that this is separate from interprets_empty_strings_as_nulls,
# although the latter feature, when true, interferes with correct
# setting (and introspection) of CharFields' nullability.
# This is True for all core backends.
can_introspect_null = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Can the backend introspect a BigIntegerField, instead of an IntegerField?
can_introspect_big_integer_field = True
    # Can the backend introspect a BinaryField, instead of a TextField?
    can_introspect_binary_field = True
    # Can the backend introspect a DecimalField, instead of a FloatField?
    can_introspect_decimal_field = True
    # Can the backend introspect an IPAddressField, instead of a CharField?
can_introspect_ip_address_field = False
# Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
can_introspect_positive_integer_field = False
# Can the backend introspect a SmallIntegerField, instead of an IntegerField?
can_introspect_small_integer_field = False
# Can the backend introspect a TimeField, instead of a DateTimeField?
can_introspect_time_field = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = True
# Does the backend require the sqlparse library for splitting multi-line
# statements before executing them?
requires_sqlparse_for_splitting = True
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ''
# If NULL is implied on columns without needing to be explicitly specified
implied_column_null = False
uppercases_column_names = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
# Does the backend ignore null expressions in GREATEST and LEAST queries unless
# every expression is null?
greatest_least_ignores_nulls = False
# Can the backend clone databases for parallel test execution?
# Defaults to False to allow third-party backends to opt-in.
can_clone_databases = False
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
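            # Probe: create a throwaway table, insert a row inside an explicit
            # transaction, roll back, and verify the row is gone (count == 0
            # means the rollback -- and hence transactions -- actually worked).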
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.set_autocommit(False)
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
return count == 0
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions."""
try:
self.connection.ops.check_expression_support(StdDev(1))
return True
except NotImplementedError:
return False
def introspected_boolean_field_type(self, field=None, created_separately=False):
"""
What is the type returned when the backend introspects a BooleanField?
The optional arguments may be used to give further details of the field to be
introspected; in particular, they are provided by Django's test suite:
field -- the field definition
created_separately -- True if the field was added via a SchemaEditor's AddField,
False if the field was created with the model
        Note that the return value from this function is compared by tests against actual
introspection results; it should provide expectations, not run an introspection
itself.
"""
if self.can_introspect_null and field and field.null:
return 'NullBooleanField'
return 'BooleanField'
| bsd-3-clause |
kubeup/archon | vendor/github.com/influxdata/influxdb/build.py | 21 | 41033 | #!/usr/bin/python2.7 -u
import sys
import os
import subprocess
import time
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
################
#### InfluxDB Variables
################
# Packaging variables
PACKAGE_NAME = "influxdb"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/influxdb"
DATA_DIR = "/var/lib/influxdb"
SCRIPT_DIR = "/usr/lib/influxdb/scripts"
CONFIG_DIR = "/etc/influxdb"
LOGROTATE_DIR = "/etc/logrotate.d"
MAN_DIR = "/usr/share/man"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/influxdb.service"
PREINST_SCRIPT = "scripts/pre-install.sh"
POSTINST_SCRIPT = "scripts/post-install.sh"
POSTUNINST_SCRIPT = "scripts/post-uninstall.sh"
LOGROTATE_SCRIPT = "scripts/logrotate"
DEFAULT_CONFIG = "etc/config.sample.toml"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts"
CONFIGURATION_FILES = [
CONFIG_DIR + '/influxdb.conf',
LOGROTATE_DIR + '/influxdb',
]
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/influxdb"
MAINTAINER = "[email protected]"
VENDOR = "InfluxData"
DESCRIPTION = "Distributed time-series database."
prereqs = [ 'git', 'go' ]
go_vet_command = "go tool vet ./"
optional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--after-install {} \
--before-install {} \
--after-remove {} \
--license {} \
--maintainer {} \
--directories {} \
--directories {} \
--directories {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
POSTINST_SCRIPT,
PREINST_SCRIPT,
POSTUNINST_SCRIPT,
PACKAGE_LICENSE,
MAINTAINER,
LOG_DIR,
DATA_DIR,
MAN_DIR,
DESCRIPTION)
for f in CONFIGURATION_FILES:
fpm_common_args += " --config-files {}".format(f)
targets = {
'influx' : './cmd/influx',
'influxd' : './cmd/influxd',
'influx_stress' : './cmd/influx_stress',
'influx_inspect' : './cmd/influx_inspect',
'influx_tsm' : './cmd/influx_tsm',
}
supported_builds = {
'darwin': [ "amd64" ],
'windows': [ "amd64" ],
'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ]
}
supported_packages = {
"darwin": [ "tar" ],
"linux": [ "deb", "rpm", "tar" ],
"windows": [ "zip" ],
}
################
#### InfluxDB Functions
################
def print_banner():
logging.info("""
___ __ _ ___ ___
|_ _|_ _ / _| |_ ___ _| \\| _ )
| || ' \\| _| | || \\ \\ / |) | _ \\
|___|_||_|_| |_|\\_,_/_\\_\\___/|___/
Build Script
""")
def create_package_fs(build_root):
"""Create a filesystem structure to mimic the package filesystem.
"""
logging.debug("Creating package filesystem at location: {}".format(build_root))
# Using [1:] for the path names due to them being absolute
# (will overwrite previous paths, per 'os.path.join' documentation)
dirs = [ INSTALL_ROOT_DIR[1:],
LOG_DIR[1:],
DATA_DIR[1:],
SCRIPT_DIR[1:],
CONFIG_DIR[1:],
LOGROTATE_DIR[1:],
MAN_DIR[1:] ]
for d in dirs:
os.makedirs(os.path.join(build_root, d))
os.chmod(os.path.join(build_root, d), 0o755)
def package_scripts(build_root, config_only=False, windows=False):
"""Copy the necessary scripts and configuration files to the package
filesystem.
"""
if config_only:
logging.debug("Copying configuration to build directory.")
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf"))
os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644)
else:
logging.debug("Copying scripts and sample configuration to build directory.")
shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"))
os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0o644)
shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"))
os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644)
def package_man_files(build_root):
"""Copy and gzip man pages to the package filesystem."""
logging.debug("Installing man pages.")
run("make -C man/ clean install DESTDIR={}/usr".format(build_root))
for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])):
for f in files:
run("gzip -9n {}".format(os.path.join(path, f)))
def run_generate():
"""Run 'go generate' to rebuild any static assets.
"""
logging.info("Running 'go generate'...")
if not check_path_for("statik"):
run("go install github.com/rakyll/statik")
orig_path = None
if os.path.join(os.environ.get("GOPATH"), "bin") not in os.environ["PATH"].split(os.pathsep):
        orig_path = os.environ["PATH"]
        os.environ["PATH"] = os.pathsep.join(
            os.environ["PATH"].split(os.pathsep) +
            [os.path.join(os.environ.get("GOPATH"), "bin")])
run("rm -f ./services/admin/statik/statik.go")
run("go generate ./services/admin")
if orig_path is not None:
os.environ["PATH"] = orig_path
return True
def go_get(branch, update=False, no_uncommitted=False):
"""Retrieve build dependencies or restore pinned dependencies.
"""
if local_changes() and no_uncommitted:
logging.error("There are uncommitted changes in the current directory.")
return False
if not check_path_for("gdm"):
logging.info("Downloading `gdm`...")
get_command = "go get github.com/sparrc/gdm"
run(get_command)
logging.info("Retrieving dependencies with `gdm`...")
sys.stdout.flush()
run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH")))
return True
def run_tests(race, parallel, timeout, no_vet):
"""Run the Go test suite on binary output.
"""
logging.info("Starting tests...")
if race:
logging.info("Race is enabled.")
if parallel is not None:
logging.info("Using parallel: {}".format(parallel))
if timeout is not None:
logging.info("Using timeout: {}".format(timeout))
out = run("go fmt ./...")
if len(out) > 0:
logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.")
logging.error("{}".format(out))
return False
if not no_vet:
logging.info("Running 'go vet'...")
out = run(go_vet_command)
if len(out) > 0:
logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.")
logging.error("{}".format(out))
return False
else:
logging.info("Skipping 'go vet' call...")
test_command = "go test -v"
if race:
test_command += " -race"
if parallel is not None:
test_command += " -parallel {}".format(parallel)
if timeout is not None:
test_command += " -timeout {}".format(timeout)
test_command += " ./..."
logging.info("Running tests...")
output = run(test_command)
logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore')))
return True
################
#### All InfluxDB-specific content above this line
################
def run(command, allow_failure=False, shell=False):
"""Run shell command (convenience wrapper around subprocess).
"""
out = None
logging.debug("{}".format(command))
try:
if shell:
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
else:
out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
out = out.decode('utf-8').strip()
# logging.debug("Command output: {}".format(out))
except subprocess.CalledProcessError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e.output))
return None
else:
logging.error("Command '{}' failed with error: {}".format(command, e.output))
sys.exit(1)
except OSError as e:
if allow_failure:
logging.warn("Command '{}' failed with error: {}".format(command, e))
return out
else:
logging.error("Command '{}' failed with error: {}".format(command, e))
sys.exit(1)
else:
return out
def create_temp_dir(prefix = None):
""" Create temporary directory with optional prefix.
"""
if prefix is None:
return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME))
else:
return tempfile.mkdtemp(prefix=prefix)
def increment_minor_version(version):
"""Return the version with the minor version incremented and patch
version set to zero.
"""
ver_list = version.split('.')
if len(ver_list) != 3:
logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version))
return version
ver_list[1] = str(int(ver_list[1]) + 1)
ver_list[2] = str(0)
inc_version = '.'.join(ver_list)
logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
return inc_version
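# For example (illustrative): increment_minor_version("1.2.3") returns "1.3.0".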
def get_current_version_tag():
"""Retrieve the raw git version tag.
"""
version = run("git describe --always --tags --abbrev=0")
return version
def get_current_version():
"""Parse version information from git tag output.
"""
version_tag = get_current_version_tag()
# Remove leading 'v'
if version_tag[0] == 'v':
version_tag = version_tag[1:]
# Replace any '-'/'_' with '~'
if '-' in version_tag:
version_tag = version_tag.replace("-","~")
if '_' in version_tag:
version_tag = version_tag.replace("_","~")
return version_tag
def get_current_commit(short=False):
"""Retrieve the current git commit.
"""
command = None
if short:
command = "git log --pretty=format:'%h' -n 1"
else:
command = "git rev-parse HEAD"
out = run(command)
return out.strip('\'\n\r ')
def get_current_branch():
"""Retrieve the current git branch.
"""
command = "git rev-parse --abbrev-ref HEAD"
out = run(command)
return out.strip()
def local_changes():
"""Return True if there are local un-committed changes.
"""
output = run("git diff-files --ignore-submodules --").strip()
if len(output) > 0:
return True
return False
def get_system_arch():
"""Retrieve current system architecture.
"""
arch = os.uname()[4]
if arch == "x86_64":
arch = "amd64"
elif arch == "386":
arch = "i386"
elif 'arm' in arch:
# Prevent uname from reporting full ARM arch (eg 'armv7l')
arch = "arm"
return arch
def get_system_platform():
"""Retrieve current system platform.
"""
if sys.platform.startswith("linux"):
return "linux"
else:
return sys.platform
def get_go_version():
"""Retrieve version information for Go.
"""
out = run("go version")
matches = re.search('go version go(\S+)', out)
if matches is not None:
return matches.groups()[0].strip()
return None
def check_path_for(b):
    """Check the user's PATH for the provided binary.
    """
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
full_path = os.path.join(path, b)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
return full_path
def check_environ(build_dir = None):
"""Check environment for common Go variables.
"""
logging.info("Checking environment...")
for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
logging.debug("Using '{}' for {}".format(os.environ.get(v), v))
cwd = os.getcwd()
if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.")
return True
def check_prereqs():
"""Check user path for required dependencies.
"""
logging.info("Checking for dependencies...")
for req in prereqs:
if not check_path_for(req):
logging.error("Could not find dependency: {}".format(req))
return False
return True
def upload_packages(packages, bucket_name=None, overwrite=False):
"""Upload provided package output to AWS S3.
"""
logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
try:
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
logging.getLogger("boto").setLevel(logging.WARNING)
except ImportError:
logging.warn("Cannot upload packages without 'boto' Python library!")
return False
logging.info("Connecting to AWS S3...")
# Up the number of attempts to 10 from default of 1
boto.config.add_section("Boto")
boto.config.set("Boto", "metadata_service_num_attempts", "10")
c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
if bucket_name is None:
bucket_name = DEFAULT_BUCKET
bucket = c.get_bucket(bucket_name.split('/')[0])
for p in packages:
if '/' in bucket_name:
# Allow for nested paths within the bucket name (ex:
# bucket/folder). Assuming forward-slashes as path
# delimiter.
name = os.path.join('/'.join(bucket_name.split('/')[1:]),
os.path.basename(p))
else:
name = os.path.basename(p)
logging.debug("Using key: {}".format(name))
if bucket.get_key(name) is None or overwrite:
logging.info("Uploading file {}".format(name))
k = Key(bucket)
k.key = name
if overwrite:
n = k.set_contents_from_filename(p, replace=True)
else:
n = k.set_contents_from_filename(p, replace=False)
k.make_public()
else:
logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
return True
def go_list(vendor=False, relative=False):
    """
    Return a list of packages.
    If vendor is False, vendor packages are not included.
    If relative is True, the package prefix defined by PACKAGE_URL is stripped.
    """
p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
packages = out.split('\n')
if packages[-1] == '':
packages = packages[:-1]
if not vendor:
non_vendor = []
for p in packages:
if '/vendor/' not in p:
non_vendor.append(p)
packages = non_vendor
if relative:
relative_pkgs = []
for p in packages:
r = p.replace(PACKAGE_URL, '.')
if r != '.':
relative_pkgs.append(r)
packages = relative_pkgs
return packages
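# For example (illustrative), go_list(relative=True) could return entries such
# as './cmd/influx' and './cmd/influxd'; the exact list depends on the checkout.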
def build(version=None,
platform=None,
arch=None,
nightly=False,
race=False,
clean=False,
outdir=".",
tags=[],
static=False):
"""Build each target for the specified architecture and platform.
"""
logging.info("Starting build for {}/{}...".format(platform, arch))
logging.info("Using Go version: {}".format(get_go_version()))
logging.info("Using git branch: {}".format(get_current_branch()))
logging.info("Using git commit: {}".format(get_current_commit()))
if static:
logging.info("Using statically-compiled output.")
if race:
logging.info("Race is enabled.")
if len(tags) > 0:
logging.info("Using build tags: {}".format(','.join(tags)))
logging.info("Sending build output to: {}".format(outdir))
if not os.path.exists(outdir):
os.makedirs(outdir)
elif clean and outdir != '/' and outdir != ".":
logging.info("Cleaning build directory '{}' before building.".format(outdir))
shutil.rmtree(outdir)
os.makedirs(outdir)
logging.info("Using version '{}' for build.".format(version))
for target, path in targets.items():
logging.info("Building target: {}".format(target))
build_command = ""
# Handle static binary output
if static is True or "static_" in arch:
if "static_" in arch:
static = True
arch = arch.replace("static_", "")
build_command += "CGO_ENABLED=0 "
# Handle variations in architecture output
if arch == "i386" or arch == "i686":
arch = "386"
elif "arm" in arch:
arch = "arm"
build_command += "GOOS={} GOARCH={} ".format(platform, arch)
if "arm" in arch:
if arch == "armel":
build_command += "GOARM=5 "
elif arch == "armhf" or arch == "arm":
build_command += "GOARM=6 "
elif arch == "arm64":
# TODO(rossmcdonald) - Verify this is the correct setting for arm64
build_command += "GOARM=7 "
else:
logging.error("Invalid ARM architecture specified: {}".format(arch))
logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
return False
if platform == 'windows':
target = target + '.exe'
build_command += "go build -o {} ".format(os.path.join(outdir, target))
if race:
build_command += "-race "
if len(tags) > 0:
build_command += "-tags {} ".format(','.join(tags))
if "1.4" in get_go_version():
if static:
build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
# Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'
if static:
build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
get_current_branch(),
get_current_commit())
else:
build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version,
get_current_branch(),
get_current_commit())
if static:
build_command += "-a -installsuffix cgo "
build_command += path
start_time = datetime.utcnow()
run(build_command, shell=True)
end_time = datetime.utcnow()
logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
return True
def generate_md5_from_file(path):
"""Generate MD5 signature based on the contents of the file at path.
"""
m = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
m.update(chunk)
return m.hexdigest()
def generate_sig_from_file(path):
"""Generate a detached GPG signature from the file at path.
"""
logging.debug("Generating GPG signature for file: {}".format(path))
gpg_path = check_path_for('gpg')
if gpg_path is None:
logging.warn("gpg binary not found on path! Skipping signature creation.")
return False
if os.environ.get("GNUPG_HOME") is not None:
run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path))
else:
run('gpg --armor --detach-sign --yes {}'.format(path))
return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
"""Package the output of the build process.
"""
outfiles = []
tmp_build_dir = create_temp_dir()
logging.debug("Packaging for build output: {}".format(build_output))
logging.info("Using temporary directory: {}".format(tmp_build_dir))
try:
for platform in build_output:
# Create top-level folder displaying which platform (linux, etc)
os.makedirs(os.path.join(tmp_build_dir, platform))
for arch in build_output[platform]:
logging.info("Creating packages for {}/{}".format(platform, arch))
# Create second-level directory displaying the architecture (amd64, etc)
current_location = build_output[platform][arch]
# Create directory tree to mimic file system of package
build_root = os.path.join(tmp_build_dir,
platform,
arch,
'{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
os.makedirs(build_root)
# Copy packaging scripts to build directory
if platform == "windows":
# For windows and static builds, just copy
# binaries to root of package (no other scripts or
# directories)
package_scripts(build_root, config_only=True, windows=True)
elif static or "static_" in arch:
package_scripts(build_root, config_only=True)
else:
create_package_fs(build_root)
package_scripts(build_root)
if platform != "windows":
package_man_files(build_root)
for binary in targets:
# Copy newly-built binaries to packaging directory
if platform == 'windows':
binary = binary + '.exe'
if platform == 'windows' or static or "static_" in arch:
# Where the binary should go in the package filesystem
to = os.path.join(build_root, binary)
# Where the binary currently is located
fr = os.path.join(current_location, binary)
else:
# Where the binary currently is located
fr = os.path.join(current_location, binary)
# Where the binary should go in the package filesystem
to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
shutil.copy(fr, to)
for package_type in supported_packages[platform]:
# Package the directory structure for each package type for the platform
logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
name = pkg_name
# Reset version, iteration, and current location on each run
# since they may be modified below.
package_version = version
package_iteration = iteration
if "static_" in arch:
# Remove the "static_" from the displayed arch on the package
package_arch = arch.replace("static_", "")
else:
package_arch = arch
if not release and not nightly:
# For non-release builds, just use the commit hash as the version
package_version = "{}~{}".format(version,
get_current_commit(short=True))
package_iteration = "0"
package_build_root = build_root
current_location = build_output[platform][arch]
if package_type in ['zip', 'tar']:
# For tars and zips, start the packaging one folder above
# the build root (to include the package name)
package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
if nightly:
if static or "static_" in arch:
name = '{}-static-nightly_{}_{}'.format(name,
platform,
package_arch)
else:
name = '{}-nightly_{}_{}'.format(name,
platform,
package_arch)
else:
if static or "static_" in arch:
name = '{}-{}-static_{}_{}'.format(name,
package_version,
platform,
package_arch)
else:
name = '{}-{}_{}_{}'.format(name,
package_version,
platform,
package_arch)
current_location = os.path.join(os.getcwd(), current_location)
if package_type == 'tar':
tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
run(tar_command, shell=True)
run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".tar.gz")
outfiles.append(outfile)
elif package_type == 'zip':
zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
run(zip_command, shell=True)
run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
outfile = os.path.join(current_location, name + ".zip")
outfiles.append(outfile)
                    elif package_type not in ['zip', 'tar'] and (static or "static_" in arch):
logging.info("Skipping package type '{}' for static builds.".format(package_type))
else:
fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
fpm_common_args,
name,
package_arch,
package_type,
package_version,
package_iteration,
package_build_root,
current_location)
if package_type == "rpm":
fpm_command += "--depends coreutils --rpm-posttrans {}".format(POSTINST_SCRIPT)
out = run(fpm_command, shell=True)
matches = re.search(':path=>"(.*)"', out)
outfile = None
if matches is not None:
outfile = matches.groups()[0]
if outfile is None:
logging.warn("Could not determine output from packaging output!")
else:
if nightly:
# Strip nightly version from package name
new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
os.rename(outfile, new_outfile)
outfile = new_outfile
else:
if package_type == 'rpm':
# rpm's convert any dashes to underscores
package_version = package_version.replace("-", "_")
new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
os.rename(outfile, new_outfile)
outfile = new_outfile
outfiles.append(os.path.join(os.getcwd(), outfile))
logging.debug("Produced package files: {}".format(outfiles))
return outfiles
finally:
# Cleanup
shutil.rmtree(tmp_build_dir)
def main(args):
global PACKAGE_NAME
if args.release and args.nightly:
logging.error("Cannot be both a nightly and a release.")
return 1
if args.nightly:
args.version = increment_minor_version(args.version)
args.version = "{}~n{}".format(args.version,
datetime.utcnow().strftime("%Y%m%d%H%M"))
args.iteration = 0
# Pre-build checks
check_environ()
if not check_prereqs():
return 1
if args.build_tags is None:
args.build_tags = []
else:
args.build_tags = args.build_tags.split(',')
orig_commit = get_current_commit(short=True)
orig_branch = get_current_branch()
if args.platform not in supported_builds and args.platform != 'all':
        logging.error("Invalid build platform: {}".format(args.platform))
return 1
build_output = {}
if args.branch != orig_branch and args.commit != orig_commit:
logging.error("Can only specify one branch or commit to build from.")
return 1
elif args.branch != orig_branch:
logging.info("Moving to git branch: {}".format(args.branch))
run("git checkout {}".format(args.branch))
elif args.commit != orig_commit:
logging.info("Moving to git commit: {}".format(args.commit))
run("git checkout {}".format(args.commit))
if not args.no_get:
if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
return 1
if args.generate:
if not run_generate():
return 1
if args.test:
if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
return 1
platforms = []
single_build = True
if args.platform == 'all':
platforms = supported_builds.keys()
single_build = False
else:
platforms = [args.platform]
for platform in platforms:
build_output.update( { platform : {} } )
archs = []
if args.arch == "all":
single_build = False
archs = supported_builds.get(platform)
else:
archs = [args.arch]
for arch in archs:
od = args.outdir
if not single_build:
od = os.path.join(args.outdir, platform, arch)
if not build(version=args.version,
platform=platform,
arch=arch,
nightly=args.nightly,
race=args.race,
clean=args.clean,
outdir=od,
tags=args.build_tags,
static=args.static):
return 1
build_output.get(platform).update( { arch : od } )
# Build packages
if args.package:
if not check_path_for("fpm"):
logging.error("FPM ruby gem required for packaging. Stopping.")
return 1
packages = package(build_output,
args.name,
args.version,
nightly=args.nightly,
iteration=args.iteration,
static=args.static,
release=args.release)
if args.sign:
logging.debug("Generating GPG signatures for packages: {}".format(packages))
sigs = [] # retain signatures so they can be uploaded with packages
for p in packages:
if generate_sig_from_file(p):
sigs.append(p + '.asc')
else:
logging.error("Creation of signature for package [{}] failed!".format(p))
return 1
packages += sigs
if args.upload:
logging.debug("Files staged for upload: {}".format(packages))
if args.nightly:
args.upload_overwrite = True
if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
return 1
logging.info("Packages created:")
for p in packages:
logging.info("{} (MD5={})".format(p.split('/')[-1:][0],
generate_md5_from_file(p)))
if orig_branch != get_current_branch():
logging.info("Moving back to original git branch: {}".format(orig_branch))
run("git checkout {}".format(orig_branch))
return 0
if __name__ == '__main__':
LOG_LEVEL = logging.INFO
if '--debug' in sys.argv[1:]:
LOG_LEVEL = logging.DEBUG
log_format = '[%(levelname)s] %(funcName)s: %(message)s'
logging.basicConfig(level=LOG_LEVEL,
format=log_format)
parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
parser.add_argument('--verbose','-v','--debug',
action='store_true',
help='Use debug output')
parser.add_argument('--outdir', '-o',
metavar='<output directory>',
default='./build/',
type=os.path.abspath,
help='Output directory')
parser.add_argument('--name', '-n',
metavar='<name>',
default=PACKAGE_NAME,
type=str,
help='Name to use for package name (when package is specified)')
parser.add_argument('--arch',
metavar='<amd64|i386|armhf|arm64|armel|all>',
type=str,
default=get_system_arch(),
help='Target architecture for build output')
parser.add_argument('--platform',
metavar='<linux|darwin|windows|all>',
type=str,
default=get_system_platform(),
help='Target platform for build output')
parser.add_argument('--branch',
metavar='<branch>',
type=str,
default=get_current_branch(),
help='Build from a specific branch')
parser.add_argument('--commit',
metavar='<commit>',
type=str,
default=get_current_commit(short=True),
help='Build from a specific commit')
parser.add_argument('--version',
metavar='<version>',
type=str,
default=get_current_version(),
help='Version information to apply to build output (ex: 0.12.0)')
parser.add_argument('--iteration',
metavar='<package iteration>',
type=str,
default="1",
help='Package iteration to apply to build output (defaults to 1)')
parser.add_argument('--stats',
action='store_true',
help='Emit build metrics (requires InfluxDB Python client)')
parser.add_argument('--stats-server',
metavar='<hostname:port>',
type=str,
help='Send build stats to InfluxDB using provided hostname and port')
parser.add_argument('--stats-db',
metavar='<database name>',
type=str,
help='Send build stats to InfluxDB using provided database name')
parser.add_argument('--nightly',
action='store_true',
                        help='Mark build output as nightly build (will increment the minor version)')
parser.add_argument('--update',
action='store_true',
help='Update build dependencies prior to building')
parser.add_argument('--package',
action='store_true',
help='Package binary output')
parser.add_argument('--release',
action='store_true',
help='Mark build output as release')
parser.add_argument('--clean',
action='store_true',
help='Clean output directory before building')
parser.add_argument('--no-get',
action='store_true',
help='Do not retrieve pinned dependencies when building')
parser.add_argument('--no-uncommitted',
action='store_true',
help='Fail if uncommitted changes exist in the working directory')
parser.add_argument('--upload',
action='store_true',
help='Upload output packages to AWS S3')
parser.add_argument('--upload-overwrite','-w',
action='store_true',
                        help='Overwrite existing packages when uploading to AWS S3')
parser.add_argument('--bucket',
metavar='<S3 bucket name>',
type=str,
default=DEFAULT_BUCKET,
help='Destination bucket for uploads')
parser.add_argument('--generate',
action='store_true',
help='Run "go generate" before building')
parser.add_argument('--build-tags',
metavar='<tags>',
help='Optional build tags to use for compilation')
parser.add_argument('--static',
action='store_true',
help='Create statically-compiled binary output')
parser.add_argument('--sign',
action='store_true',
help='Create GPG detached signatures for packages (when package is specified)')
parser.add_argument('--test',
action='store_true',
help='Run tests (does not produce build output)')
parser.add_argument('--no-vet',
action='store_true',
help='Do not run "go vet" when running tests')
parser.add_argument('--race',
action='store_true',
help='Enable race flag for build output')
parser.add_argument('--parallel',
metavar='<num threads>',
type=int,
help='Number of tests to run simultaneously')
parser.add_argument('--timeout',
metavar='<timeout>',
type=str,
help='Timeout for tests before failing')
args = parser.parse_args()
print_banner()
sys.exit(main(args))
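# Example invocations (added for illustration; the flag names come from the argparse
# definitions above, while paths, bucket names and required tooling are assumptions
# rather than documented behaviour):
#
#   python build.py --clean --test --race --parallel 4
#   python build.py --package --static --version 1.2.3 --outdir ./build/
#   python build.py --package --sign --upload --bucket my-bucket
#
# Packaging needs the fpm gem (checked above), --sign presumably needs a usable GPG
# key, and --upload assumes S3 credentials are available in the environment.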
| apache-2.0 |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/addons/hr_expense/report/__init__.py | 380 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_expense_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
prymatex/SublimeCodeIntel | libs/codeintel2/perlcile.py | 7 | 8773 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
#
# Contributors:
# Eric Promislow ([email protected])
"""
perlcile - a Code Intelligence Language Engine for the Perl language
Module Usage:
from perlcile import scan_purelang
content = open("foo.pl", "r").read()
scan_purelang(content, "foo.pl")
Command-line Usage:
perlcile.py [<options>...] [<Perl file>]
Options:
-h, --help dump this help and exit
-V, --version dump this script's version and exit
-v, --verbose verbose output, use twice for more verbose output
-f, --filename <path> specify the filename of the file content
passed in on stdin, this is used for the "path"
attribute of the emitted <file> tag.
--md5=<string> md5 hash for the input
--mtime=<secs> modification time for output info, in #secs since
1/1/70.
-L, --language <name>
the language of the file being scanned
-c, --clock print timing info for scans (CIX is not printed)
One or more Perl files can be specified as arguments or content can be
passed in on stdin. A directory can also be specified, in which case
all .pl files in that directory are scanned.
This is a Language Engine for the Code Intelligence (codeintel) system.
Code Intelligence XML format. See:
http://specs.activestate.com/Komodo_3.0/func/code_intelligence.html
http://specs.tl.activestate.com/kd/kd-0100.html
The command-line interface will return non-zero iff the scan failed.
"""
import os
import os.path
import sys
import getopt
from hashlib import md5
import re
import logging
import glob
import time
import stat
from ciElementTree import Element, SubElement, tostring
from SilverCity import ScintillaConstants
from codeintel2 import perl_lexer, perl_parser, util
from codeintel2.tree import pretty_tree_from_tree
from codeintel2.common import CILEError
from codeintel2 import parser_cix
#---- global data
_version_ = (0, 1, 0)
log = logging.getLogger("perlcile")
# log.setLevel(logging.DEBUG)
_gClockIt = 0 # if true then we are gathering timing data
_gClock = None # if gathering timing data this is set to time retrieval fn
_gStartTime = None # start time of current file being scanned
gProvideFullDocs = False
#---- internal support
# This code has intimate knowledge of the code objects defined in
# perl_parser.py
def scan_purelang(buf):
content = buf.accessor.text.expandtabs(8)
tokenizer = perl_lexer.PerlLexer(content, gProvideFullDocs)
parser = perl_parser.Parser(tokenizer, provide_full_docs=gProvideFullDocs)
parser.moduleName = buf.path
parse_tree = parser.parse()
tree = parser.produce_CIX()
return tree
def scan_multilang(tokens, module_elem):
"""Build the Perl module CIX element tree.
"tokens" is a generator of UDL tokens for this UDL-based
multi-lang document.
"module_elem" is the <module> element of a CIX element tree on
which the Perl module should be built.
This should return a list of the CSL tokens in the token stream.
"""
tokenizer = perl_lexer.PerlMultiLangLexer(tokens)
# "PerlHTML" is about all we need for whichever Perl-based
# template language is being used. This could just as easily be a
# boolean that indicates whether we're processing a pure language
# or a multi-lang one.
parser = perl_parser.Parser(
tokenizer, lang="PerlHTML", provide_full_docs=gProvideFullDocs)
parser.moduleName = "" # Unknown
parser.parse()
parse_tree = parser.produce_CIX_NoHeader(module_elem)
csl_tokens = tokenizer.get_csl_tokens()
return csl_tokens, tokenizer.has_perl_code()
#---- mainline
def main(argv):
logging.basicConfig()
# Parse options.
try:
opts, args = getopt.getopt(argv[1:], "Vvhf:cL:",
["version", "verbose", "help", "filename=", "md5=", "mtime=",
"clock", "language="])
except getopt.GetoptError as ex:
log.error(str(ex))
log.error("Try `perlcile --help'.")
return 1
numVerboses = 0
stdinFilename = None
md5sum = None
mtime = None
lang = "Perl"
global _gClockIt
for opt, optarg in opts:
if opt in ("-h", "--help"):
sys.stdout.write(__doc__)
return
elif opt in ("-V", "--version"):
ver = '.'.join([str(part) for part in _version_])
print("perlcile %s" % ver)
return
elif opt in ("-v", "--verbose"):
numVerboses += 1
if numVerboses == 1:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.DEBUG)
elif opt in ("-f", "--filename"):
stdinFilename = optarg
elif opt in ("-L", "--language"):
lang = optarg
elif opt in ("--md5",):
md5sum = optarg
elif opt in ("--mtime",):
mtime = optarg
elif opt in ("-c", "--clock"):
_gClockIt = 1
global _gClock
if sys.platform.startswith("win"):
_gClock = time.clock
else:
_gClock = time.time
if len(args) == 0:
contentOnStdin = 1
filenames = [stdinFilename or "<stdin>"]
else:
contentOnStdin = 0
paths = []
for arg in args:
paths += glob.glob(arg)
filenames = []
for path in paths:
if os.path.isfile(path):
filenames.append(path)
elif os.path.isdir(path):
perlfiles = [os.path.join(path, n) for n in os.listdir(path)
if os.path.splitext(n)[1] in (".pl", ".pm")]
perlfiles = [f for f in perlfiles if os.path.isfile(f)]
filenames += perlfiles
    try:
for filename in filenames:
if contentOnStdin:
log.debug("reading content from stdin")
content = sys.stdin.read()
log.debug("finished reading content from stdin")
if mtime is None:
mtime = int(time.time())
else:
if mtime is None:
mtime = int(os.stat(filename)[stat.ST_MTIME])
content = open(filename, 'r').read()
if _gClockIt:
sys.stdout.write("scanning '%s'..." % filename)
global _gStartTime
_gStartTime = _gClock()
data = scan(
content, filename, md5sum=md5sum, mtime=mtime, lang=lang)
if _gClockIt:
sys.stdout.write(" %.3fs\n" % (_gClock()-_gStartTime))
elif data:
sys.stdout.write(data)
except KeyboardInterrupt:
log.debug("user abort")
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-2.0 |
postlund/home-assistant | script/scaffold/templates/config_flow_oauth2/integration/__init__.py | 9 | 2558 | """The NEW_NAME integration."""
import asyncio
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.helpers import (
aiohttp_client,
config_entry_oauth2_flow,
config_validation as cv,
)
from . import api, config_flow
from .const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
# TODO List the platforms that you want to support.
# For your initial PR, limit it to 1 platform.
PLATFORMS = ["light"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the NEW_NAME component."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
config_flow.OAuth2FlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
),
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up NEW_NAME from a config entry."""
implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
# If using a requests-based API lib
hass.data[DOMAIN][entry.entry_id] = api.ConfigEntryAuth(hass, entry, session)
# If using an aiohttp-based API lib
hass.data[DOMAIN][entry.entry_id] = api.AsyncConfigEntryAuth(
aiohttp_client.async_get_clientsession(hass), session
)
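    # Note (added for clarity): the two assignments above are alternatives from the
    # scaffold -- a real integration would presumably keep only the one matching its
    # API library, since the aiohttp-based auth overwrites the requests-based one here.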
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| apache-2.0 |
vaygr/ansible | contrib/inventory/stacki.py | 39 | 6286 | #!/usr/bin/env python
# Copyright (c) 2016, Hugh Ma <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Stacki inventory script
# Configure stacki.yml with proper auth information and place in the following:
# - ../inventory/stacki.yml
# - /etc/stacki/stacki.yml
# - /etc/ansible/stacki.yml
# The stacki.yml file can contain entries for authentication information
# regarding the Stacki front-end node.
#
# use_hostnames uses hostname rather than interface ip as connection
#
#
"""
Example Usage:
List Stacki Nodes
$ ./stack.py --list
Example Configuration:
---
stacki:
auth:
stacki_user: admin
stacki_password: abc12345678910
stacki_endpoint: http://192.168.200.50/stack
use_hostnames: false
"""
import argparse
import os
import sys
import yaml
from distutils.version import StrictVersion
try:
import json
except:
import simplejson as json
try:
import requests
except:
sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
def stack_auth(params):
endpoint = params['stacki_endpoint']
auth_creds = {'USERNAME': params['stacki_user'],
'PASSWORD': params['stacki_password']}
client = requests.session()
client.get(endpoint)
init_csrf = client.cookies['csrftoken']
header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf,
'Content-type': 'application/x-www-form-urlencoded'}
login_endpoint = endpoint + "/login"
login_req = client.post(login_endpoint, data=auth_creds, headers=header)
csrftoken = login_req.cookies['csrftoken']
sessionid = login_req.cookies['sessionid']
auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid)
return client, auth_creds
def stack_build_header(auth_creds):
header = {'csrftoken': auth_creds['CSRFTOKEN'],
'X-CSRFToken': auth_creds['CSRFTOKEN'],
'sessionid': auth_creds['SESSIONID'],
'Content-type': 'application/json'}
return header
def stack_host_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
headers=header)
return json.loads(stack_r.json())
def stack_net_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
headers=header)
return json.loads(stack_r.json())
def format_meta(hostdata, intfdata, config):
use_hostnames = config['use_hostnames']
meta = dict(all=dict(hosts=list()),
frontends=dict(hosts=list()),
backends=dict(hosts=list()),
_meta=dict(hostvars=dict()))
# Iterate through list of dicts of hosts and remove
# environment key as it causes conflicts
for host in hostdata:
del host['environment']
meta['_meta']['hostvars'][host['host']] = host
meta['_meta']['hostvars'][host['host']]['interfaces'] = list()
# @bbyhuy to improve readability in next iteration
for intf in intfdata:
if intf['host'] in meta['_meta']['hostvars']:
meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf)
if intf['default'] is True:
meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip']
if not use_hostnames:
meta['all']['hosts'].append(intf['ip'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['ip'])
else:
meta['frontends']['hosts'].append(intf['ip'])
else:
meta['all']['hosts'].append(intf['host'])
if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
meta['backends']['hosts'].append(intf['host'])
else:
meta['frontends']['hosts'].append(intf['host'])
return meta
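# Illustrative shape of the inventory dict returned by format_meta() (the keys are
# taken from the code above; host names and addresses are made up):
#
#   {
#     "all":       {"hosts": ["192.168.0.10"]},
#     "frontends": {"hosts": ["192.168.0.2"]},
#     "backends":  {"hosts": ["192.168.0.10"]},
#     "_meta": {
#       "hostvars": {
#         "backend-0-0": {"appliance": "backend", "ansible_host": "192.168.0.10",
#                         "interfaces": [{"ip": "192.168.0.10", "default": true}]}
#       }
#     }
#   }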
def parse_args():
parser = argparse.ArgumentParser(description='Stacki Inventory Module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active hosts')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
sys.exit('requests>=2.4.3 is required for this inventory script')
try:
config_files = CONFIG_FILES
config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml')
config = None
for cfg_file in config_files:
if os.path.isfile(cfg_file):
stream = open(cfg_file, 'r')
config = yaml.safe_load(stream)
break
if not config:
sys.stderr.write("No config file found at {0}\n".format(config_files))
sys.exit(1)
client, auth_creds = stack_auth(config['stacki']['auth'])
header = stack_build_header(auth_creds)
host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
final_meta = format_meta(host_list, intf_list, config)
print(json.dumps(final_meta, indent=4))
except Exception as e:
        sys.stderr.write('%s\n' % e)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 |
melund/wcwidth | wcwidth/table_zero.py | 7 | 20001 | """Zero_Width table. Created by setup.py."""
# Generated: 2015-09-14T01:48:19.532217
# Source: DerivedGeneralCategory-8.0.0.txt
# Date: 2015-02-13, 13:47:11 GMT [MD]
ZERO_WIDTH = (
(0x0300, 0x036f,), # Combining Grave Accent ..Combining Latin Small Le
(0x0483, 0x0489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
(0x0591, 0x05bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
(0x05bf, 0x05bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
(0x05c1, 0x05c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
(0x05c4, 0x05c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
(0x05c7, 0x05c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
(0x0610, 0x061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
(0x064b, 0x065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
(0x0670, 0x0670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
(0x06d6, 0x06dc,), # Arabic Small High Ligatu..Arabic Small High Seen
(0x06df, 0x06e4,), # Arabic Small High Rounde..Arabic Small High Madda
(0x06e7, 0x06e8,), # Arabic Small High Yeh ..Arabic Small High Noon
(0x06ea, 0x06ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
(0x0711, 0x0711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
(0x0730, 0x074a,), # Syriac Pthaha Above ..Syriac Barrekh
(0x07a6, 0x07b0,), # Thaana Abafili ..Thaana Sukun
(0x07eb, 0x07f3,), # Nko Combining Short High..Nko Combining Double Dot
(0x0816, 0x0819,), # Samaritan Mark In ..Samaritan Mark Dagesh
(0x081b, 0x0823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
(0x0825, 0x0827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
(0x0829, 0x082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
(0x0859, 0x085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
(0x08e3, 0x0902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
(0x093a, 0x093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
(0x093c, 0x093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
(0x0941, 0x0948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
(0x094d, 0x094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
(0x0951, 0x0957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
(0x0962, 0x0963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
(0x0981, 0x0981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
(0x09bc, 0x09bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
(0x09c1, 0x09c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
(0x09cd, 0x09cd,), # Bengali Sign Virama ..Bengali Sign Virama
(0x09e2, 0x09e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
(0x0a01, 0x0a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
(0x0a3c, 0x0a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
(0x0a41, 0x0a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
(0x0a47, 0x0a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
(0x0a4b, 0x0a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
(0x0a51, 0x0a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
(0x0a70, 0x0a71,), # Gurmukhi Tippi ..Gurmukhi Addak
(0x0a75, 0x0a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
(0x0a81, 0x0a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
(0x0abc, 0x0abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
(0x0ac1, 0x0ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
(0x0ac7, 0x0ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
(0x0acd, 0x0acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
(0x0ae2, 0x0ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
(0x0b01, 0x0b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
(0x0b3c, 0x0b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
(0x0b3f, 0x0b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
(0x0b41, 0x0b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
(0x0b4d, 0x0b4d,), # Oriya Sign Virama ..Oriya Sign Virama
(0x0b56, 0x0b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
(0x0b62, 0x0b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
(0x0b82, 0x0b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
(0x0bc0, 0x0bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
(0x0bcd, 0x0bcd,), # Tamil Sign Virama ..Tamil Sign Virama
(0x0c00, 0x0c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
(0x0c3e, 0x0c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
(0x0c46, 0x0c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
(0x0c4a, 0x0c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
(0x0c55, 0x0c56,), # Telugu Length Mark ..Telugu Ai Length Mark
(0x0c62, 0x0c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
(0x0c81, 0x0c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
(0x0cbc, 0x0cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
(0x0cbf, 0x0cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
(0x0cc6, 0x0cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
(0x0ccc, 0x0ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
(0x0ce2, 0x0ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
(0x0d01, 0x0d01,), # Malayalam Sign Candrabin..Malayalam Sign Candrabin
(0x0d41, 0x0d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
(0x0d4d, 0x0d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
(0x0d62, 0x0d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
(0x0dca, 0x0dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
(0x0dd2, 0x0dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
(0x0dd6, 0x0dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
(0x0e31, 0x0e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
(0x0e34, 0x0e3a,), # Thai Character Sara I ..Thai Character Phinthu
(0x0e47, 0x0e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
(0x0eb1, 0x0eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
(0x0eb4, 0x0eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
(0x0ebb, 0x0ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
(0x0ec8, 0x0ecd,), # Lao Tone Mai Ek ..Lao Niggahita
(0x0f18, 0x0f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
(0x0f35, 0x0f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
(0x0f37, 0x0f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
(0x0f39, 0x0f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
(0x0f71, 0x0f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
(0x0f80, 0x0f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
(0x0f86, 0x0f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
(0x0f8d, 0x0f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
(0x0f99, 0x0fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
(0x0fc6, 0x0fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
(0x102d, 0x1030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
(0x1032, 0x1037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
(0x1039, 0x103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
(0x103d, 0x103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
(0x1058, 0x1059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
(0x105e, 0x1060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
(0x1071, 0x1074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
(0x1082, 0x1082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
(0x1085, 0x1086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
(0x108d, 0x108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
(0x109d, 0x109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
(0x135d, 0x135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
(0x1712, 0x1714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
(0x1732, 0x1734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
(0x1752, 0x1753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
(0x1772, 0x1773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
(0x17b4, 0x17b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
(0x17b7, 0x17bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
(0x17c6, 0x17c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
(0x17c9, 0x17d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
(0x17dd, 0x17dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
(0x180b, 0x180d,), # Mongolian Free Variation..Mongolian Free Variation
(0x18a9, 0x18a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
(0x1920, 0x1922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
(0x1927, 0x1928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
(0x1932, 0x1932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
(0x1939, 0x193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
(0x1a17, 0x1a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
(0x1a1b, 0x1a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
(0x1a56, 0x1a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
(0x1a58, 0x1a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
(0x1a60, 0x1a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
(0x1a62, 0x1a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
(0x1a65, 0x1a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
(0x1a73, 0x1a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
(0x1a7f, 0x1a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
(0x1ab0, 0x1abe,), # Combining Doubled Circum..Combining Parentheses Ov
(0x1b00, 0x1b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
(0x1b34, 0x1b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
(0x1b36, 0x1b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
(0x1b3c, 0x1b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
(0x1b42, 0x1b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
(0x1b6b, 0x1b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
(0x1b80, 0x1b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
(0x1ba2, 0x1ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
(0x1ba8, 0x1ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
(0x1bab, 0x1bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
(0x1be6, 0x1be6,), # Batak Sign Tompi ..Batak Sign Tompi
(0x1be8, 0x1be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
(0x1bed, 0x1bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
(0x1bef, 0x1bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
(0x1c2c, 0x1c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
(0x1c36, 0x1c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
(0x1cd0, 0x1cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
(0x1cd4, 0x1ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
(0x1ce2, 0x1ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
(0x1ced, 0x1ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
(0x1cf4, 0x1cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
(0x1cf8, 0x1cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
(0x1dc0, 0x1df5,), # Combining Dotted Grave A..Combining Up Tack Above
(0x1dfc, 0x1dff,), # Combining Double Inverte..Combining Right Arrowhea
(0x20d0, 0x20f0,), # Combining Left Harpoon A..Combining Asterisk Above
(0x2cef, 0x2cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
(0x2d7f, 0x2d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
(0x2de0, 0x2dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
(0x302a, 0x302d,), # Ideographic Level Tone M..Ideographic Entering Ton
(0x3099, 0x309a,), # Combining Katakana-hirag..Combining Katakana-hirag
(0xa66f, 0xa672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
(0xa674, 0xa67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
(0xa69e, 0xa69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
(0xa6f0, 0xa6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
(0xa802, 0xa802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
(0xa806, 0xa806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
(0xa80b, 0xa80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
(0xa825, 0xa826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
(0xa8c4, 0xa8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
(0xa8e0, 0xa8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
(0xa926, 0xa92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
(0xa947, 0xa951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
(0xa980, 0xa982,), # Javanese Sign Panyangga ..Javanese Sign Layar
(0xa9b3, 0xa9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
(0xa9b6, 0xa9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
(0xa9bc, 0xa9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
(0xa9e5, 0xa9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
(0xaa29, 0xaa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
(0xaa31, 0xaa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
(0xaa35, 0xaa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
(0xaa43, 0xaa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
(0xaa4c, 0xaa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
(0xaa7c, 0xaa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
(0xaab0, 0xaab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
(0xaab2, 0xaab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
(0xaab7, 0xaab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
(0xaabe, 0xaabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
(0xaac1, 0xaac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
(0xaaec, 0xaaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
(0xaaf6, 0xaaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
(0xabe5, 0xabe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
(0xabe8, 0xabe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
(0xabed, 0xabed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
(0xfb1e, 0xfb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
(0xfe00, 0xfe0f,), # Variation Selector-1 ..Variation Selector-16
(0xfe20, 0xfe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
(0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
(0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
(0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
(0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
(0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
(0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
(0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
(0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
(0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
(0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
(0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
(0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
(0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
(0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
(0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
(0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
(0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
(0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
(0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
(0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
(0x111ca, 0x111cc,), # Sharada Sign Nukta ..Sharada Extra Short Vowe
(0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
(0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
(0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
(0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
(0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
(0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
(0x1133c, 0x1133c,), # Grantha Sign Nukta ..Grantha Sign Nukta
(0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
(0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
(0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
(0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
(0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
(0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
(0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
(0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
(0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
(0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
(0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
(0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
(0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
(0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
(0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
(0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
(0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
(0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
(0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
(0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
(0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
(0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
(0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
(0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
(0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
(0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
(0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
(0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
(0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
(0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
(0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
(0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
(0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
(0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
(0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
)
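# Illustrative helper (added here for clarity; it is not part of the generated
# module, and the real lookup lives in wcwidth's own code): range tables like
# ZERO_WIDTH are normally consulted with a binary search over their (start, end)
# pairs, roughly as sketched below.
def _example_in_table(ucs, table=ZERO_WIDTH):
    """Return True if code point `ucs` falls inside any (start, end) range."""
    lo, hi = 0, len(table) - 1
    if ucs < table[0][0] or ucs > table[hi][1]:
        return False
    while lo <= hi:
        mid = (lo + hi) // 2
        start, end = table[mid]
        if ucs > end:
            lo = mid + 1
        elif ucs < start:
            hi = mid - 1
        else:
            return True
    return False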
| mit |
vnsofthe/odoo | addons/hr_attendance/report/__init__.py | 375 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import attendance_errors
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jkunimune15/Map-Projections | src/zupplemental/compose_maps.py | 1 | 5115 | #compose_maps.py
#make ALL the maps
import math
from generate_borders import generate_borders
from generate_graticule import generate_graticule, generate_backdrop
from generate_indicatrices import generate_indicatrices
from generate_orthodromes import generate_orthodromes
from generate_shape import plot_shapes
from generate_labels import generate_topographical_labels, label_shapes, label_points
def compose_landmasses():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="water">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
def compose_graticule():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_graticule2():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(15, .25, include_tropics=True, adjust_poles=True, double_dateline=True)
print('\t\t</g>')
print('\t</g>')
def compose_compound():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_50m_rivers_lake_centerlines', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(15, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(15, math.radians(3.75), resolution=180, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices2(ctr_meridian):
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="water">')
generate_backdrop(.5, ctr_meridian=ctr_meridian)
print('\t\t</g>')
print('\t\t<g class="land">')
plot_shapes('ne_110m_land', flesh_out_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_110m_lakes')
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(10, .5, double_dateline=(ctr_meridian==0))
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(30, 500/6371, ctr_meridian=ctr_meridian, adjust_poles=True, resolution=120, side_res=5, pole_res=120)
print('\t\t</g>')
print('\t</g>')
def compose_political():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_50m', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
label_shapes('ne_50m_admin_0_countries', "pol")
def compose_orthodromes():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="lines">')
generate_orthodromes()
print('\t\t</g>')
print('\t</g>')
def compose_everything():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=False)
print('\t\t<g class="border">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=True)
print('\t\t</g>')
print('\t\t</g>')
print('\t\t<g class="sovereign">')
plot_shapes('ne_10m_admin_0_map_units')
print('\t\t</g>')
print('\t\t<g class="admin">')
plot_shapes('ne_10m_admin_1_states_provinces_lines', filter_field='adm0_a3',
filter_vals=['RUS','CAN','CHN','USA','BRA','AUS','IND','ARG','KAZ'])
print('\t\t</g>')
print('\t\t<g class="dispute">')
plot_shapes('ne_10m_admin_0_boundary_lines_disputed_areas')
print('\t\t</g>')
print('\t\t<g class="coastline">')
plot_shapes('ne_10m_coastline', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_10m_rivers_lake_centerlines', max_rank=5)
print('\t\t</g>')
print('\t\t<g class="lake">')
plot_shapes('ne_10m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
plot_shapes('ne_10m_geographic_lines', clazz="dateline", filter_field='name', filter_vals=["International Date Line"])
print('\t\t</g>')
print('\t</g>')
generate_topographical_labels('ne_50m', max_rank=2, text_size=4)
label_shapes('ne_10m_lakes', "sea", max_rank=1, text_size=1)
label_shapes('ne_10m_admin_0_countries', "pol", text_size=4)
label_points('cities_capital', "cap", text_size=1)
label_points('cities_other', "cit", text_size=0)
if __name__ == '__main__':
# compose_landmasses()
# compose_graticule()
# compose_compound()
# compose_indicatrices()
# compose_indicatrices2(-0)
# compose_political()
# compose_orthodromes()
compose_everything()
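# Note (added for illustration): each compose_* function writes SVG <g> fragments to
# stdout, so a typical run -- an assumption about the surrounding workflow, not
# something documented here -- redirects the output into a map template, e.g.:
#   python compose_maps.py > everything_layer.svg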
| mit |
sarvex/tensorflow | tensorflow/python/keras/preprocessing/image_dataset.py | 6 | 11428 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras image dataset loading utilities."""
# pylint: disable=g-classes-have-attributes
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.keras.layers.preprocessing import image_preprocessing
from tensorflow.python.keras.preprocessing import dataset_utils
from tensorflow.python.keras.preprocessing import image as keras_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.util.tf_export import keras_export
ALLOWLIST_FORMATS = ('.bmp', '.gif', '.jpeg', '.jpg', '.png')
@keras_export('keras.utils.image_dataset_from_directory',
'keras.preprocessing.image_dataset_from_directory',
v1=[])
def image_dataset_from_directory(directory,
labels='inferred',
label_mode='int',
class_names=None,
color_mode='rgb',
batch_size=32,
image_size=(256, 256),
shuffle=True,
seed=None,
validation_split=None,
subset=None,
interpolation='bilinear',
follow_links=False,
crop_to_aspect_ratio=False,
**kwargs):
"""Generates a `tf.data.Dataset` from image files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
```
Then calling `image_dataset_from_directory(main_directory, labels='inferred')`
will return a `tf.data.Dataset` that yields batches of images from
the subdirectories `class_a` and `class_b`, together with labels
0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`).
Supported image formats: jpeg, png, bmp, gif.
Animated gifs are truncated to the first frame.
Args:
directory: Directory where the data is located.
If `labels` is "inferred", it should contain
subdirectories, each containing images for a class.
Otherwise, the directory structure is ignored.
labels: Either "inferred"
(labels are generated from the directory structure),
None (no labels),
or a list/tuple of integer labels of the same size as the number of
image files found in the directory. Labels should be sorted according
to the alphanumeric order of the image file paths
(obtained via `os.walk(directory)` in Python).
label_mode:
- 'int': means that the labels are encoded as integers
(e.g. for `sparse_categorical_crossentropy` loss).
- 'categorical' means that the labels are
encoded as a categorical vector
(e.g. for `categorical_crossentropy` loss).
- 'binary' means that the labels (there can be only 2)
are encoded as `float32` scalars with values 0 or 1
(e.g. for `binary_crossentropy`).
- None (no labels).
    class_names: Only valid if "labels" is "inferred". This is the explicit
list of class names (must match names of subdirectories). Used
to control the order of the classes
(otherwise alphanumerical order is used).
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
batch_size: Size of the batches of data. Default: 32.
image_size: Size to resize images to after they are read from disk.
Defaults to `(256, 256)`.
Since the pipeline processes batches of images that must all have
the same size, this must be provided.
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation".
Only used if `validation_split` is set.
interpolation: String, the interpolation method used when resizing images.
Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`,
`area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.
follow_links: Whether to visits subdirectories pointed to by symlinks.
Defaults to False.
crop_to_aspect_ratio: If True, resize the images without aspect
ratio distortion. When the original aspect ratio differs from the target
aspect ratio, the output image will be cropped so as to return the largest
possible window in the image (of size `image_size`) that matches
the target aspect ratio. By default (`crop_to_aspect_ratio=False`),
aspect ratio may not be preserved.
**kwargs: Legacy keyword arguments.
Returns:
A `tf.data.Dataset` object.
- If `label_mode` is None, it yields `float32` tensors of shape
`(batch_size, image_size[0], image_size[1], num_channels)`,
encoding images (see below for rules regarding `num_channels`).
- Otherwise, it yields a tuple `(images, labels)`, where `images`
has shape `(batch_size, image_size[0], image_size[1], num_channels)`,
and `labels` follows the format described below.
Rules regarding labels format:
- if `label_mode` is `int`, the labels are an `int32` tensor of shape
`(batch_size,)`.
- if `label_mode` is `binary`, the labels are a `float32` tensor of
1s and 0s of shape `(batch_size, 1)`.
    - if `label_mode` is `categorical`, the labels are a `float32` tensor
of shape `(batch_size, num_classes)`, representing a one-hot
encoding of the class index.
Rules regarding number of channels in the yielded images:
- if `color_mode` is `grayscale`,
there's 1 channel in the image tensors.
- if `color_mode` is `rgb`,
there are 3 channel in the image tensors.
- if `color_mode` is `rgba`,
there are 4 channel in the image tensors.
"""
if 'smart_resize' in kwargs:
crop_to_aspect_ratio = kwargs.pop('smart_resize')
if kwargs:
    raise TypeError(f'Unknown keyword argument(s): {tuple(kwargs.keys())}')
if labels not in ('inferred', None):
if not isinstance(labels, (list, tuple)):
raise ValueError(
'`labels` argument should be a list/tuple of integer labels, of '
'the same size as the number of image files in the target '
'directory. If you wish to infer the labels from the subdirectory '
'names in the target directory, pass `labels="inferred"`. '
'If you wish to get a dataset that only contains images '
'(no labels), pass `label_mode=None`.')
if class_names:
raise ValueError('You can only pass `class_names` if the labels are '
'inferred from the subdirectory names in the target '
'directory (`labels="inferred"`).')
if label_mode not in {'int', 'categorical', 'binary', None}:
raise ValueError(
'`label_mode` argument must be one of "int", "categorical", "binary", '
'or None. Received: %s' % (label_mode,))
if labels is None or label_mode is None:
labels = None
label_mode = None
if color_mode == 'rgb':
num_channels = 3
elif color_mode == 'rgba':
num_channels = 4
elif color_mode == 'grayscale':
num_channels = 1
else:
raise ValueError(
        '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. '
'Received: %s' % (color_mode,))
interpolation = image_preprocessing.get_interpolation(interpolation)
dataset_utils.check_validation_split_arg(
validation_split, subset, shuffle, seed)
if seed is None:
seed = np.random.randint(1e6)
image_paths, labels, class_names = dataset_utils.index_directory(
directory,
labels,
formats=ALLOWLIST_FORMATS,
class_names=class_names,
shuffle=shuffle,
seed=seed,
follow_links=follow_links)
if label_mode == 'binary' and len(class_names) != 2:
raise ValueError(
        'When passing `label_mode="binary"`, there must be exactly 2 classes. '
'Found the following classes: %s' % (class_names,))
image_paths, labels = dataset_utils.get_training_or_validation_split(
image_paths, labels, validation_split, subset)
if not image_paths:
raise ValueError('No images found.')
dataset = paths_and_labels_to_dataset(
image_paths=image_paths,
image_size=image_size,
num_channels=num_channels,
labels=labels,
label_mode=label_mode,
num_classes=len(class_names),
interpolation=interpolation,
crop_to_aspect_ratio=crop_to_aspect_ratio)
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
# Users may need to reference `class_names`.
dataset.class_names = class_names
# Include file paths for images as attribute.
dataset.file_paths = image_paths
return dataset
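# Minimal usage sketch (added for illustration; not part of the TensorFlow source).
# It only wraps the public call documented above -- "flower_photos" is a hypothetical
# directory laid out with one subdirectory per class, and nothing here runs at
# import time.
def _example_image_dataset_usage(data_dir='flower_photos'):
  train_ds = image_dataset_from_directory(
      data_dir,
      validation_split=0.2,
      subset='training',
      seed=123,
      image_size=(180, 180),
      batch_size=32)
  val_ds = image_dataset_from_directory(
      data_dir,
      validation_split=0.2,
      subset='validation',
      seed=123,
      image_size=(180, 180),
      batch_size=32)
  return train_ds, val_ds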
def paths_and_labels_to_dataset(image_paths,
image_size,
num_channels,
labels,
label_mode,
num_classes,
interpolation,
crop_to_aspect_ratio=False):
"""Constructs a dataset of images and labels."""
# TODO(fchollet): consider making num_parallel_calls settable
path_ds = dataset_ops.Dataset.from_tensor_slices(image_paths)
args = (image_size, num_channels, interpolation, crop_to_aspect_ratio)
img_ds = path_ds.map(
lambda x: load_image(x, *args))
if label_mode:
label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
img_ds = dataset_ops.Dataset.zip((img_ds, label_ds))
return img_ds
def load_image(path, image_size, num_channels, interpolation,
crop_to_aspect_ratio=False):
"""Load an image from a path and resize it."""
img = io_ops.read_file(path)
img = image_ops.decode_image(
img, channels=num_channels, expand_animations=False)
if crop_to_aspect_ratio:
img = keras_image_ops.smart_resize(img, image_size,
interpolation=interpolation)
else:
img = image_ops.resize_images_v2(img, image_size, method=interpolation)
img.set_shape((image_size[0], image_size[1], num_channels))
return img
| apache-2.0 |
ricardogsilva/QGIS | tests/src/python/test_qgsimagecache.py | 41 | 5431 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsImageCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2018 by Nyall Dawson'
__date__ = '02/10/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
import time
from qgis.PyQt.QtCore import QDir, QCoreApplication, QSize
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsImageCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class SlowHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
time.sleep(1)
return http.server.SimpleHTTPRequestHandler.do_GET(self)
class TestQgsImageCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Bring up a simple HTTP server, for remote SVG tests
os.chdir(unitTestDataPath() + '')
handler = SlowHTTPRequestHandler
cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
cls.port = cls.httpd.server_address[1]
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.setDaemon(True)
cls.httpd_thread.start()
def setUp(self):
self.report = "<h1>Python QgsImageCache Tests</h1>\n"
self.fetched = False
QgsApplication.imageCache().remoteImageFetched.connect(self.imageFetched)
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def imageFetched(self):
self.fetched = True
def waitForFetch(self):
self.fetched = False
while not self.fetched:
QCoreApplication.processEvents()
def testRemoteImage(self):
"""Test fetching remote image."""
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote Image', 'waiting_image', image))
self.assertFalse(QgsApplication.imageCache().originalSize(url).isValid())
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
self.assertEqual(QgsApplication.imageCache().originalSize(url), QSize(511, 800), 1.0)
def testRemoteImageMissing(self):
"""Test fetching remote image with bad url"""
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def testRemoteImageBlocking(self):
"""Test fetching remote image."""
# remote not yet requested so not in cache
url = 'http://localhost:{}/qgis_local_server/logo_2017.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
# first should be correct image
self.assertTrue(self.imageCheck('Remote image sync', 'remote_image_blocking', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'image_' + name + ".png"
output_image = QImage(image.size(), QImage.Format_RGB32)
QgsMultiRenderChecker.drawBackground(output_image)
painter = QPainter(output_image)
painter.drawImage(0, 0, image)
painter.end()
output_image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("image_cache")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
print((self.report))
return result
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
saurabhjn76/sympy | examples/advanced/curvilinear_coordinates.py | 96 | 3691 | #!/usr/bin/env python
"""
This example shows how to work with coordinate transformations, curvilinear
coordinates and a little bit with differential geometry.
It takes polar, cylindrical, spherical, rotating disk coordinates and others
and calculates all kinds of interesting properties, like Jacobian, metric
tensor, Laplace operator, ...
"""
from sympy import var, sin, cos, pprint, Matrix, eye, trigsimp, Eq, \
Function, simplify, sinh, cosh, expand, symbols
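# A note on the formula used by laplace() below: the result is assembled as
#     g^{ij} * d_i d_j f  +  d_sigma(det g) * g^{sigma alpha} * d_alpha f / (2 * det g)
# (summing over repeated indices), i.e. the Laplace operator expressed through
# the inverse metric g^{ij} and the determinant of the metric, which is exactly
# what the two nested loops in laplace() build term by term.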
def laplace(f, g_inv, g_det, X):
"""
Calculates Laplace(f), using the inverse metric g_inv, the determinant of
the metric g_det, all in variables X.
"""
r = 0
for i in range(len(X)):
for j in range(len(X)):
r += g_inv[i, j]*f.diff(X[i]).diff(X[j])
for sigma in range(len(X)):
for alpha in range(len(X)):
r += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \
f.diff(X[alpha]) / (2*g_det)
return r
def transform(name, X, Y, g_correct=None, recursive=False):
"""
Transforms from cartesian coordinates X to any curvilinear coordinates Y.
    It prints useful information, like the Jacobian, metric tensor, determinant
    of the metric, Laplace operator in the new coordinates, ...
g_correct ... if not None, it will be taken as the metric --- this is
useful if sympy's trigsimp() is not powerful enough to
simplify the metric so that it is usable for later
        calculation. Leave it as None; only if the metric that
        transform() prints is not simplified should you help it by
        specifying the correct one.
recursive ... apply recursive trigonometric simplification (use only when
needed, as it is an expensive operation)
"""
print("_"*80)
print("Transformation:", name)
for x, y in zip(X, Y):
pprint(Eq(y, x))
J = X.jacobian(Y)
print("Jacobian:")
pprint(J)
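    # The induced metric is g = J^T * J (the Euclidean metric pulled back
    # through the transformation), i.e. g_ij = sum_k dX_k/dY_i * dX_k/dY_j.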
g = J.T*eye(J.shape[0])*J
g = g.applyfunc(expand)
print("metric tensor g_{ij}:")
pprint(g)
if g_correct is not None:
g = g_correct
print("metric tensor g_{ij} specified by hand:")
pprint(g)
print("inverse metric tensor g^{ij}:")
g_inv = g.inv(method="ADJ")
g_inv = g_inv.applyfunc(simplify)
pprint(g_inv)
print("det g_{ij}:")
g_det = g.det()
pprint(g_det)
f = Function("f")(*list(Y))
print("Laplace:")
pprint(laplace(f, g_inv, g_det, Y))
def main():
mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w = symbols(
"mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w")
transform("polar", Matrix([rho*cos(phi), rho*sin(phi)]), [rho, phi])
transform("cylindrical", Matrix([rho*cos(phi), rho*sin(phi), z]),
[rho, phi, z])
transform("spherical",
Matrix([rho*sin(theta)*cos(phi), rho*sin(theta)*sin(phi),
rho*cos(theta)]),
[rho, theta, phi],
recursive=True
)
transform("rotating disk",
Matrix([t,
x*cos(w*t) - y*sin(w*t),
x*sin(w*t) + y*cos(w*t),
z]),
[t, x, y, z])
transform("parabolic",
Matrix([sigma*tau, (tau**2 - sigma**2) / 2]),
[sigma, tau])
transform("bipolar",
Matrix([a*sinh(tau)/(cosh(tau)-cos(sigma)),
a*sin(sigma)/(cosh(tau)-cos(sigma))]),
[sigma, tau]
)
transform("elliptic",
Matrix([a*cosh(mu)*cos(nu), a*sinh(mu)*sin(nu)]),
[mu, nu]
)
if __name__ == "__main__":
main()
| bsd-3-clause |
Eric-Zhong/odoo | addons/account_asset/account_asset_invoice.py | 141 | 3088 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def action_number(self, cr, uid, ids, *args, **kargs):
result = super(account_invoice, self).action_number(cr, uid, ids, *args, **kargs)
for inv in self.browse(cr, uid, ids):
self.pool.get('account.invoice.line').asset_create(cr, uid, inv.invoice_line)
return result
def line_get_convert(self, cr, uid, x, part, date, context=None):
res = super(account_invoice, self).line_get_convert(cr, uid, x, part, date, context=context)
res['asset_id'] = x.get('asset_id', False)
return res
class account_invoice_line(osv.osv):
_inherit = 'account.invoice.line'
_columns = {
'asset_category_id': fields.many2one('account.asset.category', 'Asset Category'),
}
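    # asset_create() is triggered from account_invoice.action_number(): every
    # invoice line carrying an asset category becomes an account.asset.asset
    # record, pre-filled with the category defaults via onchange_category_id()
    # and validated right away when the category has open_asset set.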
def asset_create(self, cr, uid, lines, context=None):
context = context or {}
asset_obj = self.pool.get('account.asset.asset')
for line in lines:
if line.asset_category_id:
vals = {
'name': line.name,
'code': line.invoice_id.number or False,
'category_id': line.asset_category_id.id,
'purchase_value': line.price_subtotal,
'period_id': line.invoice_id.period_id.id,
'partner_id': line.invoice_id.partner_id.id,
'company_id': line.invoice_id.company_id.id,
'currency_id': line.invoice_id.currency_id.id,
'purchase_date' : line.invoice_id.date_invoice,
}
changed_vals = asset_obj.onchange_category_id(cr, uid, [], vals['category_id'], context=context)
vals.update(changed_vals['value'])
asset_id = asset_obj.create(cr, uid, vals, context=context)
if line.asset_category_id.open_asset:
asset_obj.validate(cr, uid, [asset_id], context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Juniper/contrail-dev-neutron | neutron/plugins/embrane/common/constants.py | 11 | 2821 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc.
from heleosapi import exceptions as h_exc
from neutron.plugins.common import constants
# Router specific constants
UTIF_LIMIT = 7
QUEUE_TIMEOUT = 300
class Status:
# Transient
CREATING = constants.PENDING_CREATE
UPDATING = constants.PENDING_UPDATE
DELETING = constants.PENDING_DELETE
# Final
ACTIVE = constants.ACTIVE
ERROR = constants.ERROR
READY = constants.INACTIVE
DELETED = "DELETED" # not visible
class Events:
CREATE_ROUTER = "create_router"
UPDATE_ROUTER = "update_router"
DELETE_ROUTER = "delete_router"
GROW_ROUTER_IF = "grow_router_if"
SHRINK_ROUTER_IF = "shrink_router_if"
SET_NAT_RULE = "set_nat_rule"
RESET_NAT_RULE = "reset_nat_rule"
_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s")
_DVA_NOT_FOUND_ERROR_MSG = _("Dva can't be found to execute the operation, "
                             "it was probably cancelled through the heleos UI")
_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s")
_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken "
"for reason %s")
_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s")
_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state "
"for reason %s")
_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s")
_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend "
"router for reason %s. Please remove "
"it manually through the heleos UI")
error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG,
             h_exc.DvaNotFound: _DVA_NOT_FOUND_ERROR_MSG,
h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG,
h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG,
h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG,
h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG,
h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG,
h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG}
| apache-2.0 |
michaelmior/lemur | docs/conf.py | 8 | 8204 | # -*- coding: utf-8 -*-
#
# lemur documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 7 18:43:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.autohttp.flask',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lemur'
copyright = u'2015, Netflix Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'lemurdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'lemur.tex', u'Lemur Documentation',
u'Kevin Glisson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Lemur', u'Lemur Documentation',
[u'Kevin Glisson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Lemur', u'Lemur Documentation',
u'Kevin Glisson', 'Lemur', 'SSL Certificate Management',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
stevenbaker/dotfiles | .vim/bundle/jedi-vim/jedi/jedi/modules.py | 2 | 14663 | """
Don't confuse these classes with :mod:`parsing_representation` modules; the
modules here can access that representation with ``module.parser.module``.
``Module`` exists mainly for caching purposes.
Basically :mod:`modules` offers the classes:
- ``CachedModule``, a base class for Cachedmodule.
- ``Module`` the class for all normal Python modules (not builtins, they are at
home at :mod:`builtin`).
- ``ModuleWithCursor``, holds the module information for :class:`api.Script`.
Apart from those classes there's a ``sys.path`` fetching function, as well as
`Virtual Env` and `Django` detection.
"""
from __future__ import with_statement
import re
import tokenizer as tokenize
import sys
import os
from ast import literal_eval
from jedi._compatibility import exec_function, unicode
from jedi import cache
from jedi import parsing_representation as pr
from jedi import fast_parser
from jedi import debug
from jedi import common
class CachedModule(object):
"""
The base type for all modules, which is not to be confused with
`parsing_representation.Module`. Caching happens here.
"""
def __init__(self, path=None, name=None):
self.path = path and os.path.abspath(path)
self.name = name
self._parser = None
@property
def parser(self):
""" get the parser lazy """
if self._parser is None:
self._parser = cache.load_module(self.path, self.name) \
or self._load_module()
return self._parser
def _get_source(self):
raise NotImplementedError()
def _load_module(self):
source = self._get_source()
p = self.path or self.name
p = fast_parser.FastParser(source, p)
cache.save_module(self.path, self.name, p)
return p
class Module(CachedModule):
"""
    Manages all files that are parsed and caches them.
:param path: The module path of the file.
:param source: The source code of the file.
"""
def __init__(self, path, source=None):
super(Module, self).__init__(path=path)
if source is None:
with open(path) as f:
source = f.read()
self.source = source_to_unicode(source)
self._line_cache = None
def _get_source(self):
""" Just one time """
s = self.source
del self.source # memory efficiency
return s
class ModuleWithCursor(Module):
"""
    Manages all files that are parsed and caches them.
    The params source and path are important; one of them has to
    be given.
:param source: The source code of the file.
:param path: The module path of the file or None.
    :param position: The position the user is currently in. Only important \
for the main file.
"""
def __init__(self, path, source, position):
super(ModuleWithCursor, self).__init__(path, source)
self.position = position
self.source = source
self._path_until_cursor = None
        # these two are only used because there is no nonlocal in Python 2
self._line_temp = None
self._relevant_temp = None
@property
def parser(self):
""" get the parser lazy """
if not self._parser:
with common.ignored(KeyError):
parser = cache.parser_cache[self.path].parser
cache.invalidate_star_import_cache(parser.module)
# Call the parser already here, because it will be used anyways.
# Also, the position is here important (which will not be used by
# default), therefore fill the cache here.
self._parser = fast_parser.FastParser(self.source, self.path,
self.position)
# don't pickle that module, because it's changing fast
cache.save_module(self.path, self.name, self._parser,
pickling=False)
return self._parser
def get_path_until_cursor(self):
""" Get the path under the cursor. """
if self._path_until_cursor is None: # small caching
self._path_until_cursor, self._start_cursor_pos = \
self._get_path_until_cursor(self.position)
return self._path_until_cursor
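    # _get_path_until_cursor() tokenizes the source *backwards* from the given
    # position: fetch_line() hands out reversed lines, and the token loop walks
    # the dotted path until something that cannot belong to it (an operator,
    # an unbalanced bracket, ...) is hit.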
def _get_path_until_cursor(self, start_pos=None):
def fetch_line():
if self._is_first:
self._is_first = False
self._line_length = self._column_temp
line = self._first_line
else:
line = self.get_line(self._line_temp)
self._line_length = len(line)
line = line + '\n'
# add lines with a backslash at the end
while True:
self._line_temp -= 1
last_line = self.get_line(self._line_temp)
#print self._line_temp, repr(last_line)
if last_line and last_line[-1] == '\\':
line = last_line[:-1] + ' ' + line
self._line_length = len(last_line)
else:
break
return line[::-1]
self._is_first = True
self._line_temp, self._column_temp = start_cursor = start_pos
self._first_line = self.get_line(self._line_temp)[:self._column_temp]
open_brackets = ['(', '[', '{']
close_brackets = [')', ']', '}']
gen = tokenize.generate_tokens(fetch_line)
string = ''
level = 0
force_point = False
last_type = None
try:
for token_type, tok, start, end, line in gen:
# print 'tok', token_type, tok, force_point
if last_type == token_type == tokenize.NAME:
string += ' '
if level > 0:
if tok in close_brackets:
level += 1
if tok in open_brackets:
level -= 1
elif tok == '.':
force_point = False
elif force_point:
# it is reversed, therefore a number is getting recognized
# as a floating point number
if token_type == tokenize.NUMBER and tok[0] == '.':
force_point = False
else:
break
elif tok in close_brackets:
level += 1
elif token_type in [tokenize.NAME, tokenize.STRING]:
force_point = True
elif token_type == tokenize.NUMBER:
pass
else:
self._column_temp = self._line_length - end[1]
break
x = start_pos[0] - end[0] + 1
l = self.get_line(x)
l = self._first_line if x == start_pos[0] else l
start_cursor = x, len(l) - end[1]
self._column_temp = self._line_length - end[1]
string += tok
last_type = token_type
except tokenize.TokenError:
debug.warning("Tokenize couldn't finish", sys.exc_info)
# string can still contain spaces at the end
return string[::-1].strip(), start_cursor
def get_path_under_cursor(self):
"""
Return the path under the cursor. If there is a rest of the path left,
it will be added to the stuff before it.
"""
return self.get_path_until_cursor() + self.get_path_after_cursor()
def get_path_after_cursor(self):
line = self.get_line(self.position[0])
return re.search("[\w\d]*", line[self.position[1]:]).group(0)
def get_operator_under_cursor(self):
line = self.get_line(self.position[0])
after = re.match("[^\w\s]+", line[self.position[1]:])
before = re.match("[^\w\s]+", line[:self.position[1]][::-1])
return (before.group(0) if before is not None else '') \
+ (after.group(0) if after is not None else '')
def get_context(self, yield_positions=False):
pos = self._start_cursor_pos
while True:
# remove non important white space
line = self.get_line(pos[0])
while True:
if pos[1] == 0:
line = self.get_line(pos[0] - 1)
if line and line[-1] == '\\':
pos = pos[0] - 1, len(line) - 1
continue
else:
break
if line[pos[1] - 1].isspace():
pos = pos[0], pos[1] - 1
else:
break
try:
result, pos = self._get_path_until_cursor(start_pos=pos)
if yield_positions:
yield pos
else:
yield result
except StopIteration:
if yield_positions:
yield None
else:
yield ''
def get_line(self, line_nr):
if not self._line_cache:
self._line_cache = self.source.splitlines()
if self.source:
if self.source[-1] == '\n':
self._line_cache.append('')
else: # ''.splitlines() == []
self._line_cache = ['']
if line_nr == 0:
# This is a fix for the zeroth line. We need a newline there, for
# the backwards parser.
return ''
if line_nr < 0:
raise StopIteration()
try:
return self._line_cache[line_nr - 1]
except IndexError:
raise StopIteration()
def get_position_line(self):
return self.get_line(self.position[0])[:self.position[1]]
def get_sys_path():
def check_virtual_env(sys_path):
""" Add virtualenv's site-packages to the `sys.path`."""
venv = os.getenv('VIRTUAL_ENV')
if not venv:
return
venv = os.path.abspath(venv)
p = os.path.join(
venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
sys_path.insert(0, p)
check_virtual_env(sys.path)
return [p for p in sys.path if p != ""]
@cache.memoize_default([])
def sys_path_with_modifications(module):
def execute_code(code):
c = "import os; from os.path import *; result=%s"
variables = {'__file__': module.path}
try:
exec_function(c % code, variables)
except Exception:
debug.warning('sys path detected, but failed to evaluate')
return None
try:
res = variables['result']
if isinstance(res, str):
return os.path.abspath(res)
else:
return None
except KeyError:
return None
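    # check_module() scans the parsed module for statements of the form
    # sys.path.insert(0, ...) / sys.path.append(...) and evaluates their
    # argument (via execute_code above) to extend the effective search path.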
def check_module(module):
try:
possible_stmts = module.used_names['path']
except KeyError:
return get_sys_path()
sys_path = list(get_sys_path()) # copy
for p in possible_stmts:
if not isinstance(p, pr.Statement):
continue
commands = p.get_commands()
if len(commands) != 1: # sys.path command is just one thing.
continue
call = commands[0]
n = call.name
if not isinstance(n, pr.Name) or len(n.names) != 3:
continue
if n.names[:2] != ('sys', 'path'):
continue
array_cmd = n.names[2]
if call.execution is None:
continue
exe = call.execution
if not (array_cmd == 'insert' and len(exe) == 2
or array_cmd == 'append' and len(exe) == 1):
continue
if array_cmd == 'insert':
exe_type, exe.type = exe.type, pr.Array.NOARRAY
exe_pop = exe.values.pop(0)
res = execute_code(exe.get_code())
if res is not None:
sys_path.insert(0, res)
debug.dbg('sys path inserted: %s' % res)
exe.type = exe_type
exe.values.insert(0, exe_pop)
elif array_cmd == 'append':
res = execute_code(exe.get_code())
if res is not None:
sys_path.append(res)
debug.dbg('sys path added: %s' % res)
return sys_path
if module.path is None:
# Support for modules without a path is bad, therefore return the
# normal path.
return list(get_sys_path())
curdir = os.path.abspath(os.curdir)
with common.ignored(OSError):
os.chdir(os.path.dirname(module.path))
result = check_module(module)
result += detect_django_path(module.path)
# cleanup, back to old directory
os.chdir(curdir)
return result
def detect_django_path(module_path):
""" Detects the path of the very well known Django library (if used) """
result = []
while True:
new = os.path.dirname(module_path)
# If the module_path doesn't change anymore, we're finished -> /
if new == module_path:
break
else:
module_path = new
with common.ignored(IOError):
with open(module_path + os.path.sep + 'manage.py'):
debug.dbg('Found django path: %s' % module_path)
result.append(module_path)
return result
def source_to_unicode(source, encoding=None):
def detect_encoding():
""" For the implementation of encoding definitions in Python, look at:
http://www.python.org/dev/peps/pep-0263/
http://docs.python.org/2/reference/lexical_analysis.html#encoding-\
declarations
"""
byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
if source.startswith(byte_mark):
# UTF-8 byte-order mark
return 'utf-8'
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0)
possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
first_two_lines)
if possible_encoding:
return possible_encoding.group(1)
else:
# the default if nothing else has been set -> PEP 263
return encoding if encoding is not None else 'iso-8859-1'
if isinstance(source, unicode):
# only cast str/bytes
return source
# cast to unicode by default
return unicode(source, detect_encoding(), 'replace')
| mit |
lanen/youtube-dl | test/test_utils.py | 34 | 31908 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import io
import json
import xml.etree.ElementTree
from youtube_dl.utils import (
age_restricted,
args_to_str,
clean_html,
DateRange,
detect_exe_version,
encodeFilename,
escape_rfc3986,
escape_url,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
InAdvancePagedList,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
OnDemandPagedList,
orderedSet,
parse_duration,
parse_filesize,
parse_iso8601,
read_batch_urls,
sanitize_filename,
sanitize_path,
prepend_extension,
replace_extension,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
struct_unpack,
timeconvert,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
urlencode_postdata,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
self.assertEqual(
sanitize_filename('New World record at 0:12:34'),
'New World record at 0_12_34')
self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = 'a\xe4b\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self):
if sys.platform != 'win32':
return
self.assertEqual(sanitize_path('abc'), 'abc')
self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
self.assertEqual(sanitize_path('abc|def'), 'abc#def')
self.assertEqual(sanitize_path('<>:"|?*'), '#######')
self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(
sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
self.assertEqual(
sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
self.assertEqual(sanitize_path('../abc'), '..\\abc')
self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
self.assertEqual(sanitize_path('./abc'), 'abc')
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_prepend_extension(self):
self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')
def test_replace_extension(self):
self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
def test_ordered_set(self):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
self.assertEqual(orderedSet([1]), [1])
# keep the list ordered
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(
unescapeHTML('é'), 'é')
def test_daterange(self):
_20century = DateRange("19000101", "20000101")
self.assertFalse("17890714" in _20century)
_ac = DateRange("00010101")
self.assertTrue("19690721" in _ac)
        _first_millennium = DateRange(end="10000101")
        self.assertTrue("07110427" in _first_millennium)
def test_unified_dates(self):
self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
self.assertEqual(unified_strdate('8/7/2009'), '20090708')
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968 12 10'), '19681210')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
self.assertEqual(
unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
'20141126')
self.assertEqual(
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
'20150202')
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
def test_find_xpath_attr(self):
testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
self.assertEqual(find('media:song/media:author').text, 'The Author')
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
doc = xml.etree.ElementTree.Element('root')
div = xml.etree.ElementTree.SubElement(doc, 'div')
p = xml.etree.ElementTree.SubElement(div, 'p')
p.text = 'Foo'
self.assertEqual(xpath_element(doc, 'div/p'), p)
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_element(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
def test_xpath_text(self):
testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
smug_url = smuggle_url(url, data)
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
self.assertEqual(url, unsmug_url)
self.assertEqual(data, unsmug_data)
res_url, res_data = unsmuggle_url(url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, None)
def test_shell_quote(self):
args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""")
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 123456)
self.assertEqual(str_to_int('123.456'), 123456)
def test_url_basename(self):
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
self.assertEqual(parse_duration(False), None)
self.assertEqual(parse_duration('invalid'), None)
self.assertEqual(parse_duration('1'), 1)
self.assertEqual(parse_duration('1337:12'), 80232)
self.assertEqual(parse_duration('9:12:43'), 33163)
self.assertEqual(parse_duration('12:00'), 720)
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
self.assertEqual(parse_duration('T30M38S'), 1838)
self.assertEqual(parse_duration('5 s'), 5)
self.assertEqual(parse_duration('3 min'), 180)
self.assertEqual(parse_duration('2.5 hours'), 9000)
self.assertEqual(parse_duration('02:03:04'), 7384)
self.assertEqual(parse_duration('01:02:03:04'), 93784)
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
self.assertEqual(parse_duration('87 Min.'), 5220)
def test_fix_xml_ampersands(self):
self.assertEqual(
fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a')
self.assertEqual(
fix_xml_ampersands('"&x=y&wrong;&z=a'),
'"&x=y&wrong;&z=a')
self.assertEqual(
fix_xml_ampersands('&'><"'),
'&'><"')
self.assertEqual(
fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼')
self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#')
def test_paged_list(self):
def testPL(size, pagesize, sliceargs, expected):
def get_page(pagenum):
firstid = pagenum * pagesize
upto = min(size, pagenum * pagesize + pagesize)
for i in range(firstid, upto):
yield i
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (4,), [4])
testPL(5, 2, (0, 3), [0, 1, 2])
testPL(5, 2, (1, 4), [1, 2, 3])
testPL(5, 2, (2, 99), [2, 3, 4])
testPL(5, 2, (20, 99), [])
def test_struct_unpack(self):
self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
def test_read_batch_urls(self):
f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': '[email protected]', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape('aä'), 'aä')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
def test_lowercase_escape(self):
self.assertEqual(lowercase_escape('aä'), 'aä')
self.assertEqual(lowercase_escape('\\u0026'), '&')
def test_limit_length(self):
self.assertEqual(limit_length(None, 12), None)
self.assertEqual(limit_length('foo', 12), 'foo')
self.assertTrue(
limit_length('foo bar baz asd', 12).startswith('foo bar'))
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_escape_rfc3986(self):
reserved = "!*'();:@&=+$,/?#[]"
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
self.assertEqual(escape_rfc3986(reserved), reserved)
self.assertEqual(escape_rfc3986(unreserved), unreserved)
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
self.assertEqual(
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
)
self.assertEqual(
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
)
self.assertEqual(
escape_url('http://тест.рф/фрагмент'),
'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
)
self.assertEqual(
escape_url('http://тест.рф/абв?абв=абв#абв'),
'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
# Ignore JavaScript code as well
on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
d = json.loads(on)
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 'a')
on = js_to_json('["abc", "def",]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('{"abc": "def",}')
self.assertEqual(json.loads(on), {'abc': 'def'})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
self.assertEqual(
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
'foo ba/r -baz \'2 be\' \'\''
)
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
self.assertEqual(parse_filesize('1,24 KB'), 1240)
def test_version_tuple(self):
self.assertEqual(version_tuple('1'), (1,))
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
def test_detect_exe_version(self):
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
self.assertFalse(age_restricted(None, 10)) # unrestricted content
self.assertFalse(age_restricted(1, None)) # unrestricted policy
self.assertFalse(age_restricted(8, 10))
self.assertTrue(age_restricted(18, 14))
self.assertFalse(age_restricted(18, 18))
def test_is_html(self):
self.assertFalse(is_html(b'\x49\x44\x43<html'))
self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-8 with BOM
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-16-LE
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
))
self.assertTrue(is_html( # UTF-16-BE
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
))
self.assertTrue(is_html( # UTF-32-BE
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
self.assertTrue(is_html( # UTF-32-LE
b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]]),
'a bcd\n'
'123 4\n'
'9999 51')
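    # match_str() evaluates a small filter language (presumably the one behind
    # --match-filter): clauses joined by '&', bare or '!'-negated field names,
    # numeric comparisons with optional K/M suffixes, and a '?' after the
    # operator to accept a missing field, as the cases below show.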
def test_match_str(self):
self.assertRaises(ValueError, match_str, 'xy>foobar', {})
self.assertFalse(match_str('xy', {'x': 1200}))
self.assertTrue(match_str('!xy', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 1200}))
self.assertFalse(match_str('!x', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 0}))
self.assertFalse(match_str('x>0', {'x': 0}))
self.assertFalse(match_str('x>0', {}))
self.assertTrue(match_str('x>?0', {}))
self.assertTrue(match_str('x>1K', {'x': 1200}))
self.assertFalse(match_str('x>2K', {'x': 1200}))
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 90, 'description': 'foo'}))
self.assertTrue(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 10}))
def test_parse_dfxp_time_expr(self):
self.assertEqual(parse_dfxp_time_expr(None), 0.0)
self.assertEqual(parse_dfxp_time_expr(''), 0.0)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
</div>
</body>
</tt>'''
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
self.assertEqual(dfxp2srt(dfxp_data), srt_data)
dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
def test_cli_option(self):
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
def test_cli_valueless_option(self):
self.assertEqual(cli_valueless_option(
{'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
self.assertEqual(cli_valueless_option(
{'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
def test_cli_bool_option(self):
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
['--no-check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
['--no-check-certificate=true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=true'])
if __name__ == '__main__':
unittest.main()
| unlicense |